hip_filename (string, 5-84 chars) | hip_content (string, 79-9.69M chars) | cuda_filename (string, 4-83 chars) | cuda_content (string, 19-9.69M chars)
---|---|---|---|
406d83843b6bba6b340af9c72cb1f385debe501e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <cv.h>
#include <highgui.h>
#include "functions.h"
#include "common/cuda/BufferManager.h"
#include "common/cuda/Types.h"
#include "common/cuda/Util.cuh"
#include "common/cuda/Memory.h"
#include "common/cuda/EventManagement.cuh"
template<typename T>
__device__ __inline__ float Convolution3x3(hipTextureObject_t in,
int ctr_t_x, int ctr_t_y) {
/** Since the Gaussian filter is symmetric -
1 2 1
2 4 2
1 2 1
l c r
we can split it into 3 columns: left, center, right, respectively.
As the convolution proceeds, we notice that the result of r can be reused as l in the next convolution.
Why? Because of the downsampling game we play:
O O O O
O X O X
O O O O
O X O X
The above is a 4x4 tile and the X's are the pixels we pick in the downsampling.
Only the convolved result of the 'X' pixels matters; we skip all the 'O' pixels. Note that neighbouring 'X' pixels share the
same column of pixels, just on a different side (right vs. left). */
// now we have avoided the expensive SHL, SHR
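/* A minimal worked example of the separable weights (assuming the 1-2-1 x 1-2-1
Gaussian shown above): each column sum is
col(x) = I(x, y-1) + 2*I(x, y) + I(x, y+1)
and the filtered value is
(col(x-1) + 2*col(x) + col(x+1)) / 16
since the nine weights sum to 16. Because the downsampled outputs are two input
texels apart, the right column sum of one output pixel becomes the left column
sum of the next one. */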
int top_t_y = ctr_t_y - 1;
int btm_t_y = ctr_t_y + 1;
int lft_t_x = ctr_t_x - 1;
int rgt_t_x = ctr_t_x + 1;
float lft, ctr, rgt; // left center right (columns)
lft = read2D<T>(in, lft_t_x, top_t_y)
+ (read2D<T>(in, lft_t_x, ctr_t_y) * 2.0)
+ read2D<T>(in, lft_t_x, btm_t_y);
ctr = read2D<T>(in, ctr_t_x, top_t_y)
+ (read2D<T>(in, ctr_t_x, ctr_t_y) * 2.0)
+ read2D<T>(in, ctr_t_x, btm_t_y);
rgt = read2D<T>(in, rgt_t_x, top_t_y)
+ (read2D<T>(in, rgt_t_x, ctr_t_y) * 2.0)
+ read2D<T>(in, rgt_t_x, btm_t_y);
return (lft + (ctr * 2.0) + rgt) / 16.0;
}
template<typename T>
__global__ void DownSampleHalfScale3x3(hipSurfaceObject_t out, hipTextureObject_t in,
int x_offset, int y_offset, int width, int height) {
int x = x_offset + blockIdx.x * blockDim.x + threadIdx.x;
int y = y_offset + blockIdx.y * blockDim.y + threadIdx.y;
if (x < (width >> 1) && y < (height >> 1)) {
float val = Convolution3x3<T>(in, (x << 1) + 1, (y << 1) + 1);
T vout = (T)val; // convert the filtered value to the output type
write2D<T>(out, vout, x, y);
}
}
void convolution_test(){
std::string filename = "image.bmp";
cv::Mat image = cv::imread(filename, 0);
cv::Mat out(image.rows/2, image.cols/2, CV_8UC1);
BufferManager d_in, d_out;
#ifdef PITCH2D_TEST
d_in.create(image.cols, image.rows, UCHAR, PITCH2D, RD_ELEMENT_TYPE);
d_out.create(image.cols, image.rows, UCHAR, PITCH2D, RD_ELEMENT_TYPE);
#else
d_in.create(image.cols, image.rows, UCHAR, BLOCK_LINEAR, RD_ELEMENT_TYPE);
d_out.create(image.cols/2, image.rows/2, UCHAR, BLOCK_LINEAR, RD_ELEMENT_TYPE);
#endif
d_in.upload(image.data, image.step, 0, 0, image.cols, image.rows);
/*EventRecord time;
time.addRecord("Start");*/
//use events to time the kernel
float time_elapsed = 0;
hipEvent_t start, stop;
hipEventCreate(&start); //create the events
hipEventCreate(&stop);
hipEventRecord(start, 0); //record the current time
dim3 blocks(16, 8);
dim3 grids(DIVUP(image.cols / 2, blocks.x), DIVUP(image.rows/2, blocks.y));
DownSampleHalfScale3x3<uchar> << <grids, blocks >> >(d_out.cu_surf_obj(), d_in.cu_tex_obj(), 0, 0, image.cols, image.rows);
hipEventRecord(stop, 0); //record the current time
hipEventSynchronize(start); //Waits for an event to complete.
hipEventSynchronize(stop); //Waits for an event to complete (the work recorded before it).
hipEventElapsedTime(&time_elapsed, start, stop); //compute the elapsed time
printf("Elapsed time: %f(ms)\n", time_elapsed);
//time.addRecord("Stop");
////time.print();
//std::cout << "time costs: " << time.getCurrentTime() << std::endl;
d_out.download(out.data, image.cols/2, 0, 0, image.cols/2, image.rows/2);
cv::imshow("image", image);
cv::imshow("out", out);
cv::waitKey(0);
}
|
406d83843b6bba6b340af9c72cb1f385debe501e.cu
|
#include <cuda_runtime.h>
#include <cv.h>
#include <highgui.h>
#include "functions.h"
#include "common/cuda/BufferManager.h"
#include "common/cuda/Types.h"
#include "common/cuda/Util.cuh"
#include "common/cuda/Memory.h"
#include "common/cuda/EventManagement.cuh"
template<typename T>
__device__ __inline__ float Convolution3x3(cudaTextureObject_t in,
int ctr_t_x, int ctr_t_y) {
/** Since the Gaussian filter is symmetric -
1 2 1
2 4 2
1 2 1
l c r
we can split it into 3 columns: left, center, right, respectively.
As the convolution proceeds, we notice that the result of r can be reused as l in the next convolution.
Why? Because of the downsampling game we play:
O O O O
O X O X
O O O O
O X O X
The above is a 4x4 tile and the X's are the pixels we pick in the downsampling.
Only the convolved result of the 'X' pixels matters; we skip all the 'O' pixels. Note that neighbouring 'X' pixels share the
same column of pixels, just on a different side (right vs. left). */
// now we have avoided the expensive SHL, SHR
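/* A minimal worked example of the separable weights (assuming the 1-2-1 x 1-2-1
Gaussian shown above): each column sum is
col(x) = I(x, y-1) + 2*I(x, y) + I(x, y+1)
and the filtered value is
(col(x-1) + 2*col(x) + col(x+1)) / 16
since the nine weights sum to 16. Because the downsampled outputs are two input
texels apart, the right column sum of one output pixel becomes the left column
sum of the next one. */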
int top_t_y = ctr_t_y - 1;
int btm_t_y = ctr_t_y + 1;
int lft_t_x = ctr_t_x - 1;
int rgt_t_x = ctr_t_x + 1;
float lft, ctr, rgt; // left center right (columns)
lft = read2D<T>(in, lft_t_x, top_t_y)
+ (read2D<T>(in, lft_t_x, ctr_t_y) * 2.0)
+ read2D<T>(in, lft_t_x, btm_t_y);
ctr = read2D<T>(in, ctr_t_x, top_t_y)
+ (read2D<T>(in, ctr_t_x, ctr_t_y) * 2.0)
+ read2D<T>(in, ctr_t_x, btm_t_y);
rgt = read2D<T>(in, rgt_t_x, top_t_y)
+ (read2D<T>(in, rgt_t_x, ctr_t_y) * 2.0)
+ read2D<T>(in, rgt_t_x, btm_t_y);
return (lft + (ctr * 2.0) + rgt) / 16.0;
}
template<typename T>
__global__ void DownSampleHalfScale3x3(cudaSurfaceObject_t out, cudaTextureObject_t in,
int x_offset, int y_offset, int width, int height) {
int x = x_offset + blockIdx.x * blockDim.x + threadIdx.x;
int y = y_offset + blockIdx.y * blockDim.y + threadIdx.y;
if (x < (width >> 1) && y < (height >> 1)) {
float val = Convolution3x3<T>(in, (x << 1) + 1, (y << 1) + 1);
T vout = (T)val; // convert the filtered value to the output type
write2D<T>(out, vout, x, y);
}
}
void convolution_test(){
std::string filename = "image.bmp";
cv::Mat image = cv::imread(filename, 0);
cv::Mat out(image.rows/2, image.cols/2, CV_8UC1);
BufferManager d_in, d_out;
#ifdef PITCH2D_TEST
d_in.create(image.cols, image.rows, UCHAR, PITCH2D, RD_ELEMENT_TYPE);
d_out.create(image.cols, image.rows, UCHAR, PITCH2D, RD_ELEMENT_TYPE);
#else
d_in.create(image.cols, image.rows, UCHAR, BLOCK_LINEAR, RD_ELEMENT_TYPE);
d_out.create(image.cols/2, image.rows/2, UCHAR, BLOCK_LINEAR, RD_ELEMENT_TYPE);
#endif
d_in.upload(image.data, image.step, 0, 0, image.cols, image.rows);
/*EventRecord time;
time.addRecord("Start");*/
//use events to time the kernel
float time_elapsed = 0;
cudaEvent_t start, stop;
cudaEventCreate(&start); //create the events
cudaEventCreate(&stop);
cudaEventRecord(start, 0); //record the current time
dim3 blocks(16, 8);
dim3 grids(DIVUP(image.cols / 2, blocks.x), DIVUP(image.rows/2, blocks.y));
DownSampleHalfScale3x3<uchar> << <grids, blocks >> >(d_out.cu_surf_obj(), d_in.cu_tex_obj(), 0, 0, image.cols, image.rows);
cudaEventRecord(stop, 0); //record the current time
cudaEventSynchronize(start); //Waits for an event to complete.
cudaEventSynchronize(stop); //Waits for an event to complete (the work recorded before it).
cudaEventElapsedTime(&time_elapsed, start, stop); //compute the elapsed time
printf("Elapsed time: %f(ms)\n", time_elapsed);
//time.addRecord("Stop");
////time.print();
//std::cout << "time costs: " << time.getCurrentTime() << std::endl;
d_out.download(out.data, image.cols/2, 0, 0, image.cols/2, image.rows/2);
cv::imshow("image", image);
cv::imshow("out", out);
cv::waitKey(0);
}
|
94311d61cf91a0bf3efa548bec3b07efa09f0ec7.hip
|
// !!! This is a file automatically generated by hipify!!!
////////////////////////////////////////////////////////////////////////
// *cudaFGM*
// A fire growth model (FGM) that runs on NVIDIA GPUs.
//
//firelib is used to create most of the fire properties. Main firelib
//functions used are "Fire_FuelCatalogCreateStandard" to create the fire
//catalog, "Fire_SpreadNoWindNoSlope" and "Fire_SpreadWindSlopeMax" are
//used to compute fire ellipse properties.
//
//The kernel "FGM" is launched in each iteration of the fire growth model.
//
//Main output is the ignition map "ignMap_###.dat", where ### is a user
//defined tag
//
//Grass aspect and slope files can be read with "./cudaFGM 1". Grass slope files must
//be written in "percentage".
//
//Inputs provided in "RunSet.in": Map width (meters)
// Map height
// Number of rows
// Number of cols
// Fuel Model (NFFL models) or 14-custom
// Wind speed
// Wind Direction
// Moisture (M1, M10, M100, Mherb, Mwood)
// Custom Particle load (Not relevant if using
// one of the NFFL 13)
// ignition point (X) as a %(0-1) of map width
// ignition point (Y) as a %(0-1) of map height
// GPU device
// ignition map file name (no spaces)
// Verbosity (1 - more 0 - less)
// init ignMap to BEHAVE elipse for faster solution (1 - Yes, 0 - No)
//
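//A hypothetical example of the data fields read after the header (shown on one
//line; the order matches the fscanf call in main, and Rows/Cols must be equal
//and a multiple of BLOCK_SIZE):
// 100.0 100.0 128 128 1 5.0 90.0 0.06 0.07 0.08 0.5 0.5 0.2 0.5 0.5 0 ignMap_run.dat 1 1
//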
//To do list: - Maps must be read from outside for fuel, initial ignMap, etc
//
//Change log: 12/03/2012 Initial V fire shape is hardcoded
// 17/02/2012 cudaFGM reads Grass aspect and slope file formats.
//
#include "fireLib_float.h"
#include "header.h"
#include <rocblas.h>
#include <time.h>
//////////////////
//Function headers
//
int PrintMap ( float*, char* );
//
float* BEHAVEelipse( float, int, int, float, float, float);
//
int Print_CatalogStruct(FuelCatalogPtr);
//
int FGM_cycle( float*, float*,
float*, float*,
float*, float*,
float*, float*,
float*, float*,
float*, float*,
float*, float*,
float*,
float,
dim3,
dim3,
float,
float,
int);
//////////////////
//Global variables
int Rows; //Map dimensions are global variables
int Cols; //
float mapW_m, mapH_m; //map width height (meters)
float mapW, mapH; //map width height (feet)
size_t Model;
float WindSpd;
float WindDir;
float M1;
float M10;
float M100;
float Mherb;
float Mwood;
//////
//Main
int main ( int argc, char *argv[] )
{
int grass = atoi(argv[1]); //Grass files are used if grass==1
float slp_tmp, asp_tmp; //slope and aspect temporary values
unsigned int device; //Assigned GPU device
//
float Residue = INFINITY; //Residue value
float Residue_max = Smidgen;//Maximum residue
//
int row, col, cell; /* row, col, and index of current cell */
int Cells; /* total number of map cells */
float ignX, ignY; // ignition points
float CellWd;
//float CellHt; //!Not used yet!
//
FuelCatalogPtr catalog; /* fuel catalog handle */
float moisture[6]; /* fuel moisture content at current cell */
//
float particle_load; //*CUSTOM FUEL MODEL* - particle load
//
float *initialMap; //BEHAVE elliptical ignition map
size_t *fuelMap; /* ptr to fuel model map */
float *ignMap; /* ptr to ignition time map (minutes) */
float *ignMap_new; /* ptr to ignition time map (minutes) */
float *slpMap; /* ptr to slope map (rise/reach) */
float *aspMap; /* ptr to aspect map (degrees from north) */
float *wspdMap; /* ptr to wind speed map (ft/min) */
float *wdirMap; /* ptr to wind direction map (deg from north) */
float *m1Map; /* ptr to 1-hr dead fuel moisture map */
float *m10Map; /* ptr to 10-hr dead fuel moisture map */
float *m100Map; /* ptr to 100-hr dead fuel moisture map */
float *mherbMap; /* ptr to live herbaceous fuel moisture map */
float *mwoodMap; /* ptr to live stem fuel moisture map */
float *spread0Map;
float *spreadMaxMap;
float *azimuthMaxMap;
float *eccentricityMap;
float *phiEffWindMap;
//
float *ignMap_d;
float *ignMap_new_d;
float *spread0Map_d;
float *spreadMaxMap_d;
float *phiEffWindMap_d;
float *eccentricityMap_d;
float *azimuthMaxMap_d;
float *diff_d;
//
FILE *IN, *slope_file, *aspect_file;
char buffer[100]; //buffer to use when fgets skips lines
char ignFileName[40];
int n;
int verbosity; //level of verbosity of shell output
int init_elipse; //init ignMap to BEHAVE elipse for faster solution
//
clock_t start, end;
double time;
////////////////
//Read RunSet.in
IN = fopen("RunSet.in", "r");
//skips the header text lines of RunSet.in
for (n = 0; n < 24; n++)
fgets(buffer, 100, IN);
fscanf(IN, "%f %f %d %d %zu %f %f %f %f %f %f %f %f %f %f %u %s %d %d",
&mapW_m, &mapH_m, &Rows, &Cols, &Model, &WindSpd, &WindDir,
&M1, &M10, &M100, &Mherb, &Mwood, &particle_load, &ignX, &ignY, &device, ignFileName, &verbosity, &init_elipse);
//Input Checks
if ( ignX > 1 || ignX < 0 || ignY > 1 || ignY < 0 )
{
printf("\nERROR: Runset.in - ignition point must be 0 <= ign(X,Y) <=1!\n");
return (0);
}
if ( Cols != Rows )
{
printf("\nERROR: Runset.in - Rows must be equal to Cols!\n");
return (0);
}
if ( mapW_m != mapH_m )
{
printf("\nERROR: Runset.in - Width must be equal to height!\n");
return (0);
}
if ( Cols%BLOCK_SIZE != 0 || Rows%BLOCK_SIZE != 0)
{
printf("\nERROR: Cols and Rows must be multiples of BLOCK_SIZE! (cuda related restriction)\n");
return (0);
}
////////////////
//set GPU device
hipSetDevice(device);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(Rows/BLOCK_SIZE, Cols/BLOCK_SIZE);
//////////////
//cublas init
printf("\n>>Initializing CUBLAS...");
if( hipblasInit() != HIPBLAS_STATUS_SUCCESS)
{
printf("\nERROR: CUBLAS initialization!\n");
return (1);
}
else
printf("Done.\n");
///////////////////////
//Allocate all the maps
Cells = Rows * Cols;
if ( (ignMap = (float *) calloc(Cells, sizeof(float))) == NULL
|| (ignMap_new = (float *) calloc(Cells, sizeof(float))) == NULL
|| (azimuthMaxMap = (float *) calloc(Cells, sizeof(float))) == NULL
|| (spread0Map = (float *) calloc(Cells, sizeof(float))) == NULL
|| (spreadMaxMap = (float *) calloc(Cells, sizeof(float))) == NULL
|| (eccentricityMap = (float *) calloc(Cells, sizeof(float))) == NULL
|| (phiEffWindMap = (float *) calloc(Cells, sizeof(float))) == NULL
|| (slpMap = (float *) calloc(Cells, sizeof(float))) == NULL
|| (aspMap = (float *) calloc(Cells, sizeof(float))) == NULL
|| (wspdMap = (float *) calloc(Cells, sizeof(float))) == NULL
|| (wdirMap = (float *) calloc(Cells, sizeof(float))) == NULL
|| (m1Map = (float *) calloc(Cells, sizeof(float))) == NULL
|| (m10Map = (float *) calloc(Cells, sizeof(float))) == NULL
|| (m100Map = (float *) calloc(Cells, sizeof(float))) == NULL
|| (mherbMap = (float *) calloc(Cells, sizeof(float))) == NULL
|| (mwoodMap = (float *) calloc(Cells, sizeof(float))) == NULL
|| (fuelMap = (size_t *) calloc(Cells, sizeof(size_t))) == NULL )
{
fprintf(stderr, "Unable to allocate maps with %d cols and %d rows.\n",
Cols, Rows);
return (1);
}
//Cuda maps
hipMalloc((void**)&spread0Map_d, Cells*sizeof(float));
hipMalloc((void**)&spreadMaxMap_d, Cells*sizeof(float));
hipMalloc((void**)&phiEffWindMap_d, Cells*sizeof(float));
hipMalloc((void**)&eccentricityMap_d, Cells*sizeof(float));
hipMalloc((void**)&azimuthMaxMap_d, Cells*sizeof(float));
hipMalloc((void**)&ignMap_d, Cells*sizeof(float));
hipMalloc((void**)&ignMap_new_d, Cells*sizeof(float));
hipMalloc((void**)&diff_d, Cells*sizeof(float));
////////////
//Map set up
mapW = MetersToFeet(mapW_m);
mapH = MetersToFeet(mapH_m);
CellWd = mapW/(Cols -1);
//CellHt = mapH/(Rows -1);
//slope and aspect file read
slope_file = fopen("slope.map","r");
aspect_file = fopen("aspect.map","r");
//If using grass file format for aspect and slope
if (grass == 1)
{
//fgets to skip header in aspect and slope files
for (n = 0; n < 6; n++)
{
fgets(buffer, 100, slope_file);
fgets(buffer, 100, aspect_file);
}
//data assignment to maps
for ( row = 0; row < Rows; row++ )
{
for ( col = 0; col < Cols; col ++)
{
cell = col + row*Cols;
fscanf(aspect_file, "%f", &asp_tmp);
fscanf(slope_file, "%f", &slp_tmp);
slpMap[cell] = slp_tmp/100; //Slope in firelib is a fraction
asp_tmp = (asp_tmp - 90 < 0) ? //while in Grass is percentage rise/reach.
asp_tmp - 90 + 360 : asp_tmp - 90 ; //Aspect in firelib is N=0 and clockwise
aspMap[cell] = 360 - asp_tmp; //while aspect in Grass is E=0 counter-clockwise
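//Quick check of the conversion above: Grass aspect 0 (east) -> 0-90 < 0 -> 270,
//so aspMap = 360-270 = 90 (east, measured clockwise from north); Grass aspect 90
//(north) -> 90-90 = 0, so aspMap = 360-0 = 360, i.e. due north.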
fuelMap[cell] = Model;
wspdMap[cell] = 88. * WindSpd; /* convert mph into ft/min */
wdirMap[cell] = WindDir;
m1Map[cell] = M1;
m10Map[cell] = M10;
m100Map[cell] = M100;
mherbMap[cell] = Mherb;
mwoodMap[cell] = Mwood;
ignMap[cell] = 500;
ignMap_new[cell] = 500;
}
}
PrintMap(aspMap,"aspectTest.map");
}
else
{
//data assignment to maps
for ( row = 0; row < Rows; row++ )
{
for ( col = 0; col < Cols; col ++)
{
cell = col + row*Cols;
fscanf(aspect_file, "%f", &aspMap[cell] );
fscanf(slope_file, "%f", &slpMap[cell] );
wspdMap[cell] = 88. * WindSpd; /* convert mph into ft/min */
wdirMap[cell] = WindDir;
m1Map[cell] = M1;
m10Map[cell] = M10;
m100Map[cell] = M100;
mherbMap[cell] = Mherb;
mwoodMap[cell] = Mwood;
fuelMap[cell] = Model;
//Here is the definition of the V-shaped fire-front case:
//the ignition maps are edited according to
//the Viegas experimental case
//parameters of the two straight lines
float m_one = -tan( DegToRad(70));
float m_two = tan( DegToRad(70));
float b_one = MetersToFeet( 0.52) - m_one*MetersToFeet(2);
float b_two = MetersToFeet( 0.52) - m_two*MetersToFeet(2);
//definition of the fire line - everything outside the V (inclusive) is considered burned,
//the V is the ignition line, so it has to be initialized to zero
if (( ((Rows - 1) - row) >= (m_one*col + b_one/CellWd - MetersToFeet( 0.02)/CellWd) )
&& ( ((Rows - 1) - row) >= (m_two*col + b_two/CellWd - MetersToFeet( 0.02)/CellWd) ) )
{
ignMap[cell] = 2;
ignMap_new[cell] = 2;
}
else
{
ignMap[cell] = 0;
ignMap_new[cell] = 0;
}
}
}
}
//ignition point - ignX and ignY is a percentage of the map height and width
//cell = Cols*ignX + Cols*Rows*ignY;
//ignMap[cell] = 0;
//ignMap_new[cell] = 0;
////////////////////////////////
//Create fuel catalog
//Create the 13 + 0 (no fuel model) standard NFFL models and create space for an
//additional custom model
printf ("\n>>Creating standard fire models...");
catalog = Fire_FuelCatalogCreateStandard("Standard", 14);
printf ("Done.\n");
//Create an additional custom model based on NFFL1
//Only the PARTICLE LOAD is customized at the moment
if ( Fire_FuelModelCreate (
catalog, //FuelCatalogData instance
14, //fuel model number
"CUSTOM", //Name
"Custom Fuel model", //longer description
0.197, //bed depth (ft)
Fuel_Mext(catalog, 1), //moisture of extinction (dl)
Fuel_SpreadAdjustment(catalog, 1), //spread adjustment factor (dl)
1) != FIRE_STATUS_OK ) //maximum number of particles
{
fprintf(stderr, "%s\n", FuelCat_Error(catalog));
Fire_FuelCatalogDestroy(catalog);
return (NULL);
}
//Add a particle to the custom model no. 14
printf ("\n>>Creating custom fire model...");
start = clock();
if ( Fire_FuelParticleAdd (
catalog, // FuelCatalogData instance pointer
14, //Custom fuel model id
Fuel_Type(catalog,1,0),
particle_load, // Custom particle load (lbs/ft2)
1500.38, // surface-area-to-volume ratio (ft2/ft3)
Fuel_Density(catalog,1,0), //density (lbs/ft3)
Fuel_Heat(catalog,1,0), //heat of combustion (btus/lb)
Fuel_SiTotal(catalog,1,0), //total silica content (lb/lb)
Fuel_SiEffective(catalog,1,0))//effective silica content (lb/lb)
!= FIRE_STATUS_OK )
{
fprintf(stderr, "%s\n", FuelCat_Error(catalog));
Fire_FuelCatalogDestroy(catalog);
return (NULL);
}
else
{
end = clock();
time = ((double) (end - start))/CLOCKS_PER_SEC;
printf("Done with %lf seconds.\n",time);
}
/////////////////////////
//Print catalog structure
if (verbosity == 1) Print_CatalogStruct(catalog);
//////////////////////////////////////////////////////////////
//Preprocessing stage: create spread0 and spreadMax maps
//Initialize ignMap and ignMap_new to the BEHAVE elliptical ignition map
printf("\n>>Running preprocessor...");
start = clock();
for (cell = 0; cell < Cells; cell++)
{
Model = fuelMap[cell];
moisture[0] = m1Map[cell];
moisture[1] = m10Map[cell];
moisture[2] = m100Map[cell];
moisture[3] = m100Map[cell];
moisture[4] = mherbMap[cell];
moisture[5] = mwoodMap[cell];
Fire_SpreadNoWindNoSlope(catalog, Model, moisture);
Fire_SpreadWindSlopeMax(catalog, Model, wspdMap[cell],
wdirMap[cell], slpMap[cell], aspMap[cell]);
spread0Map[cell] = Fuel_Spread0(catalog,Model);
spreadMaxMap[cell] = Fuel_SpreadMax(catalog,Model);
azimuthMaxMap[cell] = Fuel_AzimuthMax(catalog,Model);
eccentricityMap[cell] = Fuel_Eccentricity(catalog,Model);
phiEffWindMap[cell] = Fuel_PhiEffWind(catalog,Model);
}
//Initialize BEHAVE Elipse and update ignMap and ignMap_new
if (init_elipse == 1)
{
cell = Cols*ignX + Cols*Rows*ignY; //elipse is created with ignition point values
initialMap = BEHAVEelipse( CellWd, Rows*ignY, Cols*ignX, spreadMaxMap[cell], eccentricityMap[cell], azimuthMaxMap[cell]);
for (cell = 0; cell < Cells; cell++)
ignMap[cell] = ignMap_new[cell] = initialMap[cell];
}
end = clock();
time = ((double) (end - start))/CLOCKS_PER_SEC;
printf("Done with %lf seconds.\n",time);
///////////////////////////////////
//Fire growth model iterative cycle
printf("\n>>Running FGM cycle...");
start = clock();
if ( FGM_cycle( ignMap, ignMap_d,
ignMap_new, ignMap_new_d,
spread0Map, spread0Map_d,
spreadMaxMap, spreadMaxMap_d,
phiEffWindMap, phiEffWindMap_d,
eccentricityMap, eccentricityMap_d,
azimuthMaxMap, azimuthMaxMap_d,
diff_d,
CellWd,
dimGrid,
dimBlock,
Residue,
Residue_max,
Cells) !=1)
{
printf("\nERROR: FGM cycle.\n");
}
else
{
end = clock();
time = ((double) (end - start))/CLOCKS_PER_SEC;
printf("Done with %lf seconds.\n",time);
}
//////////////////////////
//Mem copy of ignition map
hipMemcpy( ignMap, ignMap_d, Cells*sizeof(float), hipMemcpyDeviceToHost);
////////////////////
//Print Ignition Map
PrintMap(ignMap, ignFileName);
//////////////
//Close Cublas
printf ("\n>>Closing CUBLAS...");
if( hipblasShutdown() != HIPBLAS_STATUS_SUCCESS)
{
printf("\nERROR: CUBLAS Shutdown!\n");
return (1);
}
else
printf("Done.\n");
}
/////////
//The END
////////////////
//More Functions
////////////////
///////////////////////////////////////////////////////////////////////////////
// Exact Solution Scenario 1
//
// The spread rate in a given direction is a function of the ROS of the wind-and-slope
// case, times a coefficient that depends on the angle between the direction of maximum
// spread (direction of the wind, etc.) and the direction of the actual propagation.
//
//R = RMax * F (azimuth, azimuthMax)
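//Concretely, as implemented in BEHAVEelipse below:
// dir = min(|azimuthMax - azimuth|, 360 - |azimuthMax - azimuth|)
// F = (1 - Ecc) / (1 - Ecc*cos(dir))
// ignition time = Dist / (Rmax * F)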
///////////////////////////////////////////////////////////////////////////////
float* BEHAVEelipse( float CellWd, int ignRow, int ignCol, float Rmax,
float Ecc, float azimuth_max)
{
float* elipseMap;
int row, col;
float Dist, DistH, DistW;
float azimuth; //cell direction to north
float dir; //angle between azimuth max and azimuth
float F; //Factor to be applied to Rmax
float ignTime = 0;
float CellHt = CellWd; //CellHt is equal to the cell width
//Code does not yet handle rectangular domains
elipseMap = (float*)malloc(Rows*Cols*sizeof(float));
for (row = 0 ; row < Rows; row++)
{
for (col = 0; col < Cols; col++)
{
DistW = (col - ignCol) * CellWd;
DistH = (row - ignRow) * CellHt;
//Special Cases
if ((col - ignCol) == 0 && (row - ignRow) < 0)
azimuth = 0.;
else if ((col - ignCol) > 0 && (row - ignRow) == 0)
azimuth = 90.;
else if ((col - ignCol) == 0 && (row - ignRow) > 0)
azimuth = 180.;
else if ((col - ignCol) < 0 && (row - ignRow) == 0)
azimuth = 270.;
//1st Quadrant
else if ( (col - ignCol) > 0 && (row - ignRow) < 0 )
{
azimuth = fabs( atanf( DistW / DistH ) );
azimuth = RadToDeg(azimuth);
}
//2nd Quadrant
else if ( (col - ignCol) > 0 && (row - ignRow) > 0 )
{
azimuth = atanf( DistH / DistW );
azimuth = RadToDeg(azimuth) + 90.;
}
//3rd Quadrant
else if ( (col - ignCol) < 0 && (row - ignRow) > 0 )
{
azimuth = fabs(atanf( DistW / DistH ));
azimuth = RadToDeg(azimuth) + 180.;
}
//4th Quadrant
else if ( (col - ignCol) < 0 && (row - ignRow) < 0 )
{
azimuth = atanf( DistH / DistW );
azimuth = RadToDeg(azimuth) + 270.;
}
if ((dir = fabs( azimuth_max - azimuth )) > 180. )
dir = 360. - dir; // minimum distance between lines
dir = DegToRad(dir);
F = (1- Ecc) / (1 - Ecc*cosf(dir));
Dist = sqrt( DistH*DistH + DistW*DistW);
elipseMap[col + Cols*row] = ignTime + Dist / Rmax/F;
}
}
return(elipseMap);
}
////////////
//Print Maps
int PrintMap ( float* map, char* fileName )
{
FILE *fPtr;
int cell, col, row;
if ( (fPtr = fopen(fileName, "w")) == NULL )
{
printf("Unable to open output map \"%s\".\n", fileName);
return (FIRE_STATUS_ERROR);
}
for ( row = 0; row < Rows; row++ )
{
for ( cell=row*Cols, col=0; col<Cols; col++, cell++ )
{
fprintf(fPtr, " %5.2f ", (map[cell]==INFINITY) ? 000.00 : map[cell]);
}
fprintf(fPtr, "\n");
}
fclose(fPtr);
return (FIRE_STATUS_OK);
}
/////////////////////////
//Print catalog structure
int Print_CatalogStruct(FuelCatalogPtr catalog)
{
int now_model, now_particle;
const char *partdescription;
printf("\n>>There is a total of %2u Models in the catalog %s:\n\n", catalog->maxModels, catalog->name);
printf(" Model ID | name | description | mext | MaxPartc.\n");
printf(" ----------------------------------------------------------------------\n");
for (now_model = 0; now_model < catalog->maxModels; now_model++)
{
printf(" %2u | %6s | %30s | %5.3f | %1u\n",catalog->modelPtr[now_model]->modelId,
catalog->modelPtr[now_model]->name,
catalog->modelPtr[now_model]->desc,
catalog->modelPtr[now_model]->mext,
catalog->modelPtr[now_model]->maxParticles);
}
printf("\n\nThere is a total of 39 number of particles in the catalog:\n\n");
for (now_model = 1; now_model < catalog->maxModels; now_model++)
{
printf(">>For model %2u: %s\n ", catalog->modelPtr[now_model]->modelId,
catalog->modelPtr[now_model]->desc);
printf(" Prt. ID | Type | load | S/V | dens | Silica | Si_eff | Area | Sigma \n");
printf(" ------------------------------------------------------------------------------------ \n");
for (now_particle = 0; now_particle < catalog->modelPtr[now_model]->maxParticles ; now_particle++)
{
if (catalog->modelPtr[now_model]->partPtr[now_particle]->type == 1)
{
partdescription = "Dead particle";
}
else if (catalog->modelPtr[now_model]->partPtr[now_particle]->type == 2)
{
partdescription = "Live Herb ";
}
else
{
partdescription = "Live Wood ";
}
printf(" %2d | %s | %5.3f | %6.1f | %3.1f | %5.4f | %5.3f | %6.3f | %5.3f\n",
now_particle,
partdescription,
catalog->modelPtr[now_model]->partPtr[now_particle]->load,
catalog->modelPtr[now_model]->partPtr[now_particle]->savr,
catalog->modelPtr[now_model]->partPtr[now_particle]->dens,
catalog->modelPtr[now_model]->partPtr[now_particle]->stot,
catalog->modelPtr[now_model]->partPtr[now_particle]->seff,
catalog->modelPtr[now_model]->partPtr[now_particle]->area,
catalog->modelPtr[now_model]->partPtr[now_particle]->sigma);
}
putchar('\n');
}
return(0);
}
|
94311d61cf91a0bf3efa548bec3b07efa09f0ec7.cu
|
////////////////////////////////////////////////////////////////////////
// *cudaFGM*
// A fire growth model (FGM) that runs on NVIDIA GPUs.
//
//firelib is used to create most of the fire properties. Main firelib
//functions used are "Fire_FuelCatalogCreateStandard" to create the fire
//catalog, "Fire_SpreadNoWindNoSlope" and "Fire_SpreadWindSlopeMax" are
//used to compute fire ellipse properties.
//
//The kernel "FGM" is launched in each iteration of the fire growth model.
//
//Main output is the ignition map "ignMap_###.dat", where ### is a user
//defined tag
//
//Grass aspect and slope files can be read with "./cudaFGM 1". Grass slope files must
//be written in "percentage".
//
//Inputs provided in "RunSet.in": Map width (meters)
// Map height
// Number of rows
// Number of cols
// Fuel Model (NFFL models) or 14-custom
// Wind speed
// Wind Direction
// Moisture (M1, M10, M100, Mherb, Mwood)
// Custom Particle load (Not relevant if using
// one of the NFFL 13)
// ignition point (X) as a %(0-1) of map width
// ignition point (Y) as a %(0-1) of map height
// GPU device
// ignition map file name (no spaces)
// Verbosity (1 - more 0 - less)
// init ignMap to BEHAVE elipse for faster solution (1 - Yes, 0 - No)
//
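//A hypothetical example of the data fields read after the header (shown on one
//line; the order matches the fscanf call in main, and Rows/Cols must be equal
//and a multiple of BLOCK_SIZE):
// 100.0 100.0 128 128 1 5.0 90.0 0.06 0.07 0.08 0.5 0.5 0.2 0.5 0.5 0 ignMap_run.dat 1 1
//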
//To do list: - Maps must be read from outside for fuel, initial ignMap, etc
//
//Change log: 12/03/2012 Initial V fire shape is hardcoded
// 17/02/2012 cudaFGM reads Grass aspect and slope file formats.
//
#include "fireLib_float.h"
#include "header.h"
#include <cublas.h>
#include <time.h>
//////////////////
//Function headers
//
int PrintMap ( float*, char* );
//
float* BEHAVEelipse( float, int, int, float, float, float);
//
int Print_CatalogStruct(FuelCatalogPtr);
//
int FGM_cycle( float*, float*,
float*, float*,
float*, float*,
float*, float*,
float*, float*,
float*, float*,
float*, float*,
float*,
float,
dim3,
dim3,
float,
float,
int);
//////////////////
//Global variables
int Rows; //Map dimensions are global variables
int Cols; //
float mapW_m, mapH_m; //map width height (meters)
float mapW, mapH; //map width height (feet)
size_t Model;
float WindSpd;
float WindDir;
float M1;
float M10;
float M100;
float Mherb;
float Mwood;
//////
//Main
int main ( int argc, char *argv[] )
{
int grass = atoi(argv[1]); //Grass files are used if grass==1
float slp_tmp, asp_tmp; //slope and aspect temporary values
unsigned int device; //Assigned GPU device
//
float Residue = INFINITY; //Residue value
float Residue_max = Smidgen;//Maximum residue
//
int row, col, cell; /* row, col, and index of current cell */
int Cells; /* total number of map cells */
float ignX, ignY; // ignition points
float CellWd;
//float CellHt; //!Not used yet!
//
FuelCatalogPtr catalog; /* fuel catalog handle */
float moisture[6]; /* fuel moisture content at current cell */
//
float particle_load; //*CUSTOM FUEL MODEL* - particle load
//
float *initialMap; //BEHAVE elliptical ignition map
size_t *fuelMap; /* ptr to fuel model map */
float *ignMap; /* ptr to ignition time map (minutes) */
float *ignMap_new; /* ptr to ignition time map (minutes) */
float *slpMap; /* ptr to slope map (rise/reach) */
float *aspMap; /* ptr to aspect map (degrees from north) */
float *wspdMap; /* ptr to wind speed map (ft/min) */
float *wdirMap; /* ptr to wind direction map (deg from north) */
float *m1Map; /* ptr to 1-hr dead fuel moisture map */
float *m10Map; /* ptr to 10-hr dead fuel moisture map */
float *m100Map; /* ptr to 100-hr dead fuel moisture map */
float *mherbMap; /* ptr to live herbaceous fuel moisture map */
float *mwoodMap; /* ptr to live stem fuel moisture map */
float *spread0Map;
float *spreadMaxMap;
float *azimuthMaxMap;
float *eccentricityMap;
float *phiEffWindMap;
//
float *ignMap_d;
float *ignMap_new_d;
float *spread0Map_d;
float *spreadMaxMap_d;
float *phiEffWindMap_d;
float *eccentricityMap_d;
float *azimuthMaxMap_d;
float *diff_d;
//
FILE *IN, *slope_file, *aspect_file;
char buffer[100]; //buffer to use when fgets skips lines
char ignFileName[40];
int n;
int verbosity; //level of verbosity of shell output
int init_elipse; //init ignMap to BEHAVE elipse for faster solution
//
clock_t start, end;
double time;
////////////////
//Read RunSet.in
IN = fopen("RunSet.in", "r");
//skips the header text lines of RunSet.in
for (n = 0; n < 24; n++)
fgets(buffer, 100, IN);
fscanf(IN, "%f %f %d %d %zu %f %f %f %f %f %f %f %f %f %f %u %s %d %d",
&mapW_m, &mapH_m, &Rows, &Cols, &Model, &WindSpd, &WindDir,
&M1, &M10, &M100, &Mherb, &Mwood, &particle_load, &ignX, &ignY, &device, ignFileName, &verbosity, &init_elipse);
//Input Checks
if ( ignX > 1 || ignX < 0 || ignY > 1 || ignY < 0 )
{
printf("\nERROR: Runset.in - ignition point must be 0 <= ign(X,Y) <=1!\n");
return (0);
}
if ( Cols != Rows )
{
printf("\nERROR: Runset.in - Rows must be equal to Cols!\n");
return (0);
}
if ( mapW_m != mapH_m )
{
printf("\nERROR: Runset.in - Width must be equal to height!\n");
return (0);
}
if ( Cols%BLOCK_SIZE != 0 || Rows%BLOCK_SIZE != 0)
{
printf("\nERROR: Cols and Rows must be multiples of BLOCK_SIZE! (cuda related restriction)\n");
return (0);
}
////////////////
//set GPU device
cudaSetDevice(device);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(Rows/BLOCK_SIZE, Cols/BLOCK_SIZE);
//////////////
//cublas init
printf("\n>>Initializing CUBLAS...");
if( cublasInit() != CUBLAS_STATUS_SUCCESS)
{
printf("\nERROR: CUBLAS initialization!\n");
return (1);
}
else
printf("Done.\n");
///////////////////////
//Allocate all the maps
Cells = Rows * Cols;
if ( (ignMap = (float *) calloc(Cells, sizeof(float))) == NULL
|| (ignMap_new = (float *) calloc(Cells, sizeof(float))) == NULL
|| (azimuthMaxMap = (float *) calloc(Cells, sizeof(float))) == NULL
|| (spread0Map = (float *) calloc(Cells, sizeof(float))) == NULL
|| (spreadMaxMap = (float *) calloc(Cells, sizeof(float))) == NULL
|| (eccentricityMap = (float *) calloc(Cells, sizeof(float))) == NULL
|| (phiEffWindMap = (float *) calloc(Cells, sizeof(float))) == NULL
|| (slpMap = (float *) calloc(Cells, sizeof(float))) == NULL
|| (aspMap = (float *) calloc(Cells, sizeof(float))) == NULL
|| (wspdMap = (float *) calloc(Cells, sizeof(float))) == NULL
|| (wdirMap = (float *) calloc(Cells, sizeof(float))) == NULL
|| (m1Map = (float *) calloc(Cells, sizeof(float))) == NULL
|| (m10Map = (float *) calloc(Cells, sizeof(float))) == NULL
|| (m100Map = (float *) calloc(Cells, sizeof(float))) == NULL
|| (mherbMap = (float *) calloc(Cells, sizeof(float))) == NULL
|| (mwoodMap = (float *) calloc(Cells, sizeof(float))) == NULL
|| (fuelMap = (size_t *) calloc(Cells, sizeof(size_t))) == NULL )
{
fprintf(stderr, "Unable to allocate maps with %d cols and %d rows.\n",
Cols, Rows);
return (1);
}
//Cuda maps
cudaMalloc((void**)&spread0Map_d, Cells*sizeof(float));
cudaMalloc((void**)&spreadMaxMap_d, Cells*sizeof(float));
cudaMalloc((void**)&phiEffWindMap_d, Cells*sizeof(float));
cudaMalloc((void**)&eccentricityMap_d, Cells*sizeof(float));
cudaMalloc((void**)&azimuthMaxMap_d, Cells*sizeof(float));
cudaMalloc((void**)&ignMap_d, Cells*sizeof(float));
cudaMalloc((void**)&ignMap_new_d, Cells*sizeof(float));
cudaMalloc((void**)&diff_d, Cells*sizeof(float));
////////////
//Map set up
mapW = MetersToFeet(mapW_m);
mapH = MetersToFeet(mapH_m);
CellWd = mapW/(Cols -1);
//CellHt = mapH/(Rows -1);
//slope and aspect file read
slope_file = fopen("slope.map","r");
aspect_file = fopen("aspect.map","r");
//If using grass file format for aspect and slope
if (grass == 1)
{
//fgets to skip header in aspect and slope files
for (n = 0; n < 6; n++)
{
fgets(buffer, 100, slope_file);
fgets(buffer, 100, aspect_file);
}
//data assignment to maps
for ( row = 0; row < Rows; row++ )
{
for ( col = 0; col < Cols; col ++)
{
cell = col + row*Cols;
fscanf(aspect_file, "%f", &asp_tmp);
fscanf(slope_file, "%f", &slp_tmp);
slpMap[cell] = slp_tmp/100; //Slope in firelib is a fraction
asp_tmp = (asp_tmp - 90 < 0) ? //while in Grass is percentage rise/reach.
asp_tmp - 90 + 360 : asp_tmp - 90 ; //Aspect in firelib is N=0 and clockwise
aspMap[cell] = 360 - asp_tmp; //while aspect in Grass is E=0 counter-clockwise
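//Quick check of the conversion above: Grass aspect 0 (east) -> 0-90 < 0 -> 270,
//so aspMap = 360-270 = 90 (east, measured clockwise from north); Grass aspect 90
//(north) -> 90-90 = 0, so aspMap = 360-0 = 360, i.e. due north.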
fuelMap[cell] = Model;
wspdMap[cell] = 88. * WindSpd; /* convert mph into ft/min */
wdirMap[cell] = WindDir;
m1Map[cell] = M1;
m10Map[cell] = M10;
m100Map[cell] = M100;
mherbMap[cell] = Mherb;
mwoodMap[cell] = Mwood;
ignMap[cell] = 500;
ignMap_new[cell] = 500;
}
}
PrintMap(aspMap,"aspectTest.map");
}
else
{
//data assignment to maps
for ( row = 0; row < Rows; row++ )
{
for ( col = 0; col < Cols; col ++)
{
cell = col + row*Cols;
fscanf(aspect_file, "%f", &aspMap[cell] );
fscanf(slope_file, "%f", &slpMap[cell] );
wspdMap[cell] = 88. * WindSpd; /* convert mph into ft/min */
wdirMap[cell] = WindDir;
m1Map[cell] = M1;
m10Map[cell] = M10;
m100Map[cell] = M100;
mherbMap[cell] = Mherb;
mwoodMap[cell] = Mwood;
fuelMap[cell] = Model;
//Here is the definition of the V-shaped fire-front case:
//the ignition maps are edited according to
//the Viegas experimental case
//parameters of the two straight lines
float m_one = -tan( DegToRad(70));
float m_two = tan( DegToRad(70));
float b_one = MetersToFeet( 0.52) - m_one*MetersToFeet(2);
float b_two = MetersToFeet( 0.52) - m_two*MetersToFeet(2);
//definition of the fire line - everything outside the V (inclusive) is considered burned,
//the V is the ignition line, so it has to be initialized to zero
if (( ((Rows - 1) - row) >= (m_one*col + b_one/CellWd - MetersToFeet( 0.02)/CellWd) )
&& ( ((Rows - 1) - row) >= (m_two*col + b_two/CellWd - MetersToFeet( 0.02)/CellWd) ) )
{
ignMap[cell] = 2;
ignMap_new[cell] = 2;
}
else
{
ignMap[cell] = 0;
ignMap_new[cell] = 0;
}
}
}
}
//ignition point - ignX and ignY is a percentage of the map height and width
//cell = Cols*ignX + Cols*Rows*ignY;
//ignMap[cell] = 0;
//ignMap_new[cell] = 0;
////////////////////////////////
//Create fuel catalog
//Create the 13 + 0 (no fuel model) standard NFFL models and create space for an
//additional custom model
printf ("\n>>Creating standard fire models...");
catalog = Fire_FuelCatalogCreateStandard("Standard", 14);
printf ("Done.\n");
//Create an additional custom model based on NFFL1
//Only the PARTICLE LOAD is customized at the moment
if ( Fire_FuelModelCreate (
catalog, //FuelCatalogData instance
14, //fuel model number
"CUSTOM", //Name
"Custom Fuel model", //longer description
0.197, //bed depth (ft)
Fuel_Mext(catalog, 1), //moisture of extinction (dl)
Fuel_SpreadAdjustment(catalog, 1), //spread adjustment factor (dl)
1) != FIRE_STATUS_OK ) //maximum number of particles
{
fprintf(stderr, "%s\n", FuelCat_Error(catalog));
Fire_FuelCatalogDestroy(catalog);
return (NULL);
}
//Add a particle to the custom model nº 14
printf ("\n>>Creating custom fire model...");
start = clock();
if ( Fire_FuelParticleAdd (
catalog, // FuelCatalogData instance pointer
14, //Custom fuel model id
Fuel_Type(catalog,1,0),
particle_load, // Custom particle load (lbs/ft2)
1500.38, // surface-area-to-volume ratio (ft2/ft3)
Fuel_Density(catalog,1,0), //density (lbs/ft3)
Fuel_Heat(catalog,1,0), //heat of combustion (btus/lb)
Fuel_SiTotal(catalog,1,0), //total silica content (lb/lb)
Fuel_SiEffective(catalog,1,0))//effective silica content (lb/lb)
!= FIRE_STATUS_OK )
{
fprintf(stderr, "%s\n", FuelCat_Error(catalog));
Fire_FuelCatalogDestroy(catalog);
return (NULL);
}
else
{
end = clock();
time = ((double) (end - start))/CLOCKS_PER_SEC;
printf("Done with %lf seconds.\n",time);
}
/////////////////////////
//Print catalog structure
if (verbosity == 1) Print_CatalogStruct(catalog);
//////////////////////////////////////////////////////////////
//Preprocessing stage: create spread0 and spreadMax maps
//Initialize ignMap and ignMap_new to the BEHAVE elliptical ignition map
printf("\n>>Running preprocessor...");
start = clock();
for (cell = 0; cell < Cells; cell++)
{
Model = fuelMap[cell];
moisture[0] = m1Map[cell];
moisture[1] = m10Map[cell];
moisture[2] = m100Map[cell];
moisture[3] = m100Map[cell];
moisture[4] = mherbMap[cell];
moisture[5] = mwoodMap[cell];
Fire_SpreadNoWindNoSlope(catalog, Model, moisture);
Fire_SpreadWindSlopeMax(catalog, Model, wspdMap[cell],
wdirMap[cell], slpMap[cell], aspMap[cell]);
spread0Map[cell] = Fuel_Spread0(catalog,Model);
spreadMaxMap[cell] = Fuel_SpreadMax(catalog,Model);
azimuthMaxMap[cell] = Fuel_AzimuthMax(catalog,Model);
eccentricityMap[cell] = Fuel_Eccentricity(catalog,Model);
phiEffWindMap[cell] = Fuel_PhiEffWind(catalog,Model);
}
//Initialize BEHAVE Elipse and update ignMap and ignMap_new
if (init_elipse == 1)
{
cell = Cols*ignX + Cols*Rows*ignY; //elipse is created with ignition point values
initialMap = BEHAVEelipse( CellWd, Rows*ignY, Cols*ignX, spreadMaxMap[cell], eccentricityMap[cell], azimuthMaxMap[cell]);
for (cell = 0; cell < Cells; cell++)
ignMap[cell] = ignMap_new[cell] = initialMap[cell];
}
end = clock();
time = ((double) (end - start))/CLOCKS_PER_SEC;
printf("Done with %lf seconds.\n",time);
///////////////////////////////////
//Fire growth model iterative cycle
printf("\n>>Running FGM cycle...");
start = clock();
if ( FGM_cycle( ignMap, ignMap_d,
ignMap_new, ignMap_new_d,
spread0Map, spread0Map_d,
spreadMaxMap, spreadMaxMap_d,
phiEffWindMap, phiEffWindMap_d,
eccentricityMap, eccentricityMap_d,
azimuthMaxMap, azimuthMaxMap_d,
diff_d,
CellWd,
dimGrid,
dimBlock,
Residue,
Residue_max,
Cells) !=1)
{
printf("\nERROR: FGM cycle.\n");
}
else
{
end = clock();
time = ((double) (end - start))/CLOCKS_PER_SEC;
printf("Done with %lf seconds.\n",time);
}
//////////////////////////
//Mem copy of ignition map
cudaMemcpy( ignMap, ignMap_d, Cells*sizeof(float), cudaMemcpyDeviceToHost);
////////////////////
//Print Ignition Map
PrintMap(ignMap, ignFileName);
//////////////
//Close Cublas
printf ("\n>>Closing CUBLAS...");
if( cublasShutdown() != CUBLAS_STATUS_SUCCESS)
{
printf("\nERROR: CUBLAS Shutdown!\n");
return (1);
}
else
printf("Done.\n");
}
/////////
//The END
////////////////
//More Functions
////////////////
///////////////////////////////////////////////////////////////////////////////
// Exact Solution Scenario 1
//
// The spread rate in a given direction is a function of the ROS of the wind-and-slope
// case, times a coefficient that depends on the angle between the direction of maximum
// spread (direction of the wind, etc.) and the direction of the actual propagation.
//
//R = RMax * F (azimuth, azimuthMax)
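//Concretely, as implemented in BEHAVEelipse below:
// dir = min(|azimuthMax - azimuth|, 360 - |azimuthMax - azimuth|)
// F = (1 - Ecc) / (1 - Ecc*cos(dir))
// ignition time = Dist / (Rmax * F)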
///////////////////////////////////////////////////////////////////////////////
float* BEHAVEelipse( float CellWd, int ignRow, int ignCol, float Rmax,
float Ecc, float azimuth_max)
{
float* elipseMap;
int row, col;
float Dist, DistH, DistW;
float azimuth; //cell direction to north
float dir; //angle between azimuth max and azimuth
float F; //Factor to be applied to Rmax
float ignTime = 0;
float CellHt = CellWd; //CellHt is equal to the cell width
//Code does not yet handle rectangular domains
elipseMap = (float*)malloc(Rows*Cols*sizeof(float));
for (row = 0 ; row < Rows; row++)
{
for (col = 0; col < Cols; col++)
{
DistW = (col - ignCol) * CellWd;
DistH = (row - ignRow) * CellHt;
//Special Cases
if ((col - ignCol) == 0 && (row - ignRow) < 0)
azimuth = 0.;
else if ((col - ignCol) > 0 && (row - ignRow) == 0)
azimuth = 90.;
else if ((col - ignCol) == 0 && (row - ignRow) > 0)
azimuth = 180.;
else if ((col - ignCol) < 0 && (row - ignRow) == 0)
azimuth = 270.;
//1st Quadrant
else if ( (col - ignCol) > 0 && (row - ignRow) < 0 )
{
azimuth = fabs( atanf( DistW / DistH ) );
azimuth = RadToDeg(azimuth);
}
//2nd Quadrant
else if ( (col - ignCol) > 0 && (row - ignRow) > 0 )
{
azimuth = atanf( DistH / DistW );
azimuth = RadToDeg(azimuth) + 90.;
}
//3rd Quadrant
else if ( (col - ignCol) < 0 && (row - ignRow) > 0 )
{
azimuth = fabs(atanf( DistW / DistH ));
azimuth = RadToDeg(azimuth) + 180.;
}
//4th Quadrant
else if ( (col - ignCol) < 0 && (row - ignRow) < 0 )
{
azimuth = atanf( DistH / DistW );
azimuth = RadToDeg(azimuth) + 270.;
}
if ((dir = fabs( azimuth_max - azimuth )) > 180. )
dir = 360. - dir; // minimum distance between lines
dir = DegToRad(dir);
F = (1- Ecc) / (1 - Ecc*cosf(dir));
Dist = sqrt( DistH*DistH + DistW*DistW);
elipseMap[col + Cols*row] = ignTime + Dist / Rmax/F;
}
}
return(elipseMap);
}
////////////
//Print Maps
int PrintMap ( float* map, char* fileName )
{
FILE *fPtr;
int cell, col, row;
if ( (fPtr = fopen(fileName, "w")) == NULL )
{
printf("Unable to open output map \"%s\".\n", fileName);
return (FIRE_STATUS_ERROR);
}
for ( row = 0; row < Rows; row++ )
{
for ( cell=row*Cols, col=0; col<Cols; col++, cell++ )
{
fprintf(fPtr, " %5.2f ", (map[cell]==INFINITY) ? 000.00 : map[cell]);
}
fprintf(fPtr, "\n");
}
fclose(fPtr);
return (FIRE_STATUS_OK);
}
/////////////////////////
//Print catalog structure
int Print_CatalogStruct(FuelCatalogPtr catalog)
{
int now_model, now_particle;
const char *partdescription;
printf("\n>>There is a total of %2u Models in the catalog %s:\n\n", catalog->maxModels, catalog->name);
printf(" Model ID | name | description | mext | MaxPartc.\n");
printf(" ----------------------------------------------------------------------\n");
for (now_model = 0; now_model < catalog->maxModels; now_model++)
{
printf(" %2u | %6s | %30s | %5.3f | %1u\n",catalog->modelPtr[now_model]->modelId,
catalog->modelPtr[now_model]->name,
catalog->modelPtr[now_model]->desc,
catalog->modelPtr[now_model]->mext,
catalog->modelPtr[now_model]->maxParticles);
}
printf("\n\nThere is a total of 39 number of particles in the catalog:\n\n");
for (now_model = 1; now_model < catalog->maxModels; now_model++)
{
printf(">>For model %2u: %s\n ", catalog->modelPtr[now_model]->modelId,
catalog->modelPtr[now_model]->desc);
printf(" Prt. ID | Type | load | S/V | dens | Silica | Si_eff | Area | Sigma \n");
printf(" ------------------------------------------------------------------------------------ \n");
for (now_particle = 0; now_particle < catalog->modelPtr[now_model]->maxParticles ; now_particle++)
{
if (catalog->modelPtr[now_model]->partPtr[now_particle]->type == 1)
{
partdescription = "Dead particle";
}
else if (catalog->modelPtr[now_model]->partPtr[now_particle]->type == 2)
{
partdescription = "Live Herb ";
}
else
{
partdescription = "Live Wood ";
}
printf(" %2d | %s | %5.3f | %6.1f | %3.1f | %5.4f | %5.3f | %6.3f | %5.3f\n",
now_particle,
partdescription,
catalog->modelPtr[now_model]->partPtr[now_particle]->load,
catalog->modelPtr[now_model]->partPtr[now_particle]->savr,
catalog->modelPtr[now_model]->partPtr[now_particle]->dens,
catalog->modelPtr[now_model]->partPtr[now_particle]->stot,
catalog->modelPtr[now_model]->partPtr[now_particle]->seff,
catalog->modelPtr[now_model]->partPtr[now_particle]->area,
catalog->modelPtr[now_model]->partPtr[now_particle]->sigma);
}
putchar('\n');
}
return(0);
}
|
star2d2r-512-8-128_host.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <stdio.h>
#include "star2d2r-512-8-128_kernel.hu"
#define BENCH_DIM 2
#define BENCH_FPP 17
#define BENCH_RAD 2
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 5 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
hipError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != hipSuccess) { \
fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == hipSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(hipGetLastError()); \
} while(0)
double *dev_A;
cudaCheckReturn(hipMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double)));
{
cudaCheckReturn(hipMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), hipMemcpyHostToDevice));
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
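/* The tiling constants below appear to follow the AN5D naming in the file name
star2d2r-512-8-128: a 512-point spatial dimension, a temporal tile (__side0Len)
of 8 time steps, and a 128-point tile along dimension 1 (__side1Len). In every
block that follows, __side2Len + 2*__halo2*__side0Len == 512, i.e. the
overlapped tile width __side2LenOl stays at 512 while the remainder cases use
smaller temporal tiles. */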
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
{
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 480;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
hipLaunchKernelGGL(( kernel0_8), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 1)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 2)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 3)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 4)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 5)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 6)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 7)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
}
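    /* Leftover time steps that do not need the parity split handled above are finished with a single kernel0_R launch, where R = __c0Len % __side0LenMax. */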
else if (__c0Len % __side0LenMax)
{
if (__c0Len % __side0LenMax == 1)
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 2)
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 3)
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 4)
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 5)
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 6)
{
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_6), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 7)
{
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 484;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_7), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(hipMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), hipMemcpyDeviceToHost));
}
cudaCheckReturn(hipFree(dev_A));
}
}
else {
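    /* Reference path (scop == false): OpenMP-parallel radius-2 star stencil, double-buffered between A[t%2] and A[(t+1)%2]. */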
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
A[(t+1)%2][i][j] =
0.09371f * A[t%2][i-2][j] + 0.09374f * A[t%2][i-1][j] + 0.09376f * A[t%2][i][j-2] +
0.09372f * A[t%2][i][j-1] + 0.25001f * A[t%2][i][j] + 0.09377f * A[t%2][i][j+1] +
0.09373f * A[t%2][i][j+2] + 0.09375f * A[t%2][i+1][j] + 0.09378f * A[t%2][i+2][j];
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
|
star2d2r-512-8-128_host.cu
|
#include <assert.h>
#include <stdio.h>
#include "star2d2r-512-8-128_kernel.hu"
#define BENCH_DIM 2
#define BENCH_FPP 17
#define BENCH_RAD 2
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 5 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
cudaError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != cudaSuccess) { \
fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == cudaSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(cudaGetLastError()); \
} while(0)
double *dev_A;
cudaCheckReturn(cudaMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double)));
{
cudaCheckReturn(cudaMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), cudaMemcpyHostToDevice));
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
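      /* Main loop: kernel0_8 fuses __side0Len = 8 time steps per launch; each kernel0_N variant used below advances the stencil by N steps. */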
{
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 480;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
kernel0_8<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
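      /* Finish the time steps not covered by full 8-step tiles with smaller kernel0_N launches. */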
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 1)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 2)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 3)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 4)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 5)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 6)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 7)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
}
else if (__c0Len % __side0LenMax)
{
if (__c0Len % __side0LenMax == 1)
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 2)
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 3)
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 4)
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 5)
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 6)
{
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_6<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 7)
{
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 484;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_7<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(cudaMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), cudaMemcpyDeviceToHost));
}
cudaCheckReturn(cudaFree(dev_A));
}
}
else {
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
A[(t+1)%2][i][j] =
0.09371f * A[t%2][i-2][j] + 0.09374f * A[t%2][i-1][j] + 0.09376f * A[t%2][i][j-2] +
0.09372f * A[t%2][i][j-1] + 0.25001f * A[t%2][i][j] + 0.09377f * A[t%2][i][j+1] +
0.09373f * A[t%2][i][j+2] + 0.09375f * A[t%2][i+1][j] + 0.09378f * A[t%2][i+2][j];
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
|
a1b5a567a442fab61bb01497ca8bac638678cce7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) OpenMMLab. All rights reserved.
// Modified from
// https://github.com/csuhan/s2anet/blob/master/mmdet/ops/orn/src/cuda/ActiveRotatingFilter_cuda.cu
#include "active_rotated_filter_cuda_kernel.cuh"
#include "pytorch_cuda_helper.hpp"
void ActiveRotatedFilterForwardCUDAKernelLauncher(const Tensor input,
const Tensor indices,
Tensor output) {
int num_output_planes = input.size(0);
int num_input_planes = input.size(1);
int num_orientations = input.size(2);
int kH = input.size(3);
int kW = input.size(4);
int num_rotations = indices.size(3);
int nEntry = num_orientations * kH * kW;
int output_size = input.numel();
at::hip::HIPGuardMasqueradingAsCUDA device_guard(input.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
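  // Dispatch on the input dtype (float/double/half) and launch over the flattened output: GET_BLOCKS(output_size) blocks of THREADS_PER_BLOCK threads on the current stream.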
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "active_rotated_filter_forward_cuda_kernel", [&] {
hipLaunchKernelGGL(( active_rotated_filter_forward_cuda_kernel<scalar_t>)
, dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, stream,
output_size, input.data_ptr<scalar_t>(),
indices.data_ptr<int>(), num_input_planes, num_output_planes,
num_orientations, num_rotations, nEntry,
output.data_ptr<scalar_t>());
});
AT_CUDA_CHECK(hipGetLastError());
}
void ActiveRotatedFilterBackwardCUDAKernelLauncher(const Tensor grad_out,
const Tensor indices,
Tensor grad_in) {
int num_orientations = indices.size(0);
int kH = indices.size(1);
int kW = indices.size(2);
int num_rotations = indices.size(3);
int num_output_planes = grad_out.size(0) / num_rotations;
int num_input_planes = grad_out.size(1) / num_orientations;
int nEntry = num_orientations * kH * kW;
int output_size = grad_in.numel();
at::hip::HIPGuardMasqueradingAsCUDA device_guard(indices.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad_out.scalar_type(), "active_rotated_filter_backward_cuda_kernel",
[&] {
hipLaunchKernelGGL(( active_rotated_filter_backward_cuda_kernel<scalar_t>)
, dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, stream,
output_size, grad_out.data_ptr<scalar_t>(),
indices.data_ptr<int>(), num_input_planes, num_output_planes,
num_orientations, num_rotations, nEntry,
grad_in.data_ptr<scalar_t>());
});
AT_CUDA_CHECK(hipGetLastError());
}
|
a1b5a567a442fab61bb01497ca8bac638678cce7.cu
|
// Copyright (c) OpenMMLab. All rights reserved.
// Modified from
// https://github.com/csuhan/s2anet/blob/master/mmdet/ops/orn/src/cuda/ActiveRotatingFilter_cuda.cu
#include "active_rotated_filter_cuda_kernel.cuh"
#include "pytorch_cuda_helper.hpp"
void ActiveRotatedFilterForwardCUDAKernelLauncher(const Tensor input,
const Tensor indices,
Tensor output) {
int num_output_planes = input.size(0);
int num_input_planes = input.size(1);
int num_orientations = input.size(2);
int kH = input.size(3);
int kW = input.size(4);
int num_rotations = indices.size(3);
int nEntry = num_orientations * kH * kW;
int output_size = input.numel();
at::cuda::CUDAGuard device_guard(input.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "active_rotated_filter_forward_cuda_kernel", [&] {
active_rotated_filter_forward_cuda_kernel<scalar_t>
<<<GET_BLOCKS(output_size), THREADS_PER_BLOCK, 0, stream>>>(
output_size, input.data_ptr<scalar_t>(),
indices.data_ptr<int>(), num_input_planes, num_output_planes,
num_orientations, num_rotations, nEntry,
output.data_ptr<scalar_t>());
});
AT_CUDA_CHECK(cudaGetLastError());
}
void ActiveRotatedFilterBackwardCUDAKernelLauncher(const Tensor grad_out,
const Tensor indices,
Tensor grad_in) {
int num_orientations = indices.size(0);
int kH = indices.size(1);
int kW = indices.size(2);
int num_rotations = indices.size(3);
int num_output_planes = grad_out.size(0) / num_rotations;
int num_input_planes = grad_out.size(1) / num_orientations;
int nEntry = num_orientations * kH * kW;
int output_size = grad_in.numel();
at::cuda::CUDAGuard device_guard(indices.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad_out.scalar_type(), "active_rotated_filter_backward_cuda_kernel",
[&] {
active_rotated_filter_backward_cuda_kernel<scalar_t>
<<<GET_BLOCKS(output_size), THREADS_PER_BLOCK, 0, stream>>>(
output_size, grad_out.data_ptr<scalar_t>(),
indices.data_ptr<int>(), num_input_planes, num_output_planes,
num_orientations, num_rotations, nEntry,
grad_in.data_ptr<scalar_t>());
});
AT_CUDA_CHECK(cudaGetLastError());
}
|
53d3bd586b1233fd6cdd2b2d5d3cd319d6038c3d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/gpu/device/saturate_cast.hpp"
#include "opencv2/gpu/device/transform.hpp"
#include "opencv2/gpu/device/functional.hpp"
namespace cv { namespace gpu { namespace device
{
void writeScalar(const uchar*);
void writeScalar(const schar*);
void writeScalar(const ushort*);
void writeScalar(const short int*);
void writeScalar(const int*);
void writeScalar(const float*);
void writeScalar(const double*);
void convert_gpu(PtrStepSzb, int, PtrStepSzb, int, double, double, hipStream_t);
}}}
namespace cv { namespace gpu { namespace device
{
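    // shift_and_sizeof<T>::shift is log2(sizeof(T)); the kernels below use it to turn a row's byte step into an element stride via step >> shift.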
template <typename T> struct shift_and_sizeof;
template <> struct shift_and_sizeof<signed char> { enum { shift = 0 }; };
template <> struct shift_and_sizeof<unsigned char> { enum { shift = 0 }; };
template <> struct shift_and_sizeof<short> { enum { shift = 1 }; };
template <> struct shift_and_sizeof<unsigned short> { enum { shift = 1 }; };
template <> struct shift_and_sizeof<int> { enum { shift = 2 }; };
template <> struct shift_and_sizeof<float> { enum { shift = 2 }; };
template <> struct shift_and_sizeof<double> { enum { shift = 3 }; };
///////////////////////////////////////////////////////////////////////////
////////////////////////////////// CopyTo /////////////////////////////////
///////////////////////////////////////////////////////////////////////////
template <typename T> void copyToWithMask(PtrStepSzb src, PtrStepSzb dst, int cn, PtrStepSzb mask, bool colorMask, hipStream_t stream)
{
if (colorMask)
cv::gpu::device::transform((PtrStepSz<T>)src, (PtrStepSz<T>)dst, identity<T>(), SingleMask(mask), stream);
else
cv::gpu::device::transform((PtrStepSz<T>)src, (PtrStepSz<T>)dst, identity<T>(), SingleMaskChannels(mask, cn), stream);
}
void copyToWithMask_gpu(PtrStepSzb src, PtrStepSzb dst, size_t elemSize1, int cn, PtrStepSzb mask, bool colorMask, hipStream_t stream)
{
typedef void (*func_t)(PtrStepSzb src, PtrStepSzb dst, int cn, PtrStepSzb mask, bool colorMask, hipStream_t stream);
static func_t tab[] =
{
0,
copyToWithMask<unsigned char>,
copyToWithMask<unsigned short>,
0,
copyToWithMask<int>,
0,
0,
0,
copyToWithMask<double>
};
tab[elemSize1](src, dst, cn, mask, colorMask, stream);
}
///////////////////////////////////////////////////////////////////////////
////////////////////////////////// SetTo //////////////////////////////////
///////////////////////////////////////////////////////////////////////////
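    // The scalar to write is staged per element type in the __constant__ arrays below and fetched inside the kernels through readScalar<T>(channel).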
__constant__ uchar scalar_8u[4];
__constant__ schar scalar_8s[4];
__constant__ ushort scalar_16u[4];
__constant__ short scalar_16s[4];
__constant__ int scalar_32s[4];
__constant__ float scalar_32f[4];
__constant__ double scalar_64f[4];
template <typename T> __device__ __forceinline__ T readScalar(int i);
template <> __device__ __forceinline__ uchar readScalar<uchar>(int i) {return scalar_8u[i];}
template <> __device__ __forceinline__ schar readScalar<schar>(int i) {return scalar_8s[i];}
template <> __device__ __forceinline__ ushort readScalar<ushort>(int i) {return scalar_16u[i];}
template <> __device__ __forceinline__ short readScalar<short>(int i) {return scalar_16s[i];}
template <> __device__ __forceinline__ int readScalar<int>(int i) {return scalar_32s[i];}
template <> __device__ __forceinline__ float readScalar<float>(int i) {return scalar_32f[i];}
template <> __device__ __forceinline__ double readScalar<double>(int i) {return scalar_64f[i];}
void writeScalar(const uchar* vals)
{
cudaSafeCall( hipMemcpyToSymbol(scalar_8u, vals, sizeof(uchar) * 4) );
}
void writeScalar(const schar* vals)
{
cudaSafeCall( hipMemcpyToSymbol(scalar_8s, vals, sizeof(schar) * 4) );
}
void writeScalar(const ushort* vals)
{
cudaSafeCall( hipMemcpyToSymbol(scalar_16u, vals, sizeof(ushort) * 4) );
}
void writeScalar(const short* vals)
{
cudaSafeCall( hipMemcpyToSymbol(scalar_16s, vals, sizeof(short) * 4) );
}
void writeScalar(const int* vals)
{
cudaSafeCall( hipMemcpyToSymbol(scalar_32s, vals, sizeof(int) * 4) );
}
void writeScalar(const float* vals)
{
cudaSafeCall( hipMemcpyToSymbol(scalar_32f, vals, sizeof(float) * 4) );
}
void writeScalar(const double* vals)
{
cudaSafeCall( hipMemcpyToSymbol(scalar_64f, vals, sizeof(double) * 4) );
}
template<typename T>
__global__ void set_to_without_mask(T* mat, int cols, int rows, size_t step, int channels)
{
size_t x = blockIdx.x * blockDim.x + threadIdx.x;
size_t y = blockIdx.y * blockDim.y + threadIdx.y;
if ((x < cols * channels ) && (y < rows))
{
size_t idx = y * ( step >> shift_and_sizeof<T>::shift ) + x;
mat[idx] = readScalar<T>(x % channels);
}
}
template<typename T>
__global__ void set_to_with_mask(T* mat, const uchar* mask, int cols, int rows, size_t step, int channels, size_t step_mask)
{
size_t x = blockIdx.x * blockDim.x + threadIdx.x;
size_t y = blockIdx.y * blockDim.y + threadIdx.y;
if ((x < cols * channels ) && (y < rows))
if (mask[y * step_mask + x / channels] != 0)
{
size_t idx = y * ( step >> shift_and_sizeof<T>::shift ) + x;
mat[idx] = readScalar<T>(x % channels);
}
}
template <typename T>
void set_to_gpu(PtrStepSzb mat, const T* scalar, PtrStepSzb mask, int channels, hipStream_t stream)
{
writeScalar(scalar);
dim3 threadsPerBlock(32, 8, 1);
dim3 numBlocks (mat.cols * channels / threadsPerBlock.x + 1, mat.rows / threadsPerBlock.y + 1, 1);
hipLaunchKernelGGL(( set_to_with_mask<T>), dim3(numBlocks), dim3(threadsPerBlock), 0, stream, (T*)mat.data, (uchar*)mask.data, mat.cols, mat.rows, mat.step, channels, mask.step);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall ( hipDeviceSynchronize() );
}
template void set_to_gpu<uchar >(PtrStepSzb mat, const uchar* scalar, PtrStepSzb mask, int channels, hipStream_t stream);
template void set_to_gpu<schar >(PtrStepSzb mat, const schar* scalar, PtrStepSzb mask, int channels, hipStream_t stream);
template void set_to_gpu<ushort>(PtrStepSzb mat, const ushort* scalar, PtrStepSzb mask, int channels, hipStream_t stream);
template void set_to_gpu<short >(PtrStepSzb mat, const short* scalar, PtrStepSzb mask, int channels, hipStream_t stream);
template void set_to_gpu<int >(PtrStepSzb mat, const int* scalar, PtrStepSzb mask, int channels, hipStream_t stream);
template void set_to_gpu<float >(PtrStepSzb mat, const float* scalar, PtrStepSzb mask, int channels, hipStream_t stream);
template void set_to_gpu<double>(PtrStepSzb mat, const double* scalar, PtrStepSzb mask, int channels, hipStream_t stream);
template <typename T>
void set_to_gpu(PtrStepSzb mat, const T* scalar, int channels, hipStream_t stream)
{
writeScalar(scalar);
dim3 threadsPerBlock(32, 8, 1);
dim3 numBlocks (mat.cols * channels / threadsPerBlock.x + 1, mat.rows / threadsPerBlock.y + 1, 1);
hipLaunchKernelGGL(( set_to_without_mask<T>), dim3(numBlocks), dim3(threadsPerBlock), 0, stream, (T*)mat.data, mat.cols, mat.rows, mat.step, channels);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall ( hipDeviceSynchronize() );
}
template void set_to_gpu<uchar >(PtrStepSzb mat, const uchar* scalar, int channels, hipStream_t stream);
template void set_to_gpu<schar >(PtrStepSzb mat, const schar* scalar, int channels, hipStream_t stream);
template void set_to_gpu<ushort>(PtrStepSzb mat, const ushort* scalar, int channels, hipStream_t stream);
template void set_to_gpu<short >(PtrStepSzb mat, const short* scalar, int channels, hipStream_t stream);
template void set_to_gpu<int >(PtrStepSzb mat, const int* scalar, int channels, hipStream_t stream);
template void set_to_gpu<float >(PtrStepSzb mat, const float* scalar, int channels, hipStream_t stream);
template void set_to_gpu<double>(PtrStepSzb mat, const double* scalar, int channels, hipStream_t stream);
///////////////////////////////////////////////////////////////////////////
//////////////////////////////// ConvertTo ////////////////////////////////
///////////////////////////////////////////////////////////////////////////
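    // ConvertTo computes saturate_cast<D>(alpha * src + beta) element-wise; convert_gpu() picks the typed instantiation from the tab[sdepth][ddepth] table.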
template <typename T, typename D> struct Convertor : unary_function<T, D>
{
Convertor(double alpha_, double beta_) : alpha(alpha_), beta(beta_) {}
__device__ __forceinline__ D operator()(const T& src) const
{
return saturate_cast<D>(alpha * src + beta);
}
double alpha, beta;
};
namespace detail
{
template <size_t src_size, size_t dst_size, typename F> struct ConvertTraitsDispatcher : DefaultTransformFunctorTraits<F>
{
};
template <typename F> struct ConvertTraitsDispatcher<1, 1, F> : DefaultTransformFunctorTraits<F>
{
enum { smart_shift = 8 };
};
template <typename F> struct ConvertTraitsDispatcher<1, 2, F> : DefaultTransformFunctorTraits<F>
{
enum { smart_shift = 4 };
};
template <typename F> struct ConvertTraitsDispatcher<1, 4, F> : DefaultTransformFunctorTraits<F>
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <typename F> struct ConvertTraitsDispatcher<2, 2, F> : DefaultTransformFunctorTraits<F>
{
enum { smart_shift = 4 };
};
template <typename F> struct ConvertTraitsDispatcher<2, 4, F> : DefaultTransformFunctorTraits<F>
{
enum { smart_shift = 2 };
};
template <typename F> struct ConvertTraitsDispatcher<4, 2, F> : DefaultTransformFunctorTraits<F>
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <typename F> struct ConvertTraitsDispatcher<4, 4, F> : DefaultTransformFunctorTraits<F>
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 2 };
};
template <typename F> struct ConvertTraits : ConvertTraitsDispatcher<sizeof(typename F::argument_type), sizeof(typename F::result_type), F>
{
};
}
template <typename T, typename D> struct TransformFunctorTraits< Convertor<T, D> > : detail::ConvertTraits< Convertor<T, D> >
{
};
template<typename T, typename D>
void cvt_(PtrStepSzb src, PtrStepSzb dst, double alpha, double beta, hipStream_t stream)
{
cudaSafeCall( hipSetDoubleForDevice(&alpha) );
cudaSafeCall( hipSetDoubleForDevice(&beta) );
Convertor<T, D> op(alpha, beta);
cv::gpu::device::transform((PtrStepSz<T>)src, (PtrStepSz<D>)dst, op, WithOutMask(), stream);
}
#if defined __clang__
# pragma clang diagnostic push
# pragma clang diagnostic ignored "-Wmissing-declarations"
#endif
void convert_gpu(PtrStepSzb src, int sdepth, PtrStepSzb dst, int ddepth, double alpha, double beta, hipStream_t stream)
{
typedef void (*caller_t)(PtrStepSzb src, PtrStepSzb dst, double alpha, double beta, hipStream_t stream);
static const caller_t tab[8][8] =
{
{cvt_<uchar, uchar>, cvt_<uchar, schar>, cvt_<uchar, ushort>, cvt_<uchar, short>,
cvt_<uchar, int>, cvt_<uchar, float>, cvt_<uchar, double>, 0},
{cvt_<schar, uchar>, cvt_<schar, schar>, cvt_<schar, ushort>, cvt_<schar, short>,
cvt_<schar, int>, cvt_<schar, float>, cvt_<schar, double>, 0},
{cvt_<ushort, uchar>, cvt_<ushort, schar>, cvt_<ushort, ushort>, cvt_<ushort, short>,
cvt_<ushort, int>, cvt_<ushort, float>, cvt_<ushort, double>, 0},
{cvt_<short, uchar>, cvt_<short, schar>, cvt_<short, ushort>, cvt_<short, short>,
cvt_<short, int>, cvt_<short, float>, cvt_<short, double>, 0},
{cvt_<int, uchar>, cvt_<int, schar>, cvt_<int, ushort>,
cvt_<int, short>, cvt_<int, int>, cvt_<int, float>, cvt_<int, double>, 0},
{cvt_<float, uchar>, cvt_<float, schar>, cvt_<float, ushort>,
cvt_<float, short>, cvt_<float, int>, cvt_<float, float>, cvt_<float, double>, 0},
{cvt_<double, uchar>, cvt_<double, schar>, cvt_<double, ushort>,
cvt_<double, short>, cvt_<double, int>, cvt_<double, float>, cvt_<double, double>, 0},
{0,0,0,0,0,0,0,0}
};
caller_t func = tab[sdepth][ddepth];
if (!func)
cv::gpu::error("Unsupported convert operation", __FILE__, __LINE__, "convert_gpu");
func(src, dst, alpha, beta, stream);
}
#if defined __clang__
# pragma clang diagnostic pop
#endif
}}} // namespace cv { namespace gpu { namespace device
#endif /* CUDA_DISABLER */
|
53d3bd586b1233fd6cdd2b2d5d3cd319d6038c3d.cu
|
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/gpu/device/saturate_cast.hpp"
#include "opencv2/gpu/device/transform.hpp"
#include "opencv2/gpu/device/functional.hpp"
namespace cv { namespace gpu { namespace device
{
void writeScalar(const uchar*);
void writeScalar(const schar*);
void writeScalar(const ushort*);
void writeScalar(const short int*);
void writeScalar(const int*);
void writeScalar(const float*);
void writeScalar(const double*);
void convert_gpu(PtrStepSzb, int, PtrStepSzb, int, double, double, cudaStream_t);
}}}
namespace cv { namespace gpu { namespace device
{
template <typename T> struct shift_and_sizeof;
template <> struct shift_and_sizeof<signed char> { enum { shift = 0 }; };
template <> struct shift_and_sizeof<unsigned char> { enum { shift = 0 }; };
template <> struct shift_and_sizeof<short> { enum { shift = 1 }; };
template <> struct shift_and_sizeof<unsigned short> { enum { shift = 1 }; };
template <> struct shift_and_sizeof<int> { enum { shift = 2 }; };
template <> struct shift_and_sizeof<float> { enum { shift = 2 }; };
template <> struct shift_and_sizeof<double> { enum { shift = 3 }; };
///////////////////////////////////////////////////////////////////////////
////////////////////////////////// CopyTo /////////////////////////////////
///////////////////////////////////////////////////////////////////////////
template <typename T> void copyToWithMask(PtrStepSzb src, PtrStepSzb dst, int cn, PtrStepSzb mask, bool colorMask, cudaStream_t stream)
{
if (colorMask)
cv::gpu::device::transform((PtrStepSz<T>)src, (PtrStepSz<T>)dst, identity<T>(), SingleMask(mask), stream);
else
cv::gpu::device::transform((PtrStepSz<T>)src, (PtrStepSz<T>)dst, identity<T>(), SingleMaskChannels(mask, cn), stream);
}
void copyToWithMask_gpu(PtrStepSzb src, PtrStepSzb dst, size_t elemSize1, int cn, PtrStepSzb mask, bool colorMask, cudaStream_t stream)
{
typedef void (*func_t)(PtrStepSzb src, PtrStepSzb dst, int cn, PtrStepSzb mask, bool colorMask, cudaStream_t stream);
static func_t tab[] =
{
0,
copyToWithMask<unsigned char>,
copyToWithMask<unsigned short>,
0,
copyToWithMask<int>,
0,
0,
0,
copyToWithMask<double>
};
tab[elemSize1](src, dst, cn, mask, colorMask, stream);
}
///////////////////////////////////////////////////////////////////////////
////////////////////////////////// SetTo //////////////////////////////////
///////////////////////////////////////////////////////////////////////////
__constant__ uchar scalar_8u[4];
__constant__ schar scalar_8s[4];
__constant__ ushort scalar_16u[4];
__constant__ short scalar_16s[4];
__constant__ int scalar_32s[4];
__constant__ float scalar_32f[4];
__constant__ double scalar_64f[4];
template <typename T> __device__ __forceinline__ T readScalar(int i);
template <> __device__ __forceinline__ uchar readScalar<uchar>(int i) {return scalar_8u[i];}
template <> __device__ __forceinline__ schar readScalar<schar>(int i) {return scalar_8s[i];}
template <> __device__ __forceinline__ ushort readScalar<ushort>(int i) {return scalar_16u[i];}
template <> __device__ __forceinline__ short readScalar<short>(int i) {return scalar_16s[i];}
template <> __device__ __forceinline__ int readScalar<int>(int i) {return scalar_32s[i];}
template <> __device__ __forceinline__ float readScalar<float>(int i) {return scalar_32f[i];}
template <> __device__ __forceinline__ double readScalar<double>(int i) {return scalar_64f[i];}
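        // Note (added): set_to_gpu() first calls writeScalar() on the host to copy the
        // up-to-4-channel scalar into the matching __constant__ array above; inside the
        // kernels, readScalar<T>(x % channels) then fetches the value for the channel
        // currently being written.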
void writeScalar(const uchar* vals)
{
cudaSafeCall( cudaMemcpyToSymbol(scalar_8u, vals, sizeof(uchar) * 4) );
}
void writeScalar(const schar* vals)
{
cudaSafeCall( cudaMemcpyToSymbol(scalar_8s, vals, sizeof(schar) * 4) );
}
void writeScalar(const ushort* vals)
{
cudaSafeCall( cudaMemcpyToSymbol(scalar_16u, vals, sizeof(ushort) * 4) );
}
void writeScalar(const short* vals)
{
cudaSafeCall( cudaMemcpyToSymbol(scalar_16s, vals, sizeof(short) * 4) );
}
void writeScalar(const int* vals)
{
cudaSafeCall( cudaMemcpyToSymbol(scalar_32s, vals, sizeof(int) * 4) );
}
void writeScalar(const float* vals)
{
cudaSafeCall( cudaMemcpyToSymbol(scalar_32f, vals, sizeof(float) * 4) );
}
void writeScalar(const double* vals)
{
cudaSafeCall( cudaMemcpyToSymbol(scalar_64f, vals, sizeof(double) * 4) );
}
template<typename T>
__global__ void set_to_without_mask(T* mat, int cols, int rows, size_t step, int channels)
{
size_t x = blockIdx.x * blockDim.x + threadIdx.x;
size_t y = blockIdx.y * blockDim.y + threadIdx.y;
if ((x < cols * channels ) && (y < rows))
{
size_t idx = y * ( step >> shift_and_sizeof<T>::shift ) + x;
mat[idx] = readScalar<T>(x % channels);
}
}
template<typename T>
__global__ void set_to_with_mask(T* mat, const uchar* mask, int cols, int rows, size_t step, int channels, size_t step_mask)
{
size_t x = blockIdx.x * blockDim.x + threadIdx.x;
size_t y = blockIdx.y * blockDim.y + threadIdx.y;
if ((x < cols * channels ) && (y < rows))
if (mask[y * step_mask + x / channels] != 0)
{
size_t idx = y * ( step >> shift_and_sizeof<T>::shift ) + x;
mat[idx] = readScalar<T>(x % channels);
}
}
template <typename T>
void set_to_gpu(PtrStepSzb mat, const T* scalar, PtrStepSzb mask, int channels, cudaStream_t stream)
{
writeScalar(scalar);
dim3 threadsPerBlock(32, 8, 1);
dim3 numBlocks (mat.cols * channels / threadsPerBlock.x + 1, mat.rows / threadsPerBlock.y + 1, 1);
set_to_with_mask<T><<<numBlocks, threadsPerBlock, 0, stream>>>((T*)mat.data, (uchar*)mask.data, mat.cols, mat.rows, mat.step, channels, mask.step);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall ( cudaDeviceSynchronize() );
}
template void set_to_gpu<uchar >(PtrStepSzb mat, const uchar* scalar, PtrStepSzb mask, int channels, cudaStream_t stream);
template void set_to_gpu<schar >(PtrStepSzb mat, const schar* scalar, PtrStepSzb mask, int channels, cudaStream_t stream);
template void set_to_gpu<ushort>(PtrStepSzb mat, const ushort* scalar, PtrStepSzb mask, int channels, cudaStream_t stream);
template void set_to_gpu<short >(PtrStepSzb mat, const short* scalar, PtrStepSzb mask, int channels, cudaStream_t stream);
template void set_to_gpu<int >(PtrStepSzb mat, const int* scalar, PtrStepSzb mask, int channels, cudaStream_t stream);
template void set_to_gpu<float >(PtrStepSzb mat, const float* scalar, PtrStepSzb mask, int channels, cudaStream_t stream);
template void set_to_gpu<double>(PtrStepSzb mat, const double* scalar, PtrStepSzb mask, int channels, cudaStream_t stream);
template <typename T>
void set_to_gpu(PtrStepSzb mat, const T* scalar, int channels, cudaStream_t stream)
{
writeScalar(scalar);
dim3 threadsPerBlock(32, 8, 1);
dim3 numBlocks (mat.cols * channels / threadsPerBlock.x + 1, mat.rows / threadsPerBlock.y + 1, 1);
set_to_without_mask<T><<<numBlocks, threadsPerBlock, 0, stream>>>((T*)mat.data, mat.cols, mat.rows, mat.step, channels);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall ( cudaDeviceSynchronize() );
}
template void set_to_gpu<uchar >(PtrStepSzb mat, const uchar* scalar, int channels, cudaStream_t stream);
template void set_to_gpu<schar >(PtrStepSzb mat, const schar* scalar, int channels, cudaStream_t stream);
template void set_to_gpu<ushort>(PtrStepSzb mat, const ushort* scalar, int channels, cudaStream_t stream);
template void set_to_gpu<short >(PtrStepSzb mat, const short* scalar, int channels, cudaStream_t stream);
template void set_to_gpu<int >(PtrStepSzb mat, const int* scalar, int channels, cudaStream_t stream);
template void set_to_gpu<float >(PtrStepSzb mat, const float* scalar, int channels, cudaStream_t stream);
template void set_to_gpu<double>(PtrStepSzb mat, const double* scalar, int channels, cudaStream_t stream);
///////////////////////////////////////////////////////////////////////////
//////////////////////////////// ConvertTo ////////////////////////////////
///////////////////////////////////////////////////////////////////////////
template <typename T, typename D> struct Convertor : unary_function<T, D>
{
Convertor(double alpha_, double beta_) : alpha(alpha_), beta(beta_) {}
__device__ __forceinline__ D operator()(const T& src) const
{
return saturate_cast<D>(alpha * src + beta);
}
double alpha, beta;
};
namespace detail
{
template <size_t src_size, size_t dst_size, typename F> struct ConvertTraitsDispatcher : DefaultTransformFunctorTraits<F>
{
};
template <typename F> struct ConvertTraitsDispatcher<1, 1, F> : DefaultTransformFunctorTraits<F>
{
enum { smart_shift = 8 };
};
template <typename F> struct ConvertTraitsDispatcher<1, 2, F> : DefaultTransformFunctorTraits<F>
{
enum { smart_shift = 4 };
};
template <typename F> struct ConvertTraitsDispatcher<1, 4, F> : DefaultTransformFunctorTraits<F>
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <typename F> struct ConvertTraitsDispatcher<2, 2, F> : DefaultTransformFunctorTraits<F>
{
enum { smart_shift = 4 };
};
template <typename F> struct ConvertTraitsDispatcher<2, 4, F> : DefaultTransformFunctorTraits<F>
{
enum { smart_shift = 2 };
};
template <typename F> struct ConvertTraitsDispatcher<4, 2, F> : DefaultTransformFunctorTraits<F>
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <typename F> struct ConvertTraitsDispatcher<4, 4, F> : DefaultTransformFunctorTraits<F>
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 2 };
};
template <typename F> struct ConvertTraits : ConvertTraitsDispatcher<sizeof(typename F::argument_type), sizeof(typename F::result_type), F>
{
};
}
template <typename T, typename D> struct TransformFunctorTraits< Convertor<T, D> > : detail::ConvertTraits< Convertor<T, D> >
{
};
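        // Note (added): these traits feed the generic cv::gpu::device::transform()
        // machinery; smart_shift is (roughly) how many elements each thread handles via
        // vectorized accesses, picked from the source/destination element sizes, and
        // smart_block_dim_y overrides the default block height for the wider types.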
template<typename T, typename D>
void cvt_(PtrStepSzb src, PtrStepSzb dst, double alpha, double beta, cudaStream_t stream)
{
cudaSafeCall( cudaSetDoubleForDevice(&alpha) );
cudaSafeCall( cudaSetDoubleForDevice(&beta) );
Convertor<T, D> op(alpha, beta);
cv::gpu::device::transform((PtrStepSz<T>)src, (PtrStepSz<D>)dst, op, WithOutMask(), stream);
}
#if defined __clang__
# pragma clang diagnostic push
# pragma clang diagnostic ignored "-Wmissing-declarations"
#endif
void convert_gpu(PtrStepSzb src, int sdepth, PtrStepSzb dst, int ddepth, double alpha, double beta, cudaStream_t stream)
{
typedef void (*caller_t)(PtrStepSzb src, PtrStepSzb dst, double alpha, double beta, cudaStream_t stream);
static const caller_t tab[8][8] =
{
{cvt_<uchar, uchar>, cvt_<uchar, schar>, cvt_<uchar, ushort>, cvt_<uchar, short>,
cvt_<uchar, int>, cvt_<uchar, float>, cvt_<uchar, double>, 0},
{cvt_<schar, uchar>, cvt_<schar, schar>, cvt_<schar, ushort>, cvt_<schar, short>,
cvt_<schar, int>, cvt_<schar, float>, cvt_<schar, double>, 0},
{cvt_<ushort, uchar>, cvt_<ushort, schar>, cvt_<ushort, ushort>, cvt_<ushort, short>,
cvt_<ushort, int>, cvt_<ushort, float>, cvt_<ushort, double>, 0},
{cvt_<short, uchar>, cvt_<short, schar>, cvt_<short, ushort>, cvt_<short, short>,
cvt_<short, int>, cvt_<short, float>, cvt_<short, double>, 0},
{cvt_<int, uchar>, cvt_<int, schar>, cvt_<int, ushort>,
cvt_<int, short>, cvt_<int, int>, cvt_<int, float>, cvt_<int, double>, 0},
{cvt_<float, uchar>, cvt_<float, schar>, cvt_<float, ushort>,
cvt_<float, short>, cvt_<float, int>, cvt_<float, float>, cvt_<float, double>, 0},
{cvt_<double, uchar>, cvt_<double, schar>, cvt_<double, ushort>,
cvt_<double, short>, cvt_<double, int>, cvt_<double, float>, cvt_<double, double>, 0},
{0,0,0,0,0,0,0,0}
};
caller_t func = tab[sdepth][ddepth];
if (!func)
cv::gpu::error("Unsupported convert operation", __FILE__, __LINE__, "convert_gpu");
func(src, dst, alpha, beta, stream);
}
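        // Example (added, hypothetical call site): converting 8-bit data to normalized
        // float dispatches through tab[0][5] == cvt_<uchar, float>:
        //   convert_gpu(src, 0 /*CV_8U*/, dst, 5 /*CV_32F*/, 1.0 / 255.0, 0.0, stream);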
#if defined __clang__
# pragma clang diagnostic pop
#endif
}}} // namespace cv { namespace gpu { namespace device
#endif /* CUDA_DISABLER */
|
bed4d2e4cc8459e23e5b53f450eb006fe7b20fcf.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef USE_CUDNN
#include <vector>
#include "caffe/layers/cudnn_conv_layer.hpp"
namespace caffe {
__global__ void sync_conv_groups() { }
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const Dtype* weight = this->blobs_[0]->gpu_data();
for (int i = 0; i < bottom.size(); ++i) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* top_data = top[i]->mutable_gpu_data();
// Forward through cuDNN in parallel over groups.
for (int g = 0; g < this->group_; g++) {
// Filters.
CUDNN_CHECK(cudnnConvolutionForward(handle_[g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i], bottom_data + bottom_offset_ * g,
filter_desc_, weight + this->weight_offset_ * g,
conv_descs_[i],
fwd_algo_[i], workspace[g], workspace_fwd_sizes_[i],
cudnn::dataType<Dtype>::zero,
top_descs_[i], top_data + top_offset_ * g));
// Bias.
if (this->bias_term_) {
const Dtype* bias_data = this->blobs_[1]->gpu_data();
        CUDNN_CHECK(cudnnAddTensor(handle_[g],
cudnn::dataType<Dtype>::one,
bias_desc_, bias_data + bias_offset_ * g,
cudnn::dataType<Dtype>::one,
top_descs_[i], top_data + top_offset_ * g));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sync_conv_groups), dim3(1), dim3(1), 0, 0, );
}
}
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* weight = NULL;
Dtype* weight_diff = NULL;
if (this->param_propagate_down_[0]) {
weight = this->blobs_[0]->gpu_data();
weight_diff = this->blobs_[0]->mutable_gpu_diff();
}
Dtype* bias_diff = NULL;
if (this->bias_term_ && this->param_propagate_down_[1]) {
bias_diff = this->blobs_[1]->mutable_gpu_diff();
}
for (int i = 0; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
// Backward through cuDNN in parallel over groups and gradients.
for (int g = 0; g < this->group_; g++) {
// Gradient w.r.t. bias.
if (this->bias_term_ && this->param_propagate_down_[1]) {
CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g],
cudnn::dataType<Dtype>::one,
top_descs_[i], top_diff + top_offset_ * g,
cudnn::dataType<Dtype>::one,
bias_desc_, bias_diff + bias_offset_ * g));
}
// Gradient w.r.t. weights.
if (this->param_propagate_down_[0]) {
const Dtype* bottom_data = bottom[i]->gpu_data();
        CUDNN_CHECK(cudnnConvolutionBackwardFilter(
handle_[1*this->group_ + g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i], bottom_data + bottom_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
bwd_filter_algo_[i], workspace[1*this->group_ + g],
workspace_bwd_filter_sizes_[i],
cudnn::dataType<Dtype>::one,
filter_desc_, weight_diff + this->weight_offset_ * g));
}
// Gradient w.r.t. bottom data.
if (propagate_down[i]) {
if (weight == NULL) {
weight = this->blobs_[0]->gpu_data();
}
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
        CUDNN_CHECK(cudnnConvolutionBackwardData(
handle_[2*this->group_ + g],
cudnn::dataType<Dtype>::one,
filter_desc_, weight + this->weight_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
bwd_data_algo_[i], workspace[2*this->group_ + g],
workspace_bwd_data_sizes_[i],
cudnn::dataType<Dtype>::zero,
bottom_descs_[i], bottom_diff + bottom_offset_ * g));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sync_conv_groups), dim3(1), dim3(1), 0, 0, );
}
}
INSTANTIATE_LAYER_GPU_FUNCS(CuDNNConvolutionLayer);
} // namespace caffe
#endif
|
bed4d2e4cc8459e23e5b53f450eb006fe7b20fcf.cu
|
#ifdef USE_CUDNN
#include <vector>
#include "caffe/layers/cudnn_conv_layer.hpp"
namespace caffe {
__global__ void sync_conv_groups() { }
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const Dtype* weight = this->blobs_[0]->gpu_data();
for (int i = 0; i < bottom.size(); ++i) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* top_data = top[i]->mutable_gpu_data();
// Forward through cuDNN in parallel over groups.
for (int g = 0; g < this->group_; g++) {
// Filters.
CUDNN_CHECK(cudnnConvolutionForward(handle_[g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i], bottom_data + bottom_offset_ * g,
filter_desc_, weight + this->weight_offset_ * g,
conv_descs_[i],
fwd_algo_[i], workspace[g], workspace_fwd_sizes_[i],
cudnn::dataType<Dtype>::zero,
top_descs_[i], top_data + top_offset_ * g));
// Bias.
if (this->bias_term_) {
const Dtype* bias_data = this->blobs_[1]->gpu_data();
        CUDNN_CHECK(cudnnAddTensor(handle_[g],
cudnn::dataType<Dtype>::one,
bias_desc_, bias_data + bias_offset_ * g,
cudnn::dataType<Dtype>::one,
top_descs_[i], top_data + top_offset_ * g));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
sync_conv_groups<<<1, 1>>>();
}
}
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* weight = NULL;
Dtype* weight_diff = NULL;
if (this->param_propagate_down_[0]) {
weight = this->blobs_[0]->gpu_data();
weight_diff = this->blobs_[0]->mutable_gpu_diff();
}
Dtype* bias_diff = NULL;
if (this->bias_term_ && this->param_propagate_down_[1]) {
bias_diff = this->blobs_[1]->mutable_gpu_diff();
}
for (int i = 0; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
// Backward through cuDNN in parallel over groups and gradients.
for (int g = 0; g < this->group_; g++) {
// Gradient w.r.t. bias.
if (this->bias_term_ && this->param_propagate_down_[1]) {
CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g],
cudnn::dataType<Dtype>::one,
top_descs_[i], top_diff + top_offset_ * g,
cudnn::dataType<Dtype>::one,
bias_desc_, bias_diff + bias_offset_ * g));
}
// Gradient w.r.t. weights.
if (this->param_propagate_down_[0]) {
const Dtype* bottom_data = bottom[i]->gpu_data();
        CUDNN_CHECK(cudnnConvolutionBackwardFilter(
handle_[1*this->group_ + g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i], bottom_data + bottom_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
bwd_filter_algo_[i], workspace[1*this->group_ + g],
workspace_bwd_filter_sizes_[i],
cudnn::dataType<Dtype>::one,
filter_desc_, weight_diff + this->weight_offset_ * g));
}
// Gradient w.r.t. bottom data.
if (propagate_down[i]) {
if (weight == NULL) {
weight = this->blobs_[0]->gpu_data();
}
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
        CUDNN_CHECK(cudnnConvolutionBackwardData(
handle_[2*this->group_ + g],
cudnn::dataType<Dtype>::one,
filter_desc_, weight + this->weight_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
bwd_data_algo_[i], workspace[2*this->group_ + g],
workspace_bwd_data_sizes_[i],
cudnn::dataType<Dtype>::zero,
bottom_descs_[i], bottom_diff + bottom_offset_ * g));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
sync_conv_groups<<<1, 1>>>();
}
}
INSTANTIATE_LAYER_GPU_FUNCS(CuDNNConvolutionLayer);
} // namespace caffe
#endif
|
028027fee91aaec19ff3247f0b997136b4d22d12.hip
|
// !!! This is a file automatically generated by hipify!!!
//
// auto-generated by op2.m on 18-Apr-2012 15:00:24
//
// header
#include "op_lib_cpp.h"
#include "op_cuda_rt_support.h"
#include "op_cuda_reduction.h"
// global constants
#ifndef MAX_CONST_SIZE
#define MAX_CONST_SIZE 128
#endif
__constant__ double gam;
__constant__ double gm1;
__constant__ double cfl;
__constant__ double eps;
__constant__ double mach;
__constant__ double alpha;
__constant__ double qinf[4];
void op_decl_const_char(int dim, char const *type,
int size, char *dat, char const *name){
cutilSafeCall(hipMemcpyToSymbol(name, dat, dim*size));
}
// user kernel files
#include "save_soln_kernel.cu"
#include "adt_calc_kernel.hip"
#include "res_calc_kernel.cu"
#include "bres_calc_kernel.cu"
#include "update_kernel.hip"
|
028027fee91aaec19ff3247f0b997136b4d22d12.cu
|
//
// auto-generated by op2.m on 18-Apr-2012 15:00:24
//
// header
#include "op_lib_cpp.h"
#include "op_cuda_rt_support.h"
#include "op_cuda_reduction.h"
// global constants
#ifndef MAX_CONST_SIZE
#define MAX_CONST_SIZE 128
#endif
__constant__ double gam;
__constant__ double gm1;
__constant__ double cfl;
__constant__ double eps;
__constant__ double mach;
__constant__ double alpha;
__constant__ double qinf[4];
void op_decl_const_char(int dim, char const *type,
int size, char *dat, char const *name){
cutilSafeCall(cudaMemcpyToSymbol(name, dat, dim*size));
}
// user kernel files
#include "save_soln_kernel.cu"
#include "adt_calc_kernel.cu"
#include "res_calc_kernel.cu"
#include "bres_calc_kernel.cu"
#include "update_kernel.cu"
|
fada076740646d68a8c511c59900f55de7912b18.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <accelerate_cuda.h>
extern "C" __global__ void map(const Int64 shIn0_2, const Int64 shIn0_1, const Int64 shIn0_0, const double* __restrict__ arrIn0_3, const double* __restrict__ arrIn0_2, const double* __restrict__ arrIn0_1, const double* __restrict__ arrIn0_0, const Int64 shOut_2, const Int64 shOut_1, const Int64 shOut_0, double* __restrict__ arrOut_0)
{
const int shapeSize = shOut_2 * (shOut_1 * shOut_0);
const int gridSize = blockDim.x * gridDim.x;
int ix;
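    // Comment added: grid-stride loop - each thread starts at its global index and
    // advances by the total number of launched threads, so any launch configuration
    // covers the full shapeSize range.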
for (ix = blockDim.x * blockIdx.x + threadIdx.x; ix < shapeSize; ix += gridSize) {
const double x3 = arrIn0_3[ix];
arrOut_0[ix] = x3;
}
}
|
fada076740646d68a8c511c59900f55de7912b18.cu
|
#include <accelerate_cuda.h>
extern "C" __global__ void map(const Int64 shIn0_2, const Int64 shIn0_1, const Int64 shIn0_0, const double* __restrict__ arrIn0_3, const double* __restrict__ arrIn0_2, const double* __restrict__ arrIn0_1, const double* __restrict__ arrIn0_0, const Int64 shOut_2, const Int64 shOut_1, const Int64 shOut_0, double* __restrict__ arrOut_0)
{
const int shapeSize = shOut_2 * (shOut_1 * shOut_0);
const int gridSize = blockDim.x * gridDim.x;
int ix;
for (ix = blockDim.x * blockIdx.x + threadIdx.x; ix < shapeSize; ix += gridSize) {
const double x3 = arrIn0_3[ix];
arrOut_0[ix] = x3;
}
}
|
fdc41a364e7d6969e52a618e5d7a93004a3e2005.hip
|
// !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2018-2021 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "open3d/t/pipelines/kernel/TransformationConverterImpl.h"
namespace open3d {
namespace t {
namespace pipelines {
namespace kernel {
template <typename scalar_t>
__global__ void PoseToTransformationKernel(scalar_t *transformation_ptr,
const scalar_t *X_ptr) {
PoseToTransformationImpl(transformation_ptr, X_ptr);
}
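// Comment added: the conversion writes one fixed-size 4x4 transformation from a small
// pose vector, so the single-thread launches below are sufficient; doing it in a kernel
// presumably just keeps transformation_ptr resident on the device.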
template <typename scalar_t>
void PoseToTransformationCUDA(scalar_t *transformation_ptr,
const scalar_t *X_ptr) {
utility::LogError("Unsupported data type.");
}
template <>
void PoseToTransformationCUDA<float>(float *transformation_ptr,
const float *X_ptr) {
hipLaunchKernelGGL(( PoseToTransformationKernel<float>)
, dim3(1), dim3(1), 0, core::cuda::GetStream(), transformation_ptr, X_ptr);
}
template <>
void PoseToTransformationCUDA<double>(double *transformation_ptr,
const double *X_ptr) {
hipLaunchKernelGGL(( PoseToTransformationKernel<double>)
, dim3(1), dim3(1), 0, core::cuda::GetStream(), transformation_ptr, X_ptr);
}
} // namespace kernel
} // namespace pipelines
} // namespace t
} // namespace open3d
|
fdc41a364e7d6969e52a618e5d7a93004a3e2005.cu
|
// ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2018-2021 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
#include <cuda.h>
#include <cuda_runtime.h>
#include "open3d/t/pipelines/kernel/TransformationConverterImpl.h"
namespace open3d {
namespace t {
namespace pipelines {
namespace kernel {
template <typename scalar_t>
__global__ void PoseToTransformationKernel(scalar_t *transformation_ptr,
const scalar_t *X_ptr) {
PoseToTransformationImpl(transformation_ptr, X_ptr);
}
template <typename scalar_t>
void PoseToTransformationCUDA(scalar_t *transformation_ptr,
const scalar_t *X_ptr) {
utility::LogError("Unsupported data type.");
}
template <>
void PoseToTransformationCUDA<float>(float *transformation_ptr,
const float *X_ptr) {
PoseToTransformationKernel<float>
<<<1, 1, 0, core::cuda::GetStream()>>>(transformation_ptr, X_ptr);
}
template <>
void PoseToTransformationCUDA<double>(double *transformation_ptr,
const double *X_ptr) {
PoseToTransformationKernel<double>
<<<1, 1, 0, core::cuda::GetStream()>>>(transformation_ptr, X_ptr);
}
} // namespace kernel
} // namespace pipelines
} // namespace t
} // namespace open3d
|
98db07c7f73baa9d656bc2897ad4c4695414cf9f.hip
|
// !!! This is a file automatically generated by hipify!!!
// PP_vectadd_host.cpp : This file contains the 'main' function. Program execution starts and ends there.
//
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <cstdio>
//#include <iostream>
// The arrays c, a, b must be declared on the device.
__global__ void addKernel(int* c, const int* a, const int* b) {
    int i = threadIdx.x; // the index of the executing core (thread) is stored in i
c[i] = a[i] + b[i];
}
int main(void) {
const int SIZE = 5;
const int a[SIZE] = { 1,2,3,4,5 };
    const int b[SIZE] = { 10,20,30,40,50 };
int c[SIZE] = { 0 };
int* dev_a = 0;
int* dev_b = 0;
int* dev_c = 0;
//allocate device memory
hipMalloc((void**)&dev_a, SIZE * sizeof(int));
hipMalloc((void**)&dev_b, SIZE * sizeof(int));
hipMalloc((void**)&dev_c , SIZE * sizeof(int));
//copy from host to device
hipMemcpy(dev_a, a, SIZE * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, SIZE * sizeof(int), hipMemcpyHostToDevice);
    // launch a kernel on the GPU with one thread for each element
hipLaunchKernelGGL(( addKernel) , dim3(1), dim3(SIZE) , 0, 0, dev_c, dev_a, dev_b);
//copy from device to host
hipMemcpy(c, dev_c, SIZE * sizeof(int), hipMemcpyDeviceToHost);
//free device memory
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
//print the result
printf("{%d, %d, %d,%d,%d}+{%d, %d, %d,%d,%d}""={%d, %d, %d,%d,%d}\n",
a[0], a[1], a[2], a[3], a[4],
b[0], b[1], b[2], b[3], b[4],
c[0], c[1], c[2], c[3], c[4]);
return 0;
}
|
98db07c7f73baa9d656bc2897ad4c4695414cf9f.cu
|
// PP_vectadd_host.cpp : This file contains the 'main' function. Program execution starts and ends there.
//
#include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cstdio>
//#include <iostream>
// The arrays c, a, b must be declared on the device.
__global__ void addKernel(int* c, const int* a, const int* b) {
    int i = threadIdx.x; // the index of the executing core (thread) is stored in i
c[i] = a[i] + b[i];
}
int main(void) {
const int SIZE = 5;
const int a[SIZE] = { 1,2,3,4,5 };
    const int b[SIZE] = { 10,20,30,40,50 };
int c[SIZE] = { 0 };
int* dev_a = 0;
int* dev_b = 0;
int* dev_c = 0;
//allocate device memory
cudaMalloc((void**)&dev_a, SIZE * sizeof(int));
cudaMalloc((void**)&dev_b, SIZE * sizeof(int));
cudaMalloc((void**)&dev_c , SIZE * sizeof(int));
//copy from host to device
cudaMemcpy(dev_a, a, SIZE * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, SIZE * sizeof(int), cudaMemcpyHostToDevice);
    // launch a kernel on the GPU with one thread for each element
addKernel <<<1, SIZE >>> (dev_c, dev_a, dev_b);
//copy from device to host
cudaMemcpy(c, dev_c, SIZE * sizeof(int), cudaMemcpyDeviceToHost);
//free device memory
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
//print the result
printf("{%d, %d, %d,%d,%d}+{%d, %d, %d,%d,%d}""={%d, %d, %d,%d,%d}\n",
a[0], a[1], a[2], a[3], a[4],
b[0], b[1], b[2], b[3], b[4],
c[0], c[1], c[2], c[3], c[4]);
return 0;
}
|
e1a5a437a94f05ebe3027bd0198d835a1582cdaf.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <cutf/type.hpp>
#include <cutf/memory.hpp>
#include <cutf/cublas.hpp>
#include "kan_algorithm.hpp"
template <class T>
kan_algorithm::gemm<T>::gemm(const int gpu_id) : kan_algorithm::kan_base<T>(gpu_id){}
template <class T>
std::size_t kan_algorithm::gemm<T>::run(const bool &complete, std::vector<hyperparameter::parameter_t> parameters){
std::size_t loop_count = 0;
// N x N
const std::size_t N = parameters[0];
auto dA = cutf::cuda::memory::get_device_unique_ptr<T>(N * N);
auto dB = cutf::cuda::memory::get_device_unique_ptr<T>(N * N);
auto dC = cutf::cuda::memory::get_device_unique_ptr<T>(N * N);
auto cublas = cutf::cublas::get_cublas_unique_ptr();
const T alpha = cutf::cuda::type::cast<T>(0.0f);
const T beta = cutf::cuda::type::cast<T>(0.0f);
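	// Comment added: the loop below keeps issuing N x N GEMMs until the caller flips
	// `complete`, synchronizing after each call so loop_count only counts kernels that
	// actually finished.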
while(!complete){
cutf::cublas::error::check(cutf::cublas::gemm(
*cublas.get(),
HIPBLAS_OP_N, HIPBLAS_OP_N,
N, N, N,
&alpha,
dA.get(), N,
dB.get(), N,
&beta,
dC.get(), N
), __FILE__, __LINE__, __func__);
hipDeviceSynchronize();
loop_count++;
}
return loop_count;
}
template <class T>
std::vector<hyperparameter::range> kan_algorithm::gemm<T>::get_hyperparameter_ranges() const{
return {
{"N","matrix size N x N", 1<<6, 1<<11, [](hyperparameter::parameter_t a){return a * 2;}}
};
}
template class kan_algorithm::gemm<float>;
template class kan_algorithm::gemm<double>;
|
e1a5a437a94f05ebe3027bd0198d835a1582cdaf.cu
|
#include <cutf/type.hpp>
#include <cutf/memory.hpp>
#include <cutf/cublas.hpp>
#include "kan_algorithm.hpp"
template <class T>
kan_algorithm::gemm<T>::gemm(const int gpu_id) : kan_algorithm::kan_base<T>(gpu_id){}
template <class T>
std::size_t kan_algorithm::gemm<T>::run(const bool &complete, std::vector<hyperparameter::parameter_t> parameters){
std::size_t loop_count = 0;
	// Size of the matrices whose product is computed: N x N
const std::size_t N = parameters[0];
auto dA = cutf::cuda::memory::get_device_unique_ptr<T>(N * N);
auto dB = cutf::cuda::memory::get_device_unique_ptr<T>(N * N);
auto dC = cutf::cuda::memory::get_device_unique_ptr<T>(N * N);
auto cublas = cutf::cublas::get_cublas_unique_ptr();
const T alpha = cutf::cuda::type::cast<T>(0.0f);
const T beta = cutf::cuda::type::cast<T>(0.0f);
while(!complete){
cutf::cublas::error::check(cutf::cublas::gemm(
*cublas.get(),
CUBLAS_OP_N, CUBLAS_OP_N,
N, N, N,
&alpha,
dA.get(), N,
dB.get(), N,
&beta,
dC.get(), N
), __FILE__, __LINE__, __func__);
cudaDeviceSynchronize();
loop_count++;
}
return loop_count;
}
template <class T>
std::vector<hyperparameter::range> kan_algorithm::gemm<T>::get_hyperparameter_ranges() const{
return {
{"N","matrix size N x N", 1<<6, 1<<11, [](hyperparameter::parameter_t a){return a * 2;}}
};
}
template class kan_algorithm::gemm<float>;
template class kan_algorithm::gemm<double>;
|
691dd314f6c4001b7c20cc177ab8938e5e8d6c5e.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This sample demonstrates a combination of Peer-to-Peer (P2P) and
* Unified Virtual Address Space (UVA) features new to SDK 4.0
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
// CUDA includes
#include <hip/hip_runtime.h>
// includes, project
#include <helper_cuda.h>
#include <helper_functions.h> // helper for shared functions that are common to CUDA Samples
__global__ void SimpleKernel(float *src, float *dst)
{
// Just a dummy kernel, doing enough for us to verify that everything
// worked
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
dst[idx] = src[idx] * 2.0f;
}
inline bool IsAppBuiltAs64()
{
return sizeof(void*) == 8;
}
int main(int argc, char **argv)
{
printf("[%s] - Starting...\n", argv[0]);
if (!IsAppBuiltAs64())
{
printf("%s is only supported with on 64-bit OSs and the application must be built as a 64-bit target. Test is being waived.\n", argv[0]);
exit(EXIT_WAIVED);
}
// Number of GPUs
printf("Checking for multiple GPUs...\n");
int gpu_n;
checkCudaErrors(hipGetDeviceCount(&gpu_n));
printf("CUDA-capable device count: %i\n", gpu_n);
if (gpu_n < 2)
{
printf("Two or more GPUs with Peer-to-Peer access capability are required for %s.\n", argv[0]);
printf("Waiving test.\n");
exit(EXIT_WAIVED);
}
// Query device properties
hipDeviceProp_t prop[64];
int gpuid[2]; // we want to find the first two GPU's that can support P2P
for (int i=0; i < gpu_n; i++)
{
checkCudaErrors(hipGetDeviceProperties(&prop[i], i));
}
// Check possibility for peer access
printf("\nChecking GPU(s) for support of peer to peer memory access...\n");
int can_access_peer;
int p2pCapableGPUs[2]; // We take only 1 pair of P2P capable GPUs
p2pCapableGPUs[0] = p2pCapableGPUs[1] = -1;
// Show all the combinations of supported P2P GPUs
for (int i = 0; i < gpu_n; i++)
{
for (int j = 0; j < gpu_n; j++)
{
if (i == j)
{
continue;
}
checkCudaErrors(hipDeviceCanAccessPeer(&can_access_peer, i, j));
printf("> Peer access from %s (GPU%d) -> %s (GPU%d) : %s\n", prop[i].name, i,
prop[j].name, j, can_access_peer ? "Yes" : "No");
if (can_access_peer && p2pCapableGPUs[0] == -1)
{
p2pCapableGPUs[0] = i;
p2pCapableGPUs[1] = j;
}
}
}
if (p2pCapableGPUs[0] == -1 || p2pCapableGPUs[1] == -1)
{
printf("Two or more GPUs with Peer-to-Peer access capability are required for %s.\n", argv[0]);
printf("Peer to Peer access is not available amongst GPUs in the system, waiving test.\n");
exit(EXIT_WAIVED);
}
// Use first pair of p2p capable GPUs detected.
gpuid[0] = p2pCapableGPUs[0];
gpuid[1] = p2pCapableGPUs[1];
// Enable peer access
printf("Enabling peer access between GPU%d and GPU%d...\n", gpuid[0], gpuid[1]);
checkCudaErrors(hipSetDevice(gpuid[0]));
checkCudaErrors(hipDeviceEnablePeerAccess(gpuid[1], 0));
checkCudaErrors(hipSetDevice(gpuid[1]));
checkCudaErrors(hipDeviceEnablePeerAccess(gpuid[0], 0));
// Allocate buffers
const size_t buf_size = 1024 * 1024 * 16 * sizeof(float);
printf("Allocating buffers (%iMB on GPU%d, GPU%d and CPU Host)...\n", int(buf_size / 1024 / 1024), gpuid[0], gpuid[1]);
checkCudaErrors(hipSetDevice(gpuid[0]));
float *g0;
checkCudaErrors(hipMalloc(&g0, buf_size));
checkCudaErrors(hipSetDevice(gpuid[1]));
float *g1;
checkCudaErrors(hipMalloc(&g1, buf_size));
float *h0;
checkCudaErrors(hipHostMalloc(&h0, buf_size)); // Automatically portable with UVA
// Create CUDA event handles
printf("Creating event handles...\n");
hipEvent_t start_event, stop_event;
float time_memcpy;
int eventflags = hipEventBlockingSync;
checkCudaErrors(hipEventCreateWithFlags(&start_event, eventflags));
checkCudaErrors(hipEventCreateWithFlags(&stop_event, eventflags));
// P2P memcopy() benchmark
checkCudaErrors(hipEventRecord(start_event, 0));
for (int i=0; i<100; i++)
{
// With UVA we don't need to specify source and target devices, the
// runtime figures this out by itself from the pointers
// Ping-pong copy between GPUs
if (i % 2 == 0)
{
checkCudaErrors(hipMemcpy(g1, g0, buf_size, hipMemcpyDefault));
}
else
{
checkCudaErrors(hipMemcpy(g0, g1, buf_size, hipMemcpyDefault));
}
}
checkCudaErrors(hipEventRecord(stop_event, 0));
checkCudaErrors(hipEventSynchronize(stop_event));
checkCudaErrors(hipEventElapsedTime(&time_memcpy, start_event, stop_event));
printf("hipMemcpyPeer / hipMemcpy between GPU%d and GPU%d: %.2fGB/s\n", gpuid[0], gpuid[1],
(1.0f / (time_memcpy / 1000.0f)) * ((100.0f * buf_size)) / 1024.0f / 1024.0f / 1024.0f);
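    // Comment added: the figure above is the total bytes moved (100 ping-pong copies of
    // the 64 MB buffer) divided by the elapsed time in seconds, reported in GiB/s.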
// Prepare host buffer and copy to GPU 0
printf("Preparing host buffer and memcpy to GPU%d...\n", gpuid[0]);
for (int i=0; i<buf_size / sizeof(float); i++)
{
h0[i] = float(i % 4096);
}
checkCudaErrors(hipSetDevice(gpuid[0]));
checkCudaErrors(hipMemcpy(g0, h0, buf_size, hipMemcpyDefault));
// Kernel launch configuration
const dim3 threads(512, 1);
const dim3 blocks((buf_size / sizeof(float)) / threads.x, 1);
// Run kernel on GPU 1, reading input from the GPU 0 buffer, writing
// output to the GPU 1 buffer
printf("Run kernel on GPU%d, taking source data from GPU%d and writing to GPU%d...\n",
gpuid[1], gpuid[0], gpuid[1]);
checkCudaErrors(hipSetDevice(gpuid[1]));
hipLaunchKernelGGL(( SimpleKernel), dim3(blocks), dim3(threads), 0, 0, g0, g1);
checkCudaErrors(hipDeviceSynchronize());
// Run kernel on GPU 0, reading input from the GPU 1 buffer, writing
// output to the GPU 0 buffer
printf("Run kernel on GPU%d, taking source data from GPU%d and writing to GPU%d...\n",
gpuid[0], gpuid[1], gpuid[0]);
checkCudaErrors(hipSetDevice(gpuid[0]));
hipLaunchKernelGGL(( SimpleKernel), dim3(blocks), dim3(threads), 0, 0, g1, g0);
checkCudaErrors(hipDeviceSynchronize());
// Copy data back to host and verify
printf("Copy data back to host from GPU%d and verify results...\n", gpuid[0]);
checkCudaErrors(hipMemcpy(h0, g0, buf_size, hipMemcpyDefault));
int error_count = 0;
for (int i=0; i<buf_size / sizeof(float); i++)
{
// Re-generate input data and apply 2x '* 2.0f' computation of both
// kernel runs
if (h0[i] != float(i % 4096) * 2.0f * 2.0f)
{
printf("Verification error @ element %i: val = %f, ref = %f\n", i, h0[i], (float(i%4096)*2.0f*2.0f));
if (error_count++ > 10)
{
break;
}
}
}
// Disable peer access (also unregisters memory for non-UVA cases)
printf("Disabling peer access...\n");
checkCudaErrors(hipSetDevice(gpuid[0]));
checkCudaErrors(hipDeviceDisablePeerAccess(gpuid[1]));
checkCudaErrors(hipSetDevice(gpuid[1]));
checkCudaErrors(hipDeviceDisablePeerAccess(gpuid[0]));
// Cleanup and shutdown
printf("Shutting down...\n");
checkCudaErrors(hipEventDestroy(start_event));
checkCudaErrors(hipEventDestroy(stop_event));
checkCudaErrors(hipSetDevice(gpuid[0]));
checkCudaErrors(hipFree(g0));
checkCudaErrors(hipSetDevice(gpuid[1]));
checkCudaErrors(hipFree(g1));
checkCudaErrors(hipHostFree(h0));
for (int i=0; i<gpu_n; i++)
{
checkCudaErrors(hipSetDevice(i));
}
if (error_count != 0)
{
printf("Test failed!\n");
exit(EXIT_FAILURE);
}
else
{
printf("Test passed\n");
exit(EXIT_SUCCESS);
}
}
|
691dd314f6c4001b7c20cc177ab8938e5e8d6c5e.cu
|
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This sample demonstrates a combination of Peer-to-Peer (P2P) and
* Unified Virtual Address Space (UVA) features new to SDK 4.0
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
// CUDA includes
#include <cuda_runtime.h>
// includes, project
#include <helper_cuda.h>
#include <helper_functions.h> // helper for shared functions that are common to CUDA Samples
__global__ void SimpleKernel(float *src, float *dst)
{
// Just a dummy kernel, doing enough for us to verify that everything
// worked
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
dst[idx] = src[idx] * 2.0f;
}
inline bool IsAppBuiltAs64()
{
return sizeof(void*) == 8;
}
int main(int argc, char **argv)
{
printf("[%s] - Starting...\n", argv[0]);
if (!IsAppBuiltAs64())
{
printf("%s is only supported with on 64-bit OSs and the application must be built as a 64-bit target. Test is being waived.\n", argv[0]);
exit(EXIT_WAIVED);
}
// Number of GPUs
printf("Checking for multiple GPUs...\n");
int gpu_n;
checkCudaErrors(cudaGetDeviceCount(&gpu_n));
printf("CUDA-capable device count: %i\n", gpu_n);
if (gpu_n < 2)
{
printf("Two or more GPUs with Peer-to-Peer access capability are required for %s.\n", argv[0]);
printf("Waiving test.\n");
exit(EXIT_WAIVED);
}
// Query device properties
cudaDeviceProp prop[64];
int gpuid[2]; // we want to find the first two GPU's that can support P2P
for (int i=0; i < gpu_n; i++)
{
checkCudaErrors(cudaGetDeviceProperties(&prop[i], i));
}
// Check possibility for peer access
printf("\nChecking GPU(s) for support of peer to peer memory access...\n");
int can_access_peer;
int p2pCapableGPUs[2]; // We take only 1 pair of P2P capable GPUs
p2pCapableGPUs[0] = p2pCapableGPUs[1] = -1;
// Show all the combinations of supported P2P GPUs
for (int i = 0; i < gpu_n; i++)
{
for (int j = 0; j < gpu_n; j++)
{
if (i == j)
{
continue;
}
checkCudaErrors(cudaDeviceCanAccessPeer(&can_access_peer, i, j));
printf("> Peer access from %s (GPU%d) -> %s (GPU%d) : %s\n", prop[i].name, i,
prop[j].name, j, can_access_peer ? "Yes" : "No");
if (can_access_peer && p2pCapableGPUs[0] == -1)
{
p2pCapableGPUs[0] = i;
p2pCapableGPUs[1] = j;
}
}
}
if (p2pCapableGPUs[0] == -1 || p2pCapableGPUs[1] == -1)
{
printf("Two or more GPUs with Peer-to-Peer access capability are required for %s.\n", argv[0]);
printf("Peer to Peer access is not available amongst GPUs in the system, waiving test.\n");
exit(EXIT_WAIVED);
}
// Use first pair of p2p capable GPUs detected.
gpuid[0] = p2pCapableGPUs[0];
gpuid[1] = p2pCapableGPUs[1];
// Enable peer access
printf("Enabling peer access between GPU%d and GPU%d...\n", gpuid[0], gpuid[1]);
checkCudaErrors(cudaSetDevice(gpuid[0]));
checkCudaErrors(cudaDeviceEnablePeerAccess(gpuid[1], 0));
checkCudaErrors(cudaSetDevice(gpuid[1]));
checkCudaErrors(cudaDeviceEnablePeerAccess(gpuid[0], 0));
// Allocate buffers
const size_t buf_size = 1024 * 1024 * 16 * sizeof(float);
printf("Allocating buffers (%iMB on GPU%d, GPU%d and CPU Host)...\n", int(buf_size / 1024 / 1024), gpuid[0], gpuid[1]);
checkCudaErrors(cudaSetDevice(gpuid[0]));
float *g0;
checkCudaErrors(cudaMalloc(&g0, buf_size));
checkCudaErrors(cudaSetDevice(gpuid[1]));
float *g1;
checkCudaErrors(cudaMalloc(&g1, buf_size));
float *h0;
checkCudaErrors(cudaMallocHost(&h0, buf_size)); // Automatically portable with UVA
// Create CUDA event handles
printf("Creating event handles...\n");
cudaEvent_t start_event, stop_event;
float time_memcpy;
int eventflags = cudaEventBlockingSync;
checkCudaErrors(cudaEventCreateWithFlags(&start_event, eventflags));
checkCudaErrors(cudaEventCreateWithFlags(&stop_event, eventflags));
// P2P memcopy() benchmark
checkCudaErrors(cudaEventRecord(start_event, 0));
for (int i=0; i<100; i++)
{
// With UVA we don't need to specify source and target devices, the
// runtime figures this out by itself from the pointers
// Ping-pong copy between GPUs
if (i % 2 == 0)
{
checkCudaErrors(cudaMemcpy(g1, g0, buf_size, cudaMemcpyDefault));
}
else
{
checkCudaErrors(cudaMemcpy(g0, g1, buf_size, cudaMemcpyDefault));
}
}
checkCudaErrors(cudaEventRecord(stop_event, 0));
checkCudaErrors(cudaEventSynchronize(stop_event));
checkCudaErrors(cudaEventElapsedTime(&time_memcpy, start_event, stop_event));
printf("cudaMemcpyPeer / cudaMemcpy between GPU%d and GPU%d: %.2fGB/s\n", gpuid[0], gpuid[1],
(1.0f / (time_memcpy / 1000.0f)) * ((100.0f * buf_size)) / 1024.0f / 1024.0f / 1024.0f);
// Prepare host buffer and copy to GPU 0
printf("Preparing host buffer and memcpy to GPU%d...\n", gpuid[0]);
for (int i=0; i<buf_size / sizeof(float); i++)
{
h0[i] = float(i % 4096);
}
checkCudaErrors(cudaSetDevice(gpuid[0]));
checkCudaErrors(cudaMemcpy(g0, h0, buf_size, cudaMemcpyDefault));
// Kernel launch configuration
const dim3 threads(512, 1);
const dim3 blocks((buf_size / sizeof(float)) / threads.x, 1);
// Run kernel on GPU 1, reading input from the GPU 0 buffer, writing
// output to the GPU 1 buffer
printf("Run kernel on GPU%d, taking source data from GPU%d and writing to GPU%d...\n",
gpuid[1], gpuid[0], gpuid[1]);
checkCudaErrors(cudaSetDevice(gpuid[1]));
SimpleKernel<<<blocks, threads>>>(g0, g1);
checkCudaErrors(cudaDeviceSynchronize());
// Run kernel on GPU 0, reading input from the GPU 1 buffer, writing
// output to the GPU 0 buffer
printf("Run kernel on GPU%d, taking source data from GPU%d and writing to GPU%d...\n",
gpuid[0], gpuid[1], gpuid[0]);
checkCudaErrors(cudaSetDevice(gpuid[0]));
SimpleKernel<<<blocks, threads>>>(g1, g0);
checkCudaErrors(cudaDeviceSynchronize());
// Copy data back to host and verify
printf("Copy data back to host from GPU%d and verify results...\n", gpuid[0]);
checkCudaErrors(cudaMemcpy(h0, g0, buf_size, cudaMemcpyDefault));
int error_count = 0;
for (int i=0; i<buf_size / sizeof(float); i++)
{
// Re-generate input data and apply 2x '* 2.0f' computation of both
// kernel runs
if (h0[i] != float(i % 4096) * 2.0f * 2.0f)
{
printf("Verification error @ element %i: val = %f, ref = %f\n", i, h0[i], (float(i%4096)*2.0f*2.0f));
if (error_count++ > 10)
{
break;
}
}
}
// Disable peer access (also unregisters memory for non-UVA cases)
printf("Disabling peer access...\n");
checkCudaErrors(cudaSetDevice(gpuid[0]));
checkCudaErrors(cudaDeviceDisablePeerAccess(gpuid[1]));
checkCudaErrors(cudaSetDevice(gpuid[1]));
checkCudaErrors(cudaDeviceDisablePeerAccess(gpuid[0]));
// Cleanup and shutdown
printf("Shutting down...\n");
checkCudaErrors(cudaEventDestroy(start_event));
checkCudaErrors(cudaEventDestroy(stop_event));
checkCudaErrors(cudaSetDevice(gpuid[0]));
checkCudaErrors(cudaFree(g0));
checkCudaErrors(cudaSetDevice(gpuid[1]));
checkCudaErrors(cudaFree(g1));
checkCudaErrors(cudaFreeHost(h0));
for (int i=0; i<gpu_n; i++)
{
checkCudaErrors(cudaSetDevice(i));
}
if (error_count != 0)
{
printf("Test failed!\n");
exit(EXIT_FAILURE);
}
else
{
printf("Test passed\n");
exit(EXIT_SUCCESS);
}
}
|
3b854ca7193b0a43d50108766de4cd8a51f91424.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* THE FOLLOWING CODE IS AUTHORED BY ADAM SIMPSON FROM
* OAK RIDGE NATIONAL LABORATORY, WITH MODIFICATIONS
* DONE BY NICOLAS BARRIOS AND MUHANNED IBRAHIM. WE DO
* NOT CLAIM OWNERSHIP NOR COPYRIGHT OVER THIS MATERIAL.
* THE ORIGINAL PUBLICATION AND REPOSITORY CAN BE FOUND HERE:
* https://www.olcf.ornl.gov/tutorials/cuda-game-of-life/
*/
#include <stdio.h>
#include <stdlib.h>
#define SRAND_VALUE 1985
#define BLOCK_SIZE_x 32
#define BLOCK_SIZE_y 16
__global__ void ghostRows(int dim, int *grid) {
    // We want id ∈ [1,dim]
int id = blockDim.x * blockIdx.x + threadIdx.x + 1;
if (id <= dim) {
// Copy first real row to bottom ghost row
grid[(dim + 2) * (dim + 1) + id] = grid[(dim + 2) + id];
// Copy last real row to top ghost row
grid[id] = grid[(dim + 2) * dim + id];
}
}
__global__ void ghostCols(int dim, int *grid) {
    // We want id ∈ [0,dim+1]
int id = blockDim.x * blockIdx.x + threadIdx.x;
if (id <= dim + 1) {
// Copy first real column to right most ghost column
grid[id * (dim + 2) + dim + 1] = grid[id * (dim + 2) + 1];
// Copy last real column to left most ghost column
grid[id * (dim + 2)] = grid[id * (dim + 2) + dim];
}
}
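// Comment added: the grid is stored as (dim+2) x (dim+2) with a one-cell ghost border
// that ghostRows/ghostCols fill with wrap-around copies, so GOL below never special-cases
// edges. Each block loads a full BLOCK_SIZE_y x BLOCK_SIZE_x tile into shared memory but
// only its interior threads write results, which is why main() sizes the launch grid with
// ceil(dim / (BLOCK_SIZE - 2)).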
__global__ void GOL(int dim, int *grid, int *newGrid) {
int iy = (blockDim.y - 2) * blockIdx.y + threadIdx.y;
int ix = (blockDim.x - 2) * blockIdx.x + threadIdx.x;
int id = iy * (dim + 2) + ix;
int i = threadIdx.y;
int j = threadIdx.x;
int numNeighbors;
// Declare the shared memory on a per block level
__shared__ int s_grid[BLOCK_SIZE_y][BLOCK_SIZE_x];
// Copy cells into shared memory
if (ix <= dim + 1 && iy <= dim + 1)
s_grid[i][j] = grid[id];
// Sync all threads in block
__syncthreads();
if (iy <= dim && ix <= dim) {
if (i != 0 && i != blockDim.y - 1 && j != 0 && j != blockDim.x - 1) {
// Get the number of neighbors for a given grid point
numNeighbors = s_grid[i + 1][j] + s_grid[i - 1][j] // upper lower
+ s_grid[i][j + 1] + s_grid[i][j - 1] // right left
+ s_grid[i + 1][j + 1] +
s_grid[i - 1][j - 1] // diagonals
+ s_grid[i - 1][j + 1] + s_grid[i + 1][j - 1];
int cell = s_grid[i][j];
// Here we have explicitly all of the game rules
if (cell == 1 && numNeighbors < 2)
newGrid[id] = 0;
else if (cell == 1 && (numNeighbors == 2 || numNeighbors == 3))
newGrid[id] = 1;
else if (cell == 1 && numNeighbors > 3)
newGrid[id] = 0;
else if (cell == 0 && numNeighbors == 3)
newGrid[id] = 1;
else
newGrid[id] = cell;
}
}
}
int main() {
int i, j, iter;
int *h_grid; // Grid on host
int *d_grid; // Grid on device
int *d_newGrid; // Second grid used on device only
int *d_tmpGrid; // tmp grid pointer used to switch between grid and newGrid
int dim = 1 << 13; // Linear dimension of our grid - not counting ghost cells
int maxIter = 100; // Number of game steps
size_t bytes = sizeof(int) * (dim + 2) * (dim + 2);
// Allocate host Grid used for initial setup and read back from device
h_grid = (int *)malloc(bytes);
// Allocate device grids
hipMalloc(&d_grid, bytes);
hipMalloc(&d_newGrid, bytes);
// Assign initial population randomly
srand(SRAND_VALUE);
for (i = 1; i <= dim; i++) {
for (j = 1; j <= dim; j++) {
h_grid[i * (dim + 2) + j] = rand() % 2;
}
}
// See
// https://developer.nvidia.com/blog/how-implement-performance-metrics-cuda-cc/
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipFuncSetCacheConfig(GOL, hipFuncCachePreferShared);
// Copy over initial game grid (Dim-1 threads)
hipMemcpy(d_grid, h_grid, bytes, hipMemcpyHostToDevice);
dim3 blockSize(BLOCK_SIZE_x, BLOCK_SIZE_y, 1);
int linGrid_x = (int)ceil(dim / (float)(BLOCK_SIZE_x - 2));
int linGrid_y = (int)ceil(dim / (float)(BLOCK_SIZE_y - 2));
dim3 gridSize(linGrid_x, linGrid_y, 1);
dim3 cpyBlockSize(BLOCK_SIZE_x, 1, 1);
dim3 cpyGridRowsGridSize((int)ceil(dim / (float)cpyBlockSize.x), 1, 1);
dim3 cpyGridColsGridSize((int)ceil((dim + 2) / (float)cpyBlockSize.x), 1,
1);
// Added this myself -N
#ifdef DEBUG
printf("blockSize: { %d %d %d }\n", blockSize.x, blockSize.y, blockSize.z);
printf("gridSize: { %d %d %d }\n", gridSize.x, gridSize.y, gridSize.z);
printf("cpyBlockSize: { %d %d %d }\n", cpyBlockSize.x, cpyBlockSize.y,
cpyBlockSize.z);
printf("cpyGridRowsGridSize: { %d %d %d }\n", cpyGridRowsGridSize.x,
cpyGridRowsGridSize.y, cpyGridRowsGridSize.z);
printf("cpyGridColsGridSize: { %d %d %d }\n", cpyGridColsGridSize.x,
cpyGridColsGridSize.y, cpyGridColsGridSize.z);
#endif
hipEventRecord(start);
// Main game loop
for (iter = 0; iter < maxIter; iter++) {
hipLaunchKernelGGL(( ghostRows), dim3(cpyGridRowsGridSize), dim3(cpyBlockSize), 0, 0, dim, d_grid);
hipLaunchKernelGGL(( ghostCols), dim3(cpyGridColsGridSize), dim3(cpyBlockSize), 0, 0, dim, d_grid);
hipLaunchKernelGGL(( GOL), dim3(gridSize), dim3(blockSize), 0, 0, dim, d_grid, d_newGrid);
// Swap our grids and iterate again
d_tmpGrid = d_grid;
d_grid = d_newGrid;
d_newGrid = d_tmpGrid;
} // iter loop
hipEventRecord(stop);
hipError_t error = hipGetLastError();
if (error != hipSuccess)
printf("CUDA error %s\n", hipGetErrorString(error));
// Copy back results and sum
hipMemcpy(h_grid, d_grid, bytes, hipMemcpyDeviceToHost);
hipEventSynchronize(stop);
hipDeviceSynchronize();
// Sum up alive cells and print results
int total = 0;
for (i = 1; i <= dim; i++) {
for (j = 1; j <= dim; j++) {
#ifdef DEBUG
printf("%d", h_grid[i * (dim + 2) + j]);
#endif
total += h_grid[i * (dim + 2) + j];
}
#ifdef DEBUG
printf("\n");
#endif
}
printf("Total Alive: %d\n", total);
float ms = 0;
hipEventElapsedTime(&ms, start, stop);
printf("ElapsedTime: %f ms\n", ms);
hipFree(d_grid);
hipFree(d_newGrid);
free(h_grid);
return 0;
}
|
3b854ca7193b0a43d50108766de4cd8a51f91424.cu
|
/* THE FOLLOWING CODE IS AUTHORED BY ADAM SIMPSON FROM
* OAK RIDGE NATIONAL LABORATORY, WITH MODIFICATIONS
* DONE BY NICOLAS BARRIOS AND MUHANNED IBRAHIM. WE DO
* NOT CLAIM OWNERSHIP NOR COPYRIGHT OVER THIS MATERIAL.
* THE ORIGINAL PUBLICATION AND REPOSITORY CAN BE FOUND HERE:
* https://www.olcf.ornl.gov/tutorials/cuda-game-of-life/
*/
#include <stdio.h>
#include <stdlib.h>
#define SRAND_VALUE 1985
#define BLOCK_SIZE_x 32
#define BLOCK_SIZE_y 16
__global__ void ghostRows(int dim, int *grid) {
// We want id ∈ [1,dim]
int id = blockDim.x * blockIdx.x + threadIdx.x + 1;
if (id <= dim) {
// Copy first real row to bottom ghost row
grid[(dim + 2) * (dim + 1) + id] = grid[(dim + 2) + id];
// Copy last real row to top ghost row
grid[id] = grid[(dim + 2) * dim + id];
}
}
__global__ void ghostCols(int dim, int *grid) {
// We want id ∈ [0,dim+1]
int id = blockDim.x * blockIdx.x + threadIdx.x;
if (id <= dim + 1) {
// Copy first real column to right most ghost column
grid[id * (dim + 2) + dim + 1] = grid[id * (dim + 2) + 1];
// Copy last real column to left most ghost column
grid[id * (dim + 2)] = grid[id * (dim + 2) + dim];
}
}
__global__ void GOL(int dim, int *grid, int *newGrid) {
int iy = (blockDim.y - 2) * blockIdx.y + threadIdx.y;
int ix = (blockDim.x - 2) * blockIdx.x + threadIdx.x;
int id = iy * (dim + 2) + ix;
int i = threadIdx.y;
int j = threadIdx.x;
int numNeighbors;
// Declare the shared memory on a per block level
__shared__ int s_grid[BLOCK_SIZE_y][BLOCK_SIZE_x];
// Copy cells into shared memory
if (ix <= dim + 1 && iy <= dim + 1)
s_grid[i][j] = grid[id];
// Sync all threads in block
__syncthreads();
if (iy <= dim && ix <= dim) {
if (i != 0 && i != blockDim.y - 1 && j != 0 && j != blockDim.x - 1) {
// Get the number of neighbors for a given grid point
numNeighbors = s_grid[i + 1][j] + s_grid[i - 1][j] // upper lower
+ s_grid[i][j + 1] + s_grid[i][j - 1] // right left
+ s_grid[i + 1][j + 1] +
s_grid[i - 1][j - 1] // diagonals
+ s_grid[i - 1][j + 1] + s_grid[i + 1][j - 1];
int cell = s_grid[i][j];
// Here we have explicitly all of the game rules
if (cell == 1 && numNeighbors < 2)
newGrid[id] = 0;
else if (cell == 1 && (numNeighbors == 2 || numNeighbors == 3))
newGrid[id] = 1;
else if (cell == 1 && numNeighbors > 3)
newGrid[id] = 0;
else if (cell == 0 && numNeighbors == 3)
newGrid[id] = 1;
else
newGrid[id] = cell;
}
}
}
int main() {
int i, j, iter;
int *h_grid; // Grid on host
int *d_grid; // Grid on device
int *d_newGrid; // Second grid used on device only
int *d_tmpGrid; // tmp grid pointer used to switch between grid and newGrid
int dim = 1 << 13; // Linear dimension of our grid - not counting ghost cells
int maxIter = 100; // Number of game steps
size_t bytes = sizeof(int) * (dim + 2) * (dim + 2);
// Allocate host Grid used for initial setup and read back from device
h_grid = (int *)malloc(bytes);
// Allocate device grids
cudaMalloc(&d_grid, bytes);
cudaMalloc(&d_newGrid, bytes);
// Assign initial population randomly
srand(SRAND_VALUE);
for (i = 1; i <= dim; i++) {
for (j = 1; j <= dim; j++) {
h_grid[i * (dim + 2) + j] = rand() % 2;
}
}
// See
// https://developer.nvidia.com/blog/how-implement-performance-metrics-cuda-cc/
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaFuncSetCacheConfig(GOL, cudaFuncCachePreferShared);
// Copy over initial game grid (Dim-1 threads)
cudaMemcpy(d_grid, h_grid, bytes, cudaMemcpyHostToDevice);
dim3 blockSize(BLOCK_SIZE_x, BLOCK_SIZE_y, 1);
int linGrid_x = (int)ceil(dim / (float)(BLOCK_SIZE_x - 2));
int linGrid_y = (int)ceil(dim / (float)(BLOCK_SIZE_y - 2));
dim3 gridSize(linGrid_x, linGrid_y, 1);
dim3 cpyBlockSize(BLOCK_SIZE_x, 1, 1);
dim3 cpyGridRowsGridSize((int)ceil(dim / (float)cpyBlockSize.x), 1, 1);
dim3 cpyGridColsGridSize((int)ceil((dim + 2) / (float)cpyBlockSize.x), 1,
1);
// Added this myself -N
#ifdef DEBUG
printf("blockSize: { %d %d %d }\n", blockSize.x, blockSize.y, blockSize.z);
printf("gridSize: { %d %d %d }\n", gridSize.x, gridSize.y, gridSize.z);
printf("cpyBlockSize: { %d %d %d }\n", cpyBlockSize.x, cpyBlockSize.y,
cpyBlockSize.z);
printf("cpyGridRowsGridSize: { %d %d %d }\n", cpyGridRowsGridSize.x,
cpyGridRowsGridSize.y, cpyGridRowsGridSize.z);
printf("cpyGridColsGridSize: { %d %d %d }\n", cpyGridColsGridSize.x,
cpyGridColsGridSize.y, cpyGridColsGridSize.z);
#endif
cudaEventRecord(start);
// Main game loop
for (iter = 0; iter < maxIter; iter++) {
ghostRows<<<cpyGridRowsGridSize, cpyBlockSize>>>(dim, d_grid);
ghostCols<<<cpyGridColsGridSize, cpyBlockSize>>>(dim, d_grid);
GOL<<<gridSize, blockSize>>>(dim, d_grid, d_newGrid);
// Swap our grids and iterate again
d_tmpGrid = d_grid;
d_grid = d_newGrid;
d_newGrid = d_tmpGrid;
} // iter loop
cudaEventRecord(stop);
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess)
printf("CUDA error %s\n", cudaGetErrorString(error));
// Copy back results and sum
cudaMemcpy(h_grid, d_grid, bytes, cudaMemcpyDeviceToHost);
cudaEventSynchronize(stop);
cudaDeviceSynchronize();
// Sum up alive cells and print results
int total = 0;
for (i = 1; i <= dim; i++) {
for (j = 1; j <= dim; j++) {
#ifdef DEBUG
printf("%d", h_grid[i * (dim + 2) + j]);
#endif
total += h_grid[i * (dim + 2) + j];
}
#ifdef DEBUG
printf("\n");
#endif
}
printf("Total Alive: %d\n", total);
float ms = 0;
cudaEventElapsedTime(&ms, start, stop);
printf("ElapsedTime: %f ms\n", ms);
cudaFree(d_grid);
cudaFree(d_newGrid);
free(h_grid);
return 0;
}
|
557654fbc8aba80d4e857f6d85a55c31bc1e4099.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "src/nfa_kernels.h"
// iNFAnt traversal algorithm to process multiple strings on a NFA
// input : total input string
// input_offset : offset of each input string
// transition_list : list of (source, destination) tuples
// transition_offset    : index of first transition triggered by each symbol
// init_states_vector   : vector of initial states
// persis_states_vector : vector of persistent states
// final_states_vector : vector of final states
// vector_len : length of state vector (# of ST_BLOCKs)
__global__ void AS_kernel(unsigned char *input, int *input_offset,
ST_BLOCK *transition_table,
Transition *transition_list,
ST_BLOCK *init_states_vector,
ST_BLOCK *final_states_vector, int vector_len,
int state_count, int wb_transition_count) {
// Skip to the right input string
input += input_offset[block_ID];
// Get the size of current input string
int input_bytes = input_offset[block_ID + 1] - input_offset[block_ID];
extern __shared__ ST_BLOCK s_data[]; // shared memory
ST_BLOCK *current_st_vec =
s_data; // current active states in shared memory
ST_BLOCK *future_st_vec =
s_data + vector_len; // future active states in shared memory
Transition tuple = transition_list[0];
ST_T src_state, dst_state;
ST_BLOCK src_bit, dst_bit;
unsigned int src_block, dst_block;
int c;
// Copy initial and persistent states from global memory into shared memory
for (int i = thread_ID; i < vector_len; i += thread_count) {
current_st_vec[i] = init_states_vector[i];
future_st_vec[i] = 0;
}
__syncthreads();
if (wb_transition_count == 0) goto BYPASS_HEAD;
// If the first character is a word character, there is a word boundary
// before the first character
if (!is_word_char(input[0])) goto BYPASS_HEAD;
// For each transition triggered by word boundary
for (int i = thread_ID; i < wb_transition_count; i += thread_count) {
tuple = transition_list[i];
src_state = tuple.src;
dst_state = tuple.dst;
src_bit =
1 << (src_state %
bit_sizeof(ST_BLOCK)); // index of state bit inside the block
dst_bit = 1 << (dst_state % bit_sizeof(ST_BLOCK));
src_block = src_state / bit_sizeof(ST_BLOCK); // index of state block
dst_block = dst_state / bit_sizeof(ST_BLOCK);
// If transition source is set in current active state vector
// (divergence happens here)
if (src_bit & current_st_vec[src_block]) {
// Set transition destination in CURRENT active state vector
            atomicOr(&current_st_vec[dst_block], dst_bit);
}
}
__syncthreads();
BYPASS_HEAD:
// For each byte in the input string
for (int byt = 0; byt < input_bytes; byt++) {
for (int i = thread_ID; i < vector_len; i += thread_count) {
future_st_vec[i] = 0;
}
__syncthreads();
c = (int)(input[byt]);
// For each transition triggered by the character
for (int blk = 0; blk < vector_len; blk++) {
int tmp = current_st_vec[blk];
/*
if (tmp) {
for (int s = blk * bit_sizeof(ST_BLOCK);
s < min((int)((blk + 1) * bit_sizeof(ST_BLOCK)),
(int)state_count);
s++) {
if (tmp & (1 << (s % bit_sizeof(ST_BLOCK)))) {
for (int i = thread_ID; i < vector_len;
i += thread_count) {
future_st_vec[i] |=
transition_table[c * state_count * vector_len +
s * vector_len + i];
}
__syncthreads();
}
}
}*/
while (tmp) {
int pos = __ffs(tmp);
tmp = tmp ^ (1<<(pos-1));
pos = blk * bit_sizeof(ST_BLOCK) + pos-1;
for (int i = thread_ID; i < vector_len;
i += thread_count) {
future_st_vec[i] |=
transition_table[c * state_count * vector_len +
pos * vector_len + i];
}
__syncthreads();
}
}
// Swap current and future active state vector
if (current_st_vec == s_data) {
current_st_vec = s_data + vector_len;
future_st_vec = s_data;
} else {
current_st_vec = s_data;
future_st_vec = s_data + vector_len;
}
__syncthreads();
// No transition triggered by word boundary
if (wb_transition_count == 0) continue;
// If there is NOT a word boundary between input[byt] and input[byt + 1]
// or after the last character
if ((byt < input_bytes - 1 &&
(is_word_char(input[byt]) ^ is_word_char(input[byt + 1])) == 0) ||
(byt == input_bytes - 1 && !is_word_char(input[input_bytes - 1])))
continue;
// For each transition triggered by word boundary
for (int i = thread_ID; i < wb_transition_count; i += thread_count) {
tuple = transition_list[i];
src_state = tuple.src;
dst_state = tuple.dst;
src_bit =
1 << (src_state %
bit_sizeof(
ST_BLOCK)); // index of state bit inside the block
dst_bit = 1 << (dst_state % bit_sizeof(ST_BLOCK));
src_block =
src_state / bit_sizeof(ST_BLOCK); // index of state block
dst_block = dst_state / bit_sizeof(ST_BLOCK);
// If transition source is set in current active state vector
// (divergence happens here)
if (src_bit & current_st_vec[src_block]) {
// Set transition destination in CURRENT active state vector
                atomicOr(&current_st_vec[dst_block], dst_bit);
}
}
__syncthreads();
}
// Copy final active states from shared memory into global memory
for (int i = thread_ID; i < vector_len; i += thread_count) {
final_states_vector[block_ID * vector_len + i] = current_st_vec[i];
}
}
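// Illustrative sketch (added for exposition, not part of the original file):
// AS_kernel keeps each NFA state set as a bit vector of ST_BLOCK words, where
// state s maps to bit (s % bit_sizeof(ST_BLOCK)) of word (s / bit_sizeof(ST_BLOCK)),
// exactly as the src_bit/src_block arithmetic above computes. The host helpers
// below restate that mapping on a plain 32-bit word type; the names and the
// 32-bit assumption are mine, not the library's.
typedef unsigned int sketch_word_t;
static inline int sketch_state_test(const sketch_word_t *vec, int s) {
    const int bits = 8 * (int)sizeof(sketch_word_t);
    return (int)((vec[s / bits] >> (s % bits)) & 1u);
}
static inline void sketch_state_set(sketch_word_t *vec, int s) {
    const int bits = 8 * (int)sizeof(sketch_word_t);
    vec[s / bits] |= (sketch_word_t)1u << (s % bits);
}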
// Host function to run iNFAnt algorithm on GPU
// This function can process multiple strings on a NFA simultaneously
// tg : NFA transition graph
// h_input_array : array of input string in host memory
// input_bytes_array : array of string length
// array_size : array size (# of strings to match)
// threads_per_block : # of threads per block for kernel function
// show_match_result : print regex matching result if this variable is true
void run_AS(struct ita_scratch &scratch, unsigned char **h_input_array,
int *input_bytes_array, int array_size, int threads_per_block,
bool show_match_result, bool profiler_mode,
vector<int> *accept_rules) {
struct timeval start_time, end_time;
hipEvent_t memalloc_start,
memalloc_end; // start and end events of device memory allocation
hipEvent_t memcpy_h2d_start,
memcpy_h2d_end; // start and end events of memory copy from host to
// device
hipEvent_t kernel_start,
kernel_end; // start and end events of kernel execution
hipEvent_t memcpy_d2h_start,
memcpy_d2h_end; // start and end events of memory copy from device to
// host
hipEvent_t memfree_start,
memfree_end; // start and end events of device memory free
int vec_len = scratch.tg->init_states_vector
.block_count; // length (# of blocks) of state vector
int total_input_bytes = 0; // sum of string length
// Variables in host memory
unsigned char *h_input; // total input string
int h_input_offset[array_size + 1]; // offsets of all input strings
ST_BLOCK *h_final_st_vec; // final active states of all strings
// Variables in device memory
unsigned char *d_input; // total input string
int *d_input_offset; // offset of each input string
ST_BLOCK *d_final_st_vec;
// Create events
if (profiler_mode) {
hipEventCreate(&memalloc_start);
hipEventCreate(&memalloc_end);
hipEventCreate(&memcpy_h2d_start);
hipEventCreate(&memcpy_h2d_end);
hipEventCreate(&kernel_start);
hipEventCreate(&kernel_end);
hipEventCreate(&memcpy_d2h_start);
hipEventCreate(&memcpy_d2h_end);
hipEventCreate(&memfree_start);
hipEventCreate(&memfree_end);
gettimeofday(&start_time, NULL);
}
for (int i = 0; i < array_size; i++) {
h_input_offset[i] = total_input_bytes;
total_input_bytes += input_bytes_array[i];
}
h_input_offset[array_size] = total_input_bytes;
h_input = (unsigned char *)malloc(total_input_bytes);
if (!h_input) {
cerr << "Error: allocate host memory to store total input string"
<< endl;
exit(-1);
}
// Copy each string into h_input to construct a big string
for (int i = 0; i < array_size; i++) {
memcpy(h_input + h_input_offset[i], h_input_array[i],
input_bytes_array[i]);
}
// Allocate host memory
h_final_st_vec =
(ST_BLOCK *)malloc(sizeof(ST_BLOCK) * vec_len * array_size);
if (!h_final_st_vec) {
cerr << "Error: allocate host memory to store final state vectors"
<< endl;
exit(-1);
}
// Allocate device memory
if (profiler_mode) hipEventRecord(memalloc_start, 0);
hipMalloc((void **)&d_input, total_input_bytes);
hipMalloc((void **)&d_input_offset, sizeof(int) * (array_size + 1));
hipMalloc((void **)&d_final_st_vec,
sizeof(ST_BLOCK) * vec_len * array_size);
if (profiler_mode) hipEventRecord(memalloc_end, 0);
// Copy input from host memory into device memory
if (profiler_mode) hipEventRecord(memcpy_h2d_start, 0);
hipMemcpy(d_input, h_input, total_input_bytes, hipMemcpyHostToDevice);
hipMemcpy(d_input_offset, h_input_offset, sizeof(int) * (array_size + 1),
hipMemcpyHostToDevice);
if (profiler_mode) hipEventRecord(memcpy_h2d_end, 0);
    // Calculate the size of shared memory (two state vectors: current and future)
int shem = 2 * vec_len * sizeof(ST_BLOCK);
// Launch kernel
if (profiler_mode) hipEventRecord(kernel_start, 0);
hipLaunchKernelGGL(( AS_kernel), dim3(array_size), dim3(threads_per_block), shem, 0,
d_input, d_input_offset, scratch.d_transition_table,
scratch.d_transition_list, scratch.d_init_st_vec, d_final_st_vec,
vec_len, scratch.tg->state_count, scratch.tg->wb_transition_count);
if (profiler_mode) hipEventRecord(kernel_end, 0);
if (profiler_mode) hipEventSynchronize(kernel_end);
// Copy result from device memory into host memory
if (profiler_mode) hipEventRecord(memcpy_d2h_start, 0);
hipMemcpy(h_final_st_vec, d_final_st_vec,
sizeof(ST_BLOCK) * vec_len * array_size, hipMemcpyDeviceToHost);
if (profiler_mode) hipEventRecord(memcpy_d2h_end, 0);
// Get final active states and accept rules for each string
vector<ST_T> final_states[array_size];
// vector<int> accept_rules[array_size];
unordered_map<ST_T, vector<int>>::iterator itr;
for (int i = 0; i < array_size; i++) {
get_active_states(h_final_st_vec + i * vec_len, vec_len,
final_states[i]);
// Get all accept rules for string i
for (int j = 0; j < final_states[i].size(); j++) {
// Get accept rules triggered by this state
itr = scratch.tg->accept_states_rules.find(final_states[i][j]);
if (itr != scratch.tg->accept_states_rules.end()) {
accept_rules[i].insert(accept_rules[i].end(),
itr->second.begin(), itr->second.end());
}
}
// Remove repeated accept rules for string i
sort(accept_rules[i].begin(), accept_rules[i].end());
accept_rules[i].erase(
unique(accept_rules[i].begin(), accept_rules[i].end()),
accept_rules[i].end());
}
// Free device memory
if (profiler_mode) hipEventRecord(memfree_start, 0);
hipFree(d_input);
hipFree(d_input_offset);
hipFree(d_final_st_vec);
if (profiler_mode) hipEventRecord(memfree_end, 0);
// Free host memory
free(h_final_st_vec);
free(h_input);
if (profiler_mode) gettimeofday(&end_time, NULL);
if (show_match_result) show_results(array_size, final_states, accept_rules);
if (profiler_mode) {
Profiler(start_time, end_time, array_size, memalloc_start, memalloc_end,
memcpy_h2d_start, memcpy_h2d_end, kernel_start, kernel_end,
memcpy_d2h_start, memcpy_d2h_end, memfree_start, memfree_end);
}
// Destroy events
if (profiler_mode) {
hipEventDestroy(memalloc_start);
hipEventDestroy(memalloc_end);
hipEventDestroy(memcpy_h2d_start);
hipEventDestroy(memcpy_h2d_end);
hipEventDestroy(kernel_start);
hipEventDestroy(kernel_end);
hipEventDestroy(memcpy_d2h_start);
hipEventDestroy(memcpy_d2h_end);
hipEventDestroy(memfree_start);
hipEventDestroy(memfree_end);
}
}
|
557654fbc8aba80d4e857f6d85a55c31bc1e4099.cu
|
#include "src/nfa_kernels.h"
// iNFAnt traversal algorithm to process multiple strings on a NFA
// input : total input string
// input_offset : offset of each input string
// transition_list : list of (source, destination) tuples
// transition_offset    : index of first transition triggered by each symbol
// init_states_vector   : vector of initial states
// persis_states_vector : vector of persistent states
// final_states_vector : vector of final states
// vector_len : length of state vector (# of ST_BLOCKs)
__global__ void AS_kernel(unsigned char *input, int *input_offset,
ST_BLOCK *transition_table,
Transition *transition_list,
ST_BLOCK *init_states_vector,
ST_BLOCK *final_states_vector, int vector_len,
int state_count, int wb_transition_count) {
// Skip to the right input string
input += input_offset[block_ID];
// Get the size of current input string
int input_bytes = input_offset[block_ID + 1] - input_offset[block_ID];
extern __shared__ ST_BLOCK s_data[]; // shared memory
ST_BLOCK *current_st_vec =
s_data; // current active states in shared memory
ST_BLOCK *future_st_vec =
s_data + vector_len; // future active states in shared memory
Transition tuple = transition_list[0];
ST_T src_state, dst_state;
ST_BLOCK src_bit, dst_bit;
unsigned int src_block, dst_block;
int c;
// Copy initial and persistent states from global memory into shared memory
for (int i = thread_ID; i < vector_len; i += thread_count) {
current_st_vec[i] = init_states_vector[i];
future_st_vec[i] = 0;
}
__syncthreads();
if (wb_transition_count == 0) goto BYPASS_HEAD;
// If the first character is a word character, there is a word boundary
// before the first character
if (!is_word_char(input[0])) goto BYPASS_HEAD;
// For each transition triggered by word boundary
for (int i = thread_ID; i < wb_transition_count; i += thread_count) {
tuple = transition_list[i];
src_state = tuple.src;
dst_state = tuple.dst;
src_bit =
1 << (src_state %
bit_sizeof(ST_BLOCK)); // index of state bit inside the block
dst_bit = 1 << (dst_state % bit_sizeof(ST_BLOCK));
src_block = src_state / bit_sizeof(ST_BLOCK); // index of state block
dst_block = dst_state / bit_sizeof(ST_BLOCK);
// If transition source is set in current active state vector
// (divergence happens here)
if (src_bit & current_st_vec[src_block]) {
// Set transition destination in CURRENT active state vector
            atomicOr(&current_st_vec[dst_block], dst_bit);
}
}
__syncthreads();
BYPASS_HEAD:
// For each byte in the input string
for (int byt = 0; byt < input_bytes; byt++) {
for (int i = thread_ID; i < vector_len; i += thread_count) {
future_st_vec[i] = 0;
}
__syncthreads();
c = (int)(input[byt]);
// For each transition triggered by the character
for (int blk = 0; blk < vector_len; blk++) {
int tmp = current_st_vec[blk];
/*
if (tmp) {
for (int s = blk * bit_sizeof(ST_BLOCK);
s < min((int)((blk + 1) * bit_sizeof(ST_BLOCK)),
(int)state_count);
s++) {
if (tmp & (1 << (s % bit_sizeof(ST_BLOCK)))) {
for (int i = thread_ID; i < vector_len;
i += thread_count) {
future_st_vec[i] |=
transition_table[c * state_count * vector_len +
s * vector_len + i];
}
__syncthreads();
}
}
}*/
while (tmp) {
int pos = __ffs(tmp);
tmp = tmp ^ (1<<(pos-1));
pos = blk * bit_sizeof(ST_BLOCK) + pos-1;
for (int i = thread_ID; i < vector_len;
i += thread_count) {
future_st_vec[i] |=
transition_table[c * state_count * vector_len +
pos * vector_len + i];
}
__syncthreads();
}
}
// Swap current and future active state vector
if (current_st_vec == s_data) {
current_st_vec = s_data + vector_len;
future_st_vec = s_data;
} else {
current_st_vec = s_data;
future_st_vec = s_data + vector_len;
}
__syncthreads();
// No transition triggered by word boundary
if (wb_transition_count == 0) continue;
// If there is NOT a word boundary between input[byt] and input[byt + 1]
// or after the last character
if ((byt < input_bytes - 1 &&
(is_word_char(input[byt]) ^ is_word_char(input[byt + 1])) == 0) ||
(byt == input_bytes - 1 && !is_word_char(input[input_bytes - 1])))
continue;
// For each transition triggered by word boundary
for (int i = thread_ID; i < wb_transition_count; i += thread_count) {
tuple = transition_list[i];
src_state = tuple.src;
dst_state = tuple.dst;
src_bit =
1 << (src_state %
bit_sizeof(
ST_BLOCK)); // index of state bit inside the block
dst_bit = 1 << (dst_state % bit_sizeof(ST_BLOCK));
src_block =
src_state / bit_sizeof(ST_BLOCK); // index of state block
dst_block = dst_state / bit_sizeof(ST_BLOCK);
// If transition source is set in current active state vector
// (divergence happens here)
if (src_bit & current_st_vec[src_block]) {
// Set transition destination in CURRENT active state vector
                atomicOr(&current_st_vec[dst_block], dst_bit);
}
}
__syncthreads();
}
// Copy final active states from shared memory into global memory
for (int i = thread_ID; i < vector_len; i += thread_count) {
final_states_vector[block_ID * vector_len + i] = current_st_vec[i];
}
}
// Host function to run iNFAnt algorithm on GPU
// This function can process multiple strings on a NFA simultaneously
// tg : NFA transition graph
// h_input_array : array of input string in host memory
// input_bytes_array : array of string length
// array_size : array size (# of strings to match)
// threads_per_block : # of threads per block for kernel function
// show_match_result : print regex matching result if this variable is true
void run_AS(struct ita_scratch &scratch, unsigned char **h_input_array,
int *input_bytes_array, int array_size, int threads_per_block,
bool show_match_result, bool profiler_mode,
vector<int> *accept_rules) {
struct timeval start_time, end_time;
cudaEvent_t memalloc_start,
memalloc_end; // start and end events of device memory allocation
cudaEvent_t memcpy_h2d_start,
memcpy_h2d_end; // start and end events of memory copy from host to
// device
cudaEvent_t kernel_start,
kernel_end; // start and end events of kernel execution
cudaEvent_t memcpy_d2h_start,
memcpy_d2h_end; // start and end events of memory copy from device to
// host
cudaEvent_t memfree_start,
memfree_end; // start and end events of device memory free
int vec_len = scratch.tg->init_states_vector
.block_count; // length (# of blocks) of state vector
int total_input_bytes = 0; // sum of string length
// Variables in host memory
unsigned char *h_input; // total input string
int h_input_offset[array_size + 1]; // offsets of all input strings
ST_BLOCK *h_final_st_vec; // final active states of all strings
// Variables in device memory
unsigned char *d_input; // total input string
int *d_input_offset; // offset of each input string
ST_BLOCK *d_final_st_vec;
// Create events
if (profiler_mode) {
cudaEventCreate(&memalloc_start);
cudaEventCreate(&memalloc_end);
cudaEventCreate(&memcpy_h2d_start);
cudaEventCreate(&memcpy_h2d_end);
cudaEventCreate(&kernel_start);
cudaEventCreate(&kernel_end);
cudaEventCreate(&memcpy_d2h_start);
cudaEventCreate(&memcpy_d2h_end);
cudaEventCreate(&memfree_start);
cudaEventCreate(&memfree_end);
gettimeofday(&start_time, NULL);
}
for (int i = 0; i < array_size; i++) {
h_input_offset[i] = total_input_bytes;
total_input_bytes += input_bytes_array[i];
}
h_input_offset[array_size] = total_input_bytes;
h_input = (unsigned char *)malloc(total_input_bytes);
if (!h_input) {
cerr << "Error: allocate host memory to store total input string"
<< endl;
exit(-1);
}
// Copy each string into h_input to construct a big string
for (int i = 0; i < array_size; i++) {
memcpy(h_input + h_input_offset[i], h_input_array[i],
input_bytes_array[i]);
}
// Allocate host memory
h_final_st_vec =
(ST_BLOCK *)malloc(sizeof(ST_BLOCK) * vec_len * array_size);
if (!h_final_st_vec) {
cerr << "Error: allocate host memory to store final state vectors"
<< endl;
exit(-1);
}
// Allocate device memory
if (profiler_mode) cudaEventRecord(memalloc_start, 0);
cudaMalloc((void **)&d_input, total_input_bytes);
cudaMalloc((void **)&d_input_offset, sizeof(int) * (array_size + 1));
cudaMalloc((void **)&d_final_st_vec,
sizeof(ST_BLOCK) * vec_len * array_size);
if (profiler_mode) cudaEventRecord(memalloc_end, 0);
// Copy input from host memory into device memory
if (profiler_mode) cudaEventRecord(memcpy_h2d_start, 0);
cudaMemcpy(d_input, h_input, total_input_bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_input_offset, h_input_offset, sizeof(int) * (array_size + 1),
cudaMemcpyHostToDevice);
if (profiler_mode) cudaEventRecord(memcpy_h2d_end, 0);
    // Calculate the size of shared memory (two state vectors: current and future)
int shem = 2 * vec_len * sizeof(ST_BLOCK);
// Launch kernel
if (profiler_mode) cudaEventRecord(kernel_start, 0);
AS_kernel<<<array_size, threads_per_block, shem>>>(
d_input, d_input_offset, scratch.d_transition_table,
scratch.d_transition_list, scratch.d_init_st_vec, d_final_st_vec,
vec_len, scratch.tg->state_count, scratch.tg->wb_transition_count);
if (profiler_mode) cudaEventRecord(kernel_end, 0);
if (profiler_mode) cudaEventSynchronize(kernel_end);
// Copy result from device memory into host memory
if (profiler_mode) cudaEventRecord(memcpy_d2h_start, 0);
cudaMemcpy(h_final_st_vec, d_final_st_vec,
sizeof(ST_BLOCK) * vec_len * array_size, cudaMemcpyDeviceToHost);
if (profiler_mode) cudaEventRecord(memcpy_d2h_end, 0);
// Get final active states and accept rules for each string
vector<ST_T> final_states[array_size];
// vector<int> accept_rules[array_size];
unordered_map<ST_T, vector<int>>::iterator itr;
for (int i = 0; i < array_size; i++) {
get_active_states(h_final_st_vec + i * vec_len, vec_len,
final_states[i]);
// Get all accept rules for string i
for (int j = 0; j < final_states[i].size(); j++) {
// Get accept rules triggered by this state
itr = scratch.tg->accept_states_rules.find(final_states[i][j]);
if (itr != scratch.tg->accept_states_rules.end()) {
accept_rules[i].insert(accept_rules[i].end(),
itr->second.begin(), itr->second.end());
}
}
// Remove repeated accept rules for string i
sort(accept_rules[i].begin(), accept_rules[i].end());
accept_rules[i].erase(
unique(accept_rules[i].begin(), accept_rules[i].end()),
accept_rules[i].end());
}
// Free device memory
if (profiler_mode) cudaEventRecord(memfree_start, 0);
cudaFree(d_input);
cudaFree(d_input_offset);
cudaFree(d_final_st_vec);
if (profiler_mode) cudaEventRecord(memfree_end, 0);
// Free host memory
free(h_final_st_vec);
free(h_input);
if (profiler_mode) gettimeofday(&end_time, NULL);
if (show_match_result) show_results(array_size, final_states, accept_rules);
if (profiler_mode) {
Profiler(start_time, end_time, array_size, memalloc_start, memalloc_end,
memcpy_h2d_start, memcpy_h2d_end, kernel_start, kernel_end,
memcpy_d2h_start, memcpy_d2h_end, memfree_start, memfree_end);
}
// Destroy events
if (profiler_mode) {
cudaEventDestroy(memalloc_start);
cudaEventDestroy(memalloc_end);
cudaEventDestroy(memcpy_h2d_start);
cudaEventDestroy(memcpy_h2d_end);
cudaEventDestroy(kernel_start);
cudaEventDestroy(kernel_end);
cudaEventDestroy(memcpy_d2h_start);
cudaEventDestroy(memcpy_d2h_end);
cudaEventDestroy(memfree_start);
cudaEventDestroy(memfree_end);
}
}
|
d011658b0909614a53741a5a371f6f0bdd07886a.hip
|
// !!! This is a file automatically generated by hipify!!!
extern "C"
{
#include "completion.h"
#include "base.h"
#include "ciss.h"
#include <stdio.h>
#include <sys/time.h>
#include <stdlib.h>
#include <time.h>
#include <stdint.h>
}
#include "als.cuh"
#include "loss.cuh"
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include <cusparse_v2.h>
#include <hipsolver.h>
#include <omp.h>
#define HANDLE_SOLVERERR( err ) (HandleSolverErr( err, __FILE__, __LINE__ ))
static void HandleSolverErr( hipsolverStatus_t err, const char *file, int line )
{
    if(err != HIPSOLVER_STATUS_SUCCESS)
{
fprintf(stderr, "ERROR: in %s at line %d (error-code %d)\n",
file, line, err );
fflush(stdout);
exit(-1);
}
}
/*
/**
* @brief Compute the Cholesky decomposition of the normal equations and solve
* for out_row. We only compute the upper-triangular portion of 'neqs',
* so work with the lower-triangular portion when column-major
* (for Fortran).
*
* @param neqs The NxN normal equations.
* @param[out] out_row The RHS of the equation. Updated in place.
* @param N The rank of the problem.
static inline void p_invert_row(
double * const restrict neqs,
double * const restrict out_row,
idx_t const N)
{
char uplo = 'L';
int order = (int) N;
int lda = (int) N;
int info;
LAPACK_DPOTRF(&uplo, &order, neqs, &lda, &info);
if(info) {
fprintf(stderr, "SPLATT: DPOTRF returned %d\n", info);
}
int nrhs = 1;
int ldb = (int) N;
LAPACK_DPOTRS(&uplo, &order, &nrhs, neqs, &lda, out_row, &ldb, &info);
if(info) {
fprintf(stderr, "SPLATT: DPOTRS returned %d\n", info);
}
}
/**
* @brief Compute DSYRK: out += A^T * A, a rank-k update. Only compute
* the upper-triangular portion.
*
* @param A The input row(s) to update with.
* @param N The length of 'A'.
* @param nvecs The number of rows in 'A'.
 * @param nflush The number of times this has been performed (this slice).
* @param[out] out The NxN matrix to update.
static inline void p_vec_oprod(
double * const restrict A,
idx_t const N,
idx_t const nvecs,
idx_t const nflush,
double * const restrict out)
{
char uplo = 'L';
char trans = 'N';
int order = (int) N;
int k = (int) nvecs;
int lda = (int) N;
int ldc = (int) N;
double alpha = 1;
double beta = (nflush == 0) ? 0. : 1.;
LAPACK_DSYRK(&uplo, &trans, &order, &k, &alpha, A, &lda, &beta, out, &ldc);
}
static void p_process_slice3(
csf_sptensor * csf,
idx_t const tile,
idx_t const i,
double * A,
double * B,
idx_t const DEFAULT_NFACTORS,
double * out_row,
double * accum,
double * neqs,
double * neqs_buf,
idx_t * const nflush)
{
csf_sparsity const * const pt = csf->pt + tile;
idx_t const * const restrict sptr = pt->fptr[0];
idx_t const * const restrict fptr = pt->fptr[1];
idx_t const * const restrict fids = pt->fids[1];
idx_t const * const restrict inds = pt->fids[2];
double const * const restrict vals = pt->vals;
double * hada = neqs_buf;
idx_t bufsize = 0;
/* process each fiber
for(idx_t fib=sptr[i]; fib < sptr[i+1]; ++fib) {
double const * const restrict av = A + (fids[fib] * DEFAULT_NFACTORS);
/* first entry of the fiber is used to initialize accum
idx_t const jjfirst = fptr[fib];
double const vfirst = vals[jjfirst];
double const * const restrict bv = B + (inds[jjfirst] * DEFAULT_NFACTORS);
for(idx_t r=0; r < DEFAULT_NFACTORS; ++r) {
accum[r] = vfirst * bv[r];
hada[r] = av[r] * bv[r];
}
hada += DEFAULT_NFACTORS;
if(++bufsize == ALS_BUFSIZE) {
/* add to normal equations
p_vec_oprod(neqs_buf, DEFAULT_NFACTORS, bufsize, (*nflush)++, neqs);
bufsize = 0;
hada = neqs_buf;
}
/* foreach nnz in fiber
for(idx_t jj=fptr[fib]+1; jj < fptr[fib+1]; ++jj) {
double const v = vals[jj];
double const * const restrict bv = B + (inds[jj] * DEFAULT_NFACTORS);
for(idx_t r=0; r < DEFAULT_NFACTORS; ++r) {
accum[r] += v * bv[r];
hada[r] = av[r] * bv[r];
}
hada += DEFAULT_NFACTORS;
if(++bufsize == ALS_BUFSIZE) {
/* add to normal equations
p_vec_oprod(neqs_buf, DEFAULT_NFACTORS, bufsize, (*nflush)++, neqs);
bufsize = 0;
hada = neqs_buf;
}
}
/* accumulate into output row
for(idx_t r=0; r < DEFAULT_NFACTORS; ++r) {
out_row[r] += accum[r] * av[r];
}
} /* foreach fiber
/* final flush
p_vec_oprod(neqs_buf, DEFAULT_NFACTORS, bufsize, (*nflush)++, neqs);
}
//private function TODO: in gpu
/**
 * @brief Compute the i-th row of the MTTKRP, form the normal equations, and
 *        store the new row.
* @param neq for inverse part
* @param out_row for mttkrp part
* @param i The row to update.
* @param reg Regularization parameter for the i-th row.
static void p_update_slice(
sptensor_t * train,
idx_t const i,
double const regularization_index,
idx_t DEFAULT_NFACTORS
)
{
idx_t const nmodes = train->nmodes;
/* fid is the row we are actually updating
idx_t const fid = (pt->fids[0] == NULL) ? i : pt->fids[0][i];
double * const restrict out_row = model->factors[csf->dim_perm[0]] +
(fid * DEFAULT_NFACTORS);
double * const restrict accum = ws->thds[tid].scratch[1];
double * const restrict neqs = ws->thds[tid].scratch[2];
idx_t bufsize = 0; /* how many hada vecs are in mat_accum
idx_t nflush = 0; /* how many times we have flushed to add to the neqs
double * const restrict mat_accum = ws->thds[tid].scratch[3];
double * hada = mat_accum;
double * const restrict hada_accum = ws->thds[tid].scratch[4];
/* clear out buffers
for(idx_t m=0; m < nmodes; ++m) {
for(idx_t f=0; f < DEFAULT_NFACTORS; ++f) {
accum[f + (m*DEFAULT_NFACTORS)] = 0.;
}
for(idx_t f=0; f < DEFAULT_NFACTORS; ++f) {
hada_accum[f + (m*DEFAULT_NFACTORS)] = 0.;
}
}
for(idx_t f=0; f < DEFAULT_NFACTORS; ++f) {
out_row[f] = 0;
}
/* grab factors
double * mats[MAX_NMODES];
for(idx_t m=0; m < nmodes; ++m) {
mats[m] = model->factors[csf->dim_perm[m]];
}
/* do MTTKRP + dsyrk
p_process_slice(csf, 0, i, mats, DEFAULT_NFACTORS, out_row, accum, neqs, mat_accum,
hada_accum, &nflush);
/* add regularization to the diagonal
for(idx_t f=0; f < DEFAULT_NFACTORS; ++f) {
neqs[f + (f * DEFAULT_NFACTORS)] += reg;
}
/* solve!
p_invert_row(neqs, out_row, DEFAULT_NFACTORS);
}
/**
* @brief To update the factor matrices in als
static void p_update_als(
sptensor_t * train,
ordi_matrix ** mats,
double regularization_index,
idx_t DEFAULT_NFACTORS
)
{
//for (i in all i number):
// p_update_slice();
} */
// gpu global function
/**
* @brief For computing the mttkrp in als
* @version Now only contains the atomic operation
*/
__global__ void p_mttkrp_gpu(cissbasic_t* d_traina,
ordi_matrix * d_factora,
ordi_matrix * d_factorb,
ordi_matrix * d_factorc,
double * d_hbuffer,
idx_t tilenum
)
{
//get thread and block index
idx_t bid = blockIdx.x;
idx_t tid = threadIdx.x;
idx_t tileid = bid * DEFAULT_BLOCKSIZE + tid;
uint8_t flag;
double * entries = d_traina -> entries;
idx_t localtile = tileid * ((DEFAULT_T_TILE_LENGTH + 1) * DEFAULT_T_TILE_WIDTH);
double __align__(256) localtbuffer[6];
double __align__(256) localmbuffer[2 * DEFAULT_NFACTORS];
//do the mttkrp
if(tileid < tilenum)
{
//get supportive information for tiles
idx_t f_id = (idx_t)(entries[localtile] * (-1)) ;
idx_t l_id = (idx_t)(entries[localtile+1] * (-1)) ;
idx_t bitmap = (idx_t)(entries[localtile+2]);
//if(bitmap == 0) break;
#ifdef DEBUG
if(tileid == 0)
{
printf("f_id %ld, l_id %ld, bitmap %ld\n", f_id, l_id, bitmap);
}
#endif
bitmap = __brevll(bitmap);
while((bitmap & 1) == 0) {bitmap = bitmap >> 1;}
bitmap = bitmap >> 1;
localtile += DEFAULT_T_TILE_WIDTH;
#ifdef DEBUG
if(tileid == 0)
{
printf("f_id %ld, l_id %ld, bitmap %ld\n", f_id, l_id, bitmap);
}
#endif
//load in vectorize
for(int m = 0; m < ((idx_t)DEFAULT_T_TILE_LENGTH) / 2; m++ )
{
//unroll loop and load
//((double2*)localtbuffer)[0] = ((double2*)(entries+localtile))[0];
//((double2*)localtbuffer)[1] = ((double2*)(entries+localtile))[1];
//((double2*)localtbuffer)[2] = ((double2*)(entries+localtile))[2];
localtbuffer[0] = entries[localtile];
localtbuffer[1] = entries[localtile + 1];
localtbuffer[2] = entries[localtile + 2];
localtbuffer[3] = entries[localtile + 3];
localtbuffer[4] = entries[localtile + 4];
localtbuffer[5] = entries[localtile + 5];
//do the mttkrp for the first
f_id = f_id + (!(bitmap & 1));
idx_t tmpi = d_traina->directory[f_id];
tmpi--;
#ifdef DEBUG
printf("the fid is %d\n", f_id);
#endif
bitmap = bitmap >> 1;
if((localtbuffer[0] == -1) && (localtbuffer[1] == -1)) break;
for(int j = 0; j < DEFAULT_NFACTORS; j++)
{
double b = d_factorb->values[((idx_t)localtbuffer[0]*DEFAULT_NFACTORS - DEFAULT_NFACTORS ) + j];
double c = d_factorc->values[((idx_t)localtbuffer[1]*DEFAULT_NFACTORS - DEFAULT_NFACTORS) + j];
localmbuffer[j] = b * c;
atomicAdd(&(d_factora->values[tmpi * DEFAULT_NFACTORS + j]), localmbuffer[j] * localtbuffer[2]);
}
//if(localtbuffer[0] == -1 && localtbuffer[1] == -1) break;
/*for(int j = 0; j < DEFAULT_NFACTORS; j++)
{
idx_t b = d_factorb->values[(idx_t)(localtbuffer[0]*DEFAULT_NFACTORS - DEFAULT_NFACTORS) + j];
idx_t c = d_factorc->values[(idx_t)(localtbuffer[1]*DEFAULT_NFACTORS - DEFAULT_NFACTORS) + j];
localmbuffer[j] = b * c;
atomicAdd(&(d_factora->values[tmpi * DEFAULT_NFACTORS + j]), localmbuffer[j] * localtbuffer[2]);
}*/
//do the mttkrp for the second
flag = !(bitmap & 1);
f_id = f_id + (!(bitmap & 1));
#ifdef DEBUG
printf("the fid is %d\n", f_id);
#endif
tmpi = d_traina->directory[f_id];
tmpi--;
bitmap = bitmap >> 1;
if((localtbuffer[0] == -1) && (localtbuffer[1] == -1)) break;
for(int j = 0; j < DEFAULT_NFACTORS; j++)
{
        double b = d_factorb->values[((idx_t)localtbuffer[3]*DEFAULT_NFACTORS - DEFAULT_NFACTORS) + j];
        double c = d_factorc->values[((idx_t)localtbuffer[4]*DEFAULT_NFACTORS - DEFAULT_NFACTORS) + j];
localmbuffer[DEFAULT_NFACTORS + j] = b * c;
atomicAdd(&(d_factora->values[tmpi * DEFAULT_NFACTORS + j]), localmbuffer[DEFAULT_NFACTORS + j] * localtbuffer[5]);
}
//compute the HTH for the first
//compute the HTH for the second
if(flag)
{
for(int i = 0; i < DEFAULT_NFACTORS; i++)
{
for(int j = 0; j <=i ; j++)
{
double presult1 = localmbuffer[i] * localmbuffer[j];
double presult2 = localmbuffer[DEFAULT_NFACTORS + i] * localmbuffer[DEFAULT_NFACTORS + j];
atomicAdd(&(d_hbuffer[(f_id - flag) * DEFAULT_NFACTORS * DEFAULT_NFACTORS + i * DEFAULT_NFACTORS + j]), presult1);
atomicAdd(&(d_hbuffer[f_id * DEFAULT_NFACTORS * DEFAULT_NFACTORS + i * DEFAULT_NFACTORS + j]), presult2);
}
}
}
else
{
for(int i = 0; i < DEFAULT_NFACTORS; i++)
{
for(int j = 0; j <=i ; j++)
{
double presult = localmbuffer[i] * localmbuffer[j] + localmbuffer[DEFAULT_NFACTORS + i] * localmbuffer[DEFAULT_NFACTORS + j];
atomicAdd(&(d_hbuffer[f_id * DEFAULT_NFACTORS * DEFAULT_NFACTORS + i * DEFAULT_NFACTORS + j]), presult);
}
}
}
localtile += 2*DEFAULT_T_TILE_WIDTH;
}
}
}
/**
* @brief For computing the mttkrp in als, only one element on one thread
 * @version Now reduces atomic adds with a segmented scan
*/
__global__ void p_mttkrp_gpu_as(cissbasic_t* d_traina,
ordi_matrix * d_factora,
ordi_matrix * d_factorb,
ordi_matrix * d_factorc,
double * d_hbuffer,
//double * d_hthbuffer,
idx_t tilenum)
{
//get block, warp and thread index
__shared__ uint32_t warpmask[((idx_t)DEFAULT_BLOCKSIZE)/((idx_t)ALS_WARPSIZE)];
idx_t bid = blockIdx.x;
idx_t tid = threadIdx.x;
idx_t warpid = tid / ((idx_t)ALS_WARPSIZE);
idx_t laneid = tid % ((idx_t)ALS_WARPSIZE);
idx_t tileid = bid * ((idx_t)DEFAULT_BLOCKSIZE)/((idx_t)ALS_WARPSIZE) + warpid;
double * entries = d_traina -> entries;
idx_t localtile = tileid * ((DEFAULT_T_TILE_LENGTH + 1) * DEFAULT_T_TILE_WIDTH);
double __align__(256) localtbuffer[3];
double __align__(256) localmbuffer[DEFAULT_NFACTORS];
double mytmp = 0, myntmp = 0;
//initialize the warp mask
if(laneid == 0) warpmask[warpid] = 0xffffffff;
if((tileid * DEFAULT_T_TILE_LENGTH + laneid) == d_traina->nnz)
{
//redefine the mask
warpmask[warpid] = __brev((warpmask[warpid]<<(32-laneid)));
}
__syncwarp();
uint32_t mymask = warpmask[warpid];
#ifdef ALSAS_DEBUG
//printf("now the mymask and mynnz id in thread %ld are %x and %ld\n", tid, mymask, (tileid * DEFAULT_T_TILE_LENGTH + laneid));
#endif
if((tileid < tilenum) && ((tileid * DEFAULT_T_TILE_LENGTH + laneid)<d_traina->nnz))
{
//initialize the information for tile and local entry
idx_t f_id = (idx_t)(entries[localtile] * (-1)) ;
idx_t l_id = (idx_t)(entries[localtile+1] * (-1)) ;
idx_t bitmap = (idx_t)(entries[localtile+2]);
if(bitmap != 0)
{
bitmap = __brevll(bitmap);
while((bitmap & 1) == 0) {bitmap = bitmap >> 1;}
bitmap = bitmap >> 1;
idx_t itercounter = __popcll(bitmap) - (bitmap & 1);
#ifdef ALSAS_DEBUG
//if(laneid == 0)
//printf("now the itercounter is %ld\n", itercounter);
#endif
idx_t myfid = f_id + laneid - __popcll((bitmap << (63-laneid))) + 1;
#ifdef ALSAS_DEBUG
//printf("now the myfid in thread %ld is %ld\n", tid, myfid);
#endif
idx_t mybit = ((bitmap >> (laneid)) & 1);
idx_t mylbit = mybit;
if(laneid == 0)
{
mylbit = 0;
mybit = 1;
}
//inter thread computation
localtbuffer[0] = entries[localtile + (laneid + 1) * DEFAULT_T_TILE_WIDTH];
localtbuffer[1] = entries[localtile + (laneid + 1) * DEFAULT_T_TILE_WIDTH + 1];
localtbuffer[2] = entries[localtile + (laneid + 1) * DEFAULT_T_TILE_WIDTH + 2];
idx_t tmpi = d_traina->directory[myfid] - 1;
idx_t b = (idx_t)localtbuffer[0] - 1;
idx_t c = (idx_t)localtbuffer[1] - 1;
//for the hadamard
#ifdef ALSAS_DEBUG
//printf("now the myposition for hthbuffer in thread %ld is %ld\n", tid, (tileid * DEFAULT_T_TILE_LENGTH + laneid));
#endif
for(int m = 0; m < DEFAULT_NFACTORS; m++)
{
localmbuffer[m] = d_factorb->values[b * DEFAULT_NFACTORS + m] * d_factorc->values[c * DEFAULT_NFACTORS + m];
//d_hthbuffer[(tileid * DEFAULT_T_TILE_LENGTH + laneid)*DEFAULT_NFACTORS + m] = localmbuffer[m];
}
__syncwarp(mymask);
//reduction in hth
//mytmp: final partial result; myntmp: messages
for(int m = 0; m < DEFAULT_NFACTORS; m++)
{
for(int j = 0; j <=m ; j++)
{
mytmp = localmbuffer[m] * localmbuffer[j];
myntmp = mybit * mytmp;
__syncwarp(mymask);
//now the reduction
for(int i = 0; i < itercounter; i++)
{
mytmp = (__shfl_down_sync(mymask, myntmp, 1, (int)ALS_WARPSIZE)) + (!(mylbit)) * mytmp;
myntmp = mybit * mytmp;
__syncwarp(mymask);
}
if(!mybit)
{
atomicAdd(&(d_hbuffer[myfid * DEFAULT_NFACTORS * DEFAULT_NFACTORS + m * DEFAULT_NFACTORS + j]), mytmp);
}
__syncwarp(mymask);
}
}
__syncwarp(mymask);
//reduction in mttkrp
for(int m = 0; m < DEFAULT_NFACTORS; m++)
{
mytmp = localmbuffer[m] * localtbuffer[2];
myntmp = mybit * mytmp;
__syncwarp(mymask);
//now the reduction
for(int i = 0; i < itercounter; i++)
{
mytmp = (__shfl_down_sync(mymask, myntmp, 1, (int)ALS_WARPSIZE)) + (!(mylbit)) * mytmp;
myntmp = mybit * mytmp;
__syncwarp(mymask);
}
if(!mybit)
{
atomicAdd(&(d_factora->values[tmpi * DEFAULT_NFACTORS + m]), mytmp);
}
__syncwarp(mymask);
}
}
}
__syncthreads();
}
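// Illustrative sketch (added for exposition, not part of the original file):
// the reductions above are segmented warp scans: per-lane head flags
// (mybit / mylbit) stop the running sum at fiber boundaries. The kernel below
// shows the plain building block, a full-warp sum via __shfl_down_sync,
// assuming 32-lane warps and a hypothetical launch of one 32-thread block per
// output element; the kernel name and buffers are assumptions added here.
__global__ void sketch_warp_sum(const double * in, double * out)
{
    unsigned int lane = threadIdx.x & 31u;          // lane id within the warp
    double v = in[blockIdx.x * 32 + threadIdx.x];   // one value per lane
    // Shuffle-based tree reduction: fold the upper half onto the lower half
    for(int offset = 16; offset > 0; offset >>= 1)
    {
        v += __shfl_down_sync(0xffffffffu, v, offset, 32);
    }
    if(lane == 0) out[blockIdx.x] = v;              // lane 0 holds the warp total
}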
/**
 * @brief Update the H matrices and prepare for the inversion and the equation solve
* @version Warp shuffle
**/
__global__ void p_hth_update_as(cissbasic_t * d_traina,
double * d_hthbuffer,
double * d_value_a,
double * d_hbuffer,
double ** d_hbufptr,
double ** d_factptr,
idx_t dlength,
double regularization_index)
{
__shared__ double blkmbuffer[((idx_t)DEFAULT_BLOCKSIZE)/((idx_t)ALS_WARPSIZE) * (idx_t)DEFAULT_NFACTORS];
//get block, warp and thread index
idx_t bid = blockIdx.x;
idx_t tid = threadIdx.x;
idx_t warpid = tid / ((idx_t)ALS_WARPSIZE);
idx_t laneid = tid % ((idx_t)ALS_WARPSIZE);
idx_t tileid = bid * ((idx_t)DEFAULT_BLOCKSIZE)/((idx_t)ALS_WARPSIZE) + warpid;
double __align__(256) localhthbuffer[DEFAULT_NFACTORS]={0};
if(tileid < dlength && laneid < DEFAULT_NFACTORS)
{
idx_t dcounter = d_traina->dcounter[tileid+1] - d_traina->dcounter[tileid];
#ifdef ALSAS_DEBUG
        if(laneid == 0) printf("my dcounter is %ld and my tileid is %ld\n", dcounter, tileid);
#endif
idx_t basicposition = d_traina->dcounter[tileid];
idx_t basicsposition = warpid * DEFAULT_NFACTORS;
for(idx_t i = 0; i < dcounter; i++)
{
double localvalue = d_hthbuffer[(basicposition + i) * DEFAULT_NFACTORS + laneid];
blkmbuffer[basicsposition + laneid] = localvalue;
__syncwarp();
for(idx_t j = 0; j < DEFAULT_NFACTORS; j++)
{
localhthbuffer[j] += localvalue * blkmbuffer[basicsposition + j];
}
}
__syncwarp();
localhthbuffer[laneid] += regularization_index;
for(idx_t i = 0; i < DEFAULT_NFACTORS; i++)
{
d_hbuffer[tileid * DEFAULT_NFACTORS * DEFAULT_NFACTORS + laneid * DEFAULT_NFACTORS + i] = localhthbuffer[i];
}
__syncwarp();
//prepare for ptrs
if(laneid == 0)
{
idx_t fid = d_traina->directory[tileid] - 1;
d_factptr[tileid] = d_value_a + fid * DEFAULT_NFACTORS;
d_hbufptr[tileid] = d_hbuffer + tileid * DEFAULT_NFACTORS * DEFAULT_NFACTORS;
}
}
__syncwarp();
}
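// Illustrative sketch (host-side, added for exposition): per output row,
// p_hth_update_as accumulates the Gram matrix sum_i h_i h_i^T of the Hadamard
// products and then adds the regularization term on the diagonal. The
// reference below restates that accumulation sequentially; the function name
// and the row-major (nfactors x nfactors) output layout are assumptions.
static void sketch_accumulate_hth(const double * h, idx_t count, idx_t nfactors,
                                  double regularization_index, double * out)
{
    for(idx_t i = 0; i < count; i++)
    {
        const double * hi = h + i * nfactors;
        for(idx_t m = 0; m < nfactors; m++)
        {
            for(idx_t j = 0; j < nfactors; j++)
            {
                out[m * nfactors + j] += hi[m] * hi[j];
            }
        }
    }
    for(idx_t m = 0; m < nfactors; m++)
    {
        out[m * nfactors + m] += regularization_index;  // ridge term on the diagonal
    }
}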
/**
* @brief Compute the inverse and finish the final update
* @version Now only with coarse grain
*/
__global__ void p_update_als_gpu(cissbasic_t * d_traina,
ordi_matrix * d_factora,
double * d_hbuffer,
idx_t dlength,
double regularization_index
)
{
idx_t bid = blockIdx.x;
idx_t tid = threadIdx.x;
idx_t tileid = bid * DEFAULT_BLOCKSIZE + tid;
idx_t basicposition = tileid * DEFAULT_NFACTORS * DEFAULT_NFACTORS;
double lv[DEFAULT_NFACTORS * DEFAULT_NFACTORS]={0};
if(tileid < dlength)
{
//compute the inverse
idx_t tmpi = d_traina->directory[tileid];
tmpi--;
double *av = d_hbuffer + basicposition;
idx_t i = 0;
idx_t j = 0;
idx_t k = 0;
for (i = 0; i < DEFAULT_NFACTORS; ++i)
{
for (j = 0; j <= i; ++j)
{
double inner = 0;
for (k = 0; k < j; ++k)
{
inner += lv[k+(i*DEFAULT_NFACTORS)] * lv[k+(j*DEFAULT_NFACTORS)];
}
if(i == j)
{
lv[j+(i*DEFAULT_NFACTORS)] = sqrt(av[i+(i*DEFAULT_NFACTORS)] - inner + regularization_index);
}
else
{
lv[j+(i*DEFAULT_NFACTORS)] = 1.0 / lv[j+(j*DEFAULT_NFACTORS)] * (av[j+(i*DEFAULT_NFACTORS)] - inner);
}
}
}
for(i = 0; i< DEFAULT_NFACTORS * DEFAULT_NFACTORS; i++)
{
av[i] = 0;
}
idx_t n = 0;
for(n=0; n<DEFAULT_NFACTORS; n++) //get identity matrix
{
av[n+(n*DEFAULT_NFACTORS)] = 1.0;
}
//forward solve
i = 1; //define counters outside the loop
j = 0;
idx_t f = 0;
for(j=0; j < DEFAULT_NFACTORS; ++j)
{
av[j] /= lv[0];
}
for(i=1; i < DEFAULT_NFACTORS; ++i)
{
/* X(i,f) = B(i,f) - \sum_{j=0}^{i-1} L(i,j)X(i,j) */
for(j=0; j < i; ++j)
{
for(f=0; f < DEFAULT_NFACTORS; ++f)
{
av[f+(i*DEFAULT_NFACTORS)] -= lv[j+(i*DEFAULT_NFACTORS)] * av[f+(j*DEFAULT_NFACTORS)];
}
}
for(f=0; f <DEFAULT_NFACTORS; ++f)
{
av[f+(i*DEFAULT_NFACTORS)] /= lv[i+(i*DEFAULT_NFACTORS)];
}
}
for(i=0; i < DEFAULT_NFACTORS; ++i)
{
for(j=i+1; j < DEFAULT_NFACTORS; ++j)
{
lv[j+(i*DEFAULT_NFACTORS)] = lv[i+(j*DEFAULT_NFACTORS)];
lv[i+(j*DEFAULT_NFACTORS)] = 0.0;
}
}
//backsolve
f = 0; //set counters
j = 0;
idx_t row = 2;
/* last row of X is easy */
for(f=0; f < DEFAULT_NFACTORS; ++f) {
i = DEFAULT_NFACTORS - 1;
av[f+(i*DEFAULT_NFACTORS)] /= lv[i+(i*DEFAULT_NFACTORS)];
}
/* now do backward substitution */
for(row=2; row <= DEFAULT_NFACTORS; ++row)
{
i = DEFAULT_NFACTORS - row;
/* X(i,f) = B(i,f) - \sum_{j=0}^{i-1} R(i,j)X(i,j) */
for( j=i+1; j < DEFAULT_NFACTORS; ++j)
{
for( f=0; f < DEFAULT_NFACTORS; ++f)
{
av[f+(i*DEFAULT_NFACTORS)] -= lv[j+(i*DEFAULT_NFACTORS)] * av[f+( j * DEFAULT_NFACTORS )];
}
}
for(f=0; f < DEFAULT_NFACTORS; ++f)
{
av[f+(i*DEFAULT_NFACTORS)] /= lv[i+(i*DEFAULT_NFACTORS)];
}
}
//now do the final update
double * mvals = d_factora->values + tmpi * DEFAULT_NFACTORS;
for(i = 0; i < DEFAULT_NFACTORS; i++)
{
lv[i] = 0;
for(j = 0; j < DEFAULT_NFACTORS; j++)
{
lv[i] += mvals[j] * av[i * DEFAULT_NFACTORS + j];
}
}
//the final transmission
for(i = 0; i < DEFAULT_NFACTORS/2; i++)
{
((double2*)mvals)[i] = ((double2*)lv)[i];
}
}
}
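// Illustrative sketch (host-side, added for exposition): p_update_als_gpu
// inlines a Cholesky factorization (A = L * L^T), a forward solve and a
// backward solve to apply the inverse of the regularized normal equations.
// The reference below performs the same three steps for a single right-hand
// side b (overwritten with the solution). It is only a sketch: the function
// name is hypothetical, a is a row-major n x n SPD matrix, and sqrt() from
// <math.h> is assumed to be available.
static void sketch_cholesky_solve(double * a, double * b, idx_t n)
{
    // Factorization: overwrite the lower triangle of a with L
    for(idx_t i = 0; i < n; i++)
    {
        for(idx_t j = 0; j <= i; j++)
        {
            double inner = 0;
            for(idx_t k = 0; k < j; k++) inner += a[i * n + k] * a[j * n + k];
            if(i == j) a[i * n + i] = sqrt(a[i * n + i] - inner);
            else a[i * n + j] = (a[i * n + j] - inner) / a[j * n + j];
        }
    }
    // Forward solve L y = b (y overwrites b)
    for(idx_t i = 0; i < n; i++)
    {
        for(idx_t k = 0; k < i; k++) b[i] -= a[i * n + k] * b[k];
        b[i] /= a[i * n + i];
    }
    // Backward solve L^T x = y (x overwrites b); loop runs i = n..1 to stay
    // safe with an unsigned idx_t
    for(idx_t i = n; i > 0; i--)
    {
        for(idx_t k = i; k < n; k++) b[i - 1] -= a[k * n + (i - 1)] * b[k];
        b[i - 1] /= a[(i - 1) * n + (i - 1)];
    }
}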
/**
 * @brief Update the matrices
* @version Now only with coarse grain
*/
__global__ void p_update_matrice(cissbasic_t * d_traina,
double * d_value_a,
double * d_hbuffer,
double ** d_hbufptr,
double ** d_factptr,
idx_t dlength,
double regularization_index)
{
idx_t bid = blockIdx.x;
idx_t tid = threadIdx.x;
idx_t tileid = bid * DEFAULT_BLOCKSIZE + tid;
idx_t basicposition = tileid * DEFAULT_NFACTORS * DEFAULT_NFACTORS;
if(tileid < dlength)
{
idx_t tmpi = d_traina->directory[tileid] - 1;
for(idx_t f = 0; f < DEFAULT_NFACTORS; f++)
{
d_hbuffer[basicposition + f*DEFAULT_NFACTORS + f] += regularization_index;
}
d_hbufptr[tileid] = d_hbuffer + basicposition;
d_factptr[tileid] = d_value_a + tmpi * DEFAULT_NFACTORS;
}
}
void p_cholecheck(double * d_factora,
double * d_hbuffer,
double ** d_hbufptr,
double ** d_factptr,
idx_t dlength)
{
}
extern "C"{
/**
* @brief The main function for tensor completion in als
* @param train The tensor for generating factor matrices
* @param validation The tensor for validation(RMSE)
* @param test The tensor for testing the quality
* @param regularization_index Lambda
*/
idx_t tc_als(sptensor_t * traina,
sptensor_t * trainb,
sptensor_t * trainc,
sptensor_t * validation,
sptensor_t * test,
ordi_matrix ** mats,
ordi_matrix ** best_mats,
idx_t algorithm_index,
double regularization_index,
double * best_rmse,
double * tolerance,
int SGD_DEFAULT_BLOCKSIZE,
int SGD_DEFAULT_T_TILE_LENGTH,
idx_t * nbadepochs,
idx_t * bestepochs,
idx_t * max_badepochs)
{
idx_t const nmodes = traina->nmodes;
#ifdef CISS_DEBUG
printf("enter the als\n");
#endif
//initialize the devices
int deviceCount;
hipGetDeviceCount(&deviceCount);
int n;
//print the GPU status
for(n = 0; n < deviceCount; n++)
{
hipDeviceProp_t dprop;
hipGetDeviceProperties(&dprop, n);
printf(" %d: %s\n", n, dprop.name);
}
omp_set_num_threads(deviceCount);
//prepare the tensor in TB-COO
ciss_t * h_cissta = ciss_alloc(traina, 1, deviceCount, SGD_DEFAULT_T_TILE_LENGTH);
ciss_t * h_cisstb = ciss_alloc(trainb, 2, deviceCount, SGD_DEFAULT_T_TILE_LENGTH);
ciss_t * h_cisstc = ciss_alloc(trainc, 3, deviceCount, SGD_DEFAULT_T_TILE_LENGTH);
#ifdef MCISS_DEBUG
fprintf(stdout, "the new tensors for mode 0\n");
cissbasic_display(h_cissta->cissunits[0]);
cissbasic_display(h_cissta->cissunits[1]);
#endif
struct timeval start;
struct timeval end;
idx_t diff;
cissbasic_t ** d_traina = (cissbasic_t**)malloc(deviceCount * sizeof(cissbasic_t*));
cissbasic_t ** d_trainb = (cissbasic_t**)malloc(deviceCount * sizeof(cissbasic_t*));
cissbasic_t ** d_trainc = (cissbasic_t**)malloc(deviceCount * sizeof(cissbasic_t*));
idx_t ** d_directory_a = (idx_t**)malloc(deviceCount * sizeof(idx_t*));
idx_t ** d_directory_b = (idx_t**)malloc(deviceCount * sizeof(idx_t*));
idx_t ** d_directory_c = (idx_t**)malloc(deviceCount * sizeof(idx_t*));
idx_t ** d_counter_a = (idx_t**)malloc(deviceCount * sizeof(idx_t*));
idx_t ** d_counter_b = (idx_t**)malloc(deviceCount * sizeof(idx_t*));
idx_t ** d_counter_c = (idx_t**)malloc(deviceCount * sizeof(idx_t*));
idx_t ** d_dims_a = (idx_t**)malloc(deviceCount * sizeof(idx_t*));
idx_t ** d_dims_b = (idx_t**)malloc(deviceCount * sizeof(idx_t*));
idx_t ** d_dims_c = (idx_t**)malloc(deviceCount * sizeof(idx_t*));
double ** d_entries_a = (double**)malloc(deviceCount * sizeof(double*));
double ** d_entries_b = (double**)malloc(deviceCount * sizeof(double*));
double ** d_entries_c = (double**)malloc(deviceCount * sizeof(double*));
double ** d_hbuffer = (double**)malloc(deviceCount * sizeof(double*));
//double ** d_hthbuffer = (double**)malloc(deviceCount * sizeof(double*));
int ** d_infoarray = (int**)malloc(deviceCount * sizeof(int*));
double *** d_hbufptr = (double***)malloc(deviceCount * sizeof(double**));
double *** d_factptr = (double***)malloc(deviceCount * sizeof(double**));
ordi_matrix ** d_factora = (ordi_matrix**)malloc(deviceCount * sizeof(ordi_matrix*));
ordi_matrix ** d_factorb = (ordi_matrix**)malloc(deviceCount * sizeof(ordi_matrix*));
ordi_matrix ** d_factorc = (ordi_matrix**)malloc(deviceCount * sizeof(ordi_matrix*));
double ** d_value_a = (double**)malloc(deviceCount * sizeof(double*));
double ** d_value_b = (double**)malloc(deviceCount * sizeof(double*));
double ** d_value_c = (double**)malloc(deviceCount * sizeof(double*));
idx_t * maxdlength = (idx_t*)malloc(deviceCount * sizeof(idx_t));
idx_t * maxnnz = (idx_t*)malloc(deviceCount * sizeof(idx_t));
hipsolverDnHandle_t handle0, handle1;
hipSetDevice(0);
HANDLE_SOLVERERR(hipsolverDnCreate((&handle0)));
hipSetDevice(1);
HANDLE_SOLVERERR(hipsolverDnCreate((&handle1)));
#pragma omp parallel
{
//prepare the threads
unsigned int cpu_thread_id = omp_get_thread_num();
unsigned int num_cpu_threads = omp_get_num_threads();
//set gpus
int gpu_id = -1;
    hipSetDevice(cpu_thread_id % deviceCount);   // "% deviceCount" allows more CPU threads than GPU devices
hipGetDevice(&gpu_id);
idx_t * d_itemp1, *d_itemp2, *d_itemp3;
double * d_ftemp;
//initialize the cusolver
//HANDLE_SOLVERERR(hipsolverDnCreate((&(handle[gpu_id]))));
//malloc and copy the tensors + matrices to gpu
cissbasic_t * h_traina = h_cissta->cissunits[gpu_id];
cissbasic_t * h_trainb = h_cisstb->cissunits[gpu_id];
cissbasic_t * h_trainc = h_cisstc->cissunits[gpu_id];
//copy tensor for mode-1
HANDLE_ERROR(hipMalloc((void**)&(d_traina[gpu_id]), sizeof(cissbasic_t)));
HANDLE_ERROR(hipMalloc((void**)&(d_directory_a[gpu_id]), h_traina->dlength * sizeof(idx_t)));
HANDLE_ERROR(hipMalloc((void**)&(d_counter_a[gpu_id]), (h_traina->dlength + 1) * sizeof(idx_t)));
HANDLE_ERROR(hipMalloc((void**)&(d_entries_a[gpu_id]), h_traina->size * DEFAULT_T_TILE_WIDTH * sizeof(double)));
HANDLE_ERROR(hipMalloc((void**)&(d_dims_a[gpu_id]), nmodes * sizeof(idx_t)));
HANDLE_ERROR(hipMemcpy(d_counter_a[gpu_id], h_traina->dcounter, (h_traina->dlength + 1)*sizeof(idx_t), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_directory_a[gpu_id], h_traina->directory, h_traina->dlength*sizeof(idx_t), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_entries_a[gpu_id], h_traina->entries, h_traina->size * DEFAULT_T_TILE_WIDTH * sizeof(double), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_dims_a[gpu_id], h_traina->dims, nmodes*sizeof(idx_t), hipMemcpyHostToDevice));
d_itemp1 = h_traina->directory;
d_itemp2 = h_traina->dims;
d_itemp3 = h_traina->dcounter;
d_ftemp = h_traina->entries;
h_traina->directory = d_directory_a[gpu_id];
h_traina->dims = d_dims_a[gpu_id];
h_traina->entries = d_entries_a[gpu_id];
h_traina->dcounter = d_counter_a[gpu_id];
HANDLE_ERROR(hipMemcpy(d_traina[gpu_id], h_traina, sizeof(cissbasic_t), hipMemcpyHostToDevice));
h_traina->directory = d_itemp1;
h_traina->dims = d_itemp2;
h_traina->entries = d_ftemp;
h_traina->dcounter = d_itemp3;
//copy tensor for mode-2
HANDLE_ERROR(hipMalloc((void**)&(d_trainb[gpu_id]), sizeof(cissbasic_t)));
HANDLE_ERROR(hipMalloc((void**)&(d_directory_b[gpu_id]), h_trainb->dlength * sizeof(idx_t)));
HANDLE_ERROR(hipMalloc((void**)&(d_counter_b[gpu_id]), (h_trainb->dlength + 1) * sizeof(idx_t)));
HANDLE_ERROR(hipMalloc((void**)&(d_entries_b[gpu_id]), h_trainb->size * DEFAULT_T_TILE_WIDTH * sizeof(double)));
HANDLE_ERROR(hipMalloc((void**)&(d_dims_b[gpu_id]), nmodes * sizeof(idx_t)));
HANDLE_ERROR(hipMemcpy(d_directory_b[gpu_id], h_trainb->directory, h_trainb->dlength*sizeof(idx_t), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_counter_b[gpu_id], h_trainb->dcounter, (h_trainb->dlength + 1)*sizeof(idx_t), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_entries_b[gpu_id], h_trainb->entries, h_trainb->size * DEFAULT_T_TILE_WIDTH * sizeof(double), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_dims_b[gpu_id], h_trainb->dims, nmodes*sizeof(idx_t), hipMemcpyHostToDevice));
d_itemp1 = h_trainb->directory;
d_itemp2 = h_trainb->dims;
d_itemp3 = h_trainb->dcounter;
d_ftemp = h_trainb->entries;
h_trainb->directory = d_directory_b[gpu_id];
h_trainb->dims = d_dims_b[gpu_id];
h_trainb->entries = d_entries_b[gpu_id];
h_trainb->dcounter = d_counter_b[gpu_id];
HANDLE_ERROR(hipMemcpy(d_trainb[gpu_id], h_trainb, sizeof(cissbasic_t), hipMemcpyHostToDevice));
h_trainb->directory = d_itemp1;
h_trainb->dims = d_itemp2;
h_trainb->entries = d_ftemp;
h_trainb->dcounter = d_itemp3;
//copy tensor for mode-3
HANDLE_ERROR(hipMalloc((void**)&(d_trainc[gpu_id]), sizeof(cissbasic_t)));
HANDLE_ERROR(hipMalloc((void**)&(d_directory_c[gpu_id]), h_trainc->dlength * sizeof(idx_t)));
HANDLE_ERROR(hipMalloc((void**)&(d_counter_c[gpu_id]), (h_trainc->dlength + 1) * sizeof(idx_t)));
HANDLE_ERROR(hipMalloc((void**)&(d_entries_c[gpu_id]), h_trainc->size * DEFAULT_T_TILE_WIDTH * sizeof(double)));
HANDLE_ERROR(hipMalloc((void**)&(d_dims_c[gpu_id]), nmodes * sizeof(idx_t)));
HANDLE_ERROR(hipMemcpy(d_directory_c[gpu_id], h_trainc->directory, h_trainc->dlength*sizeof(idx_t), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_counter_c[gpu_id], h_trainc->dcounter, (h_trainc->dlength + 1)*sizeof(idx_t), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_entries_c[gpu_id], h_trainc->entries, h_trainc->size * DEFAULT_T_TILE_WIDTH * sizeof(double), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_dims_c[gpu_id], h_trainc->dims, nmodes*sizeof(idx_t), hipMemcpyHostToDevice));
d_itemp1 = h_trainc->directory;
d_itemp2 = h_trainc->dims;
d_ftemp = h_trainc->entries;
d_itemp3 = h_trainc->dcounter;
h_trainc->directory = d_directory_c[gpu_id];
h_trainc->dims = d_dims_c[gpu_id];
h_trainc->entries = d_entries_c[gpu_id];
h_trainc->dcounter = d_counter_c[gpu_id];
HANDLE_ERROR(hipMemcpy(d_trainc[gpu_id], h_trainc, sizeof(cissbasic_t), hipMemcpyHostToDevice));
h_trainc->directory = d_itemp1;
h_trainc->dims = d_itemp2;
h_trainc->entries = d_ftemp;
h_trainc->dcounter = d_itemp3;
//buffer for HTH
maxdlength[gpu_id] = SS_MAX(SS_MAX(h_traina->dlength, h_trainb->dlength),h_trainc->dlength);
maxnnz[gpu_id] = SS_MAX(SS_MAX(h_traina->nnz, h_trainb->nnz),h_trainc->nnz);
#ifdef ALSAS_DEBUG
fprintf(stdout, "now in thread %d the cpu maxnnz is %ld\n", cpu_thread_id,maxnnz[gpu_id]);
#endif
HANDLE_ERROR(hipMalloc((void**)&(d_hbuffer[gpu_id]), DEFAULT_NFACTORS * DEFAULT_NFACTORS * maxdlength[gpu_id] * sizeof(double)));
//HANDLE_ERROR(hipMalloc((void**)&(d_hthbuffer[gpu_id]), DEFAULT_NFACTORS * maxnnz[gpu_id] * sizeof(double)));
//HANDLE_ERROR(hipMalloc((void**)&d_invbuffer, DEFAULT_NFACTORS * DEFAULT_NFACTORS * maxdlength * sizeof(double)));
//buffer for inversion
HANDLE_ERROR(hipMalloc((void**)&(d_hbufptr[gpu_id]), maxdlength[gpu_id] * sizeof(double*)));
HANDLE_ERROR(hipMalloc((void**)&(d_factptr[gpu_id]), maxdlength[gpu_id] * sizeof(double*)));
HANDLE_ERROR(hipMalloc((void**)&(d_infoarray[gpu_id]), maxdlength[gpu_id] * sizeof(int)));
HANDLE_ERROR(hipMalloc((void**)&(d_factora[gpu_id]), sizeof(ordi_matrix)));
HANDLE_ERROR(hipMalloc((void**)&(d_value_a[gpu_id]), mats[0]->I * DEFAULT_NFACTORS * sizeof(double)));
HANDLE_ERROR(hipMemcpy(d_value_a[gpu_id], mats[0]->values, mats[0]->I * DEFAULT_NFACTORS * sizeof(double), hipMemcpyHostToDevice));
#pragma omp critical
{
d_ftemp = mats[0]->values;
mats[0]->values = d_value_a[gpu_id];
HANDLE_ERROR(hipMemcpy(d_factora[gpu_id], mats[0], sizeof(ordi_matrix), hipMemcpyHostToDevice));
mats[0]->values = d_ftemp;
}
#pragma omp barrier
HANDLE_ERROR(hipMalloc((void**)&(d_factorb[gpu_id]), sizeof(ordi_matrix)));
HANDLE_ERROR(hipMalloc((void**)&(d_value_b[gpu_id]), mats[1]->I * DEFAULT_NFACTORS * sizeof(double)));
HANDLE_ERROR(hipMemcpy(d_value_b[gpu_id], mats[1]->values, mats[1]->I * DEFAULT_NFACTORS * sizeof(double), hipMemcpyHostToDevice));
#pragma omp critical
{
d_ftemp = mats[1]->values;
mats[1]->values = d_value_b[gpu_id];
HANDLE_ERROR(hipMemcpy(d_factorb[gpu_id], mats[1], sizeof(ordi_matrix), hipMemcpyHostToDevice));
mats[1]->values = d_ftemp;
}
#pragma omp barrier
HANDLE_ERROR(hipMalloc((void**)&(d_factorc[gpu_id]), sizeof(ordi_matrix)));
HANDLE_ERROR(hipMalloc((void**)&(d_value_c[gpu_id]), mats[2]->I * DEFAULT_NFACTORS * sizeof(double)));
HANDLE_ERROR(hipMemcpy(d_value_c[gpu_id], mats[2]->values, mats[2]->I * DEFAULT_NFACTORS * sizeof(double), hipMemcpyHostToDevice));
#pragma omp critical
{
d_ftemp = mats[2]->values;
mats[2]->values = d_value_c[gpu_id];
HANDLE_ERROR(hipMemcpy(d_factorc[gpu_id], mats[2], sizeof(ordi_matrix), hipMemcpyHostToDevice));
mats[2]->values = d_ftemp;
}
}
#ifdef CUDA_LOSS //to be done
sptensor_gpu_t * d_test, * d_validate;
#else
double loss = tc_loss_sq(traina, mats, algorithm_index);
double frobsq = tc_frob_sq(nmodes, regularization_index, mats);
#endif
tc_converge(traina, validation, mats, best_mats, algorithm_index, loss, frobsq, 0, nmodes, best_rmse, tolerance, nbadepochs, bestepochs, max_badepochs);
//step into the kernel
idx_t mode_i, mode_n, m;
hipblasFillMode_t uplo = HIPBLAS_FILL_MODE_UPPER;
idx_t totaldiff = 0;
idx_t num_iterations = 0;
for(idx_t e=1; e < DEFAULT_MAX_ITERATE+1; ++e) {
gettimeofday(&start,NULL);
//can set random variables
srand(time(0));
mode_i = rand()%3;
#ifdef ALSAS_DEBUG
mode_i = 0;
fprintf(stdout, "now the mode_i is %d\n", mode_i);
#endif
for(m=0; m < 3; m++) {
#pragma omp parallel
{
unsigned int cpu_thread_id = omp_get_thread_num();
    hipSetDevice(cpu_thread_id % deviceCount);   // "% deviceCount" allows more CPU threads than GPU devices
hipsolverDnHandle_t handle;
if(!cpu_thread_id) handle = handle1;
else handle = handle0;
cissbasic_t * h_traina = h_cissta->cissunits[cpu_thread_id];
cissbasic_t * h_trainb = h_cisstb->cissunits[cpu_thread_id];
cissbasic_t * h_trainc = h_cisstc->cissunits[cpu_thread_id];
idx_t mymode_n = (mode_i + m)%3;
idx_t blocknum_u, blocknum_h, nnz, tilenum, blocknum_m;
HANDLE_ERROR(hipMemset(d_hbuffer[cpu_thread_id], 0, DEFAULT_NFACTORS * DEFAULT_NFACTORS * maxdlength[cpu_thread_id] * sizeof(double)));
//HANDLE_ERROR(hipMemcpy(d_invbuffer, h_invbuffer, DEFAULT_NFACTORS * DEFAULT_NFACTORS * maxdlength * sizeof(double)),hipMemcpyHostToDevice);
switch (mymode_n)
{
case 0:
{
nnz = h_traina->nnz;
tilenum = nnz/DEFAULT_T_TILE_LENGTH + 1;
blocknum_m = tilenum/(((idx_t)DEFAULT_BLOCKSIZE)/((idx_t)ALS_WARPSIZE)) + 1;
#ifdef ALSAS_DEBUG
fprintf(stdout, "now in thread %d, nnz is %d, blocknum_m is %d, tilenum is %d\n", cpu_thread_id, nnz, blocknum_m, tilenum);
#endif
HANDLE_ERROR(hipMemset(d_value_a[cpu_thread_id], 0, mats[0]->I * DEFAULT_NFACTORS * sizeof(double)));
blocknum_u = h_traina->dlength / DEFAULT_BLOCKSIZE + 1;
blocknum_h = h_traina->dlength / (((idx_t)DEFAULT_BLOCKSIZE)/((idx_t)ALS_WARPSIZE)) + 1;
hipLaunchKernelGGL(( p_mttkrp_gpu_as), dim3(blocknum_m),dim3(DEFAULT_BLOCKSIZE),0, 0, d_traina[cpu_thread_id], d_factora[cpu_thread_id], d_factorb[cpu_thread_id], d_factorc[cpu_thread_id], d_hbuffer[cpu_thread_id], tilenum);
HANDLE_ERROR(hipDeviceSynchronize());
#ifdef ALSAS_DEBUG
fprintf(stdout, "now in thread %d ends mttkrp\n", cpu_thread_id);
fprintf(stdout, "now in thread %d the blocknum for hth update is %ld and the dlength is %ld\n", cpu_thread_id, blocknum_h, h_traina->dlength);
#endif
//p_hth_update_as<<<blocknum_h,DEFAULT_BLOCKSIZE,0>>>(d_traina[cpu_thread_id], d_hthbuffer[cpu_thread_id], d_value_a[cpu_thread_id], d_hbuffer[cpu_thread_id], d_hbufptr[cpu_thread_id], d_factptr[cpu_thread_id], h_traina->dlength, regularization_index);
hipLaunchKernelGGL(( p_update_matrice), dim3(blocknum_u), dim3(DEFAULT_BLOCKSIZE), 0, 0, d_traina[cpu_thread_id], d_value_a[cpu_thread_id], d_hbuffer[cpu_thread_id], d_hbufptr[cpu_thread_id], d_factptr[cpu_thread_id], h_traina->dlength, regularization_index);
HANDLE_ERROR(hipDeviceSynchronize());
#ifdef ALS_DEBUG
p_cholecheck(d_value_a[cpu_thread_id], d_hbuffer[cpu_thread_id], d_hbufptr[cpu_thread_id], d_factptr[cpu_thread_id], h_traina->dlength);
#endif
HANDLE_SOLVERERR(hipsolverDnDpotrfBatched(handle, uplo, DEFAULT_NFACTORS, d_hbufptr[cpu_thread_id], DEFAULT_NFACTORS, d_infoarray[cpu_thread_id], (int)h_traina->dlength));
HANDLE_SOLVERERR(hipsolverDnDpotrsBatched(handle, uplo, DEFAULT_NFACTORS, 1, d_hbufptr[cpu_thread_id], DEFAULT_NFACTORS, d_factptr[cpu_thread_id], DEFAULT_NFACTORS, d_infoarray[cpu_thread_id], (int)h_traina->dlength));
HANDLE_ERROR(hipDeviceSynchronize());
gettimeofday(&end,NULL);
diff = 1000000*(end.tv_sec-start.tv_sec) + end.tv_usec - start.tv_usec;
num_iterations += 1;
totaldiff += diff;
//printf("this time cost %ld\n",diff);
#pragma omp barrier
//update the final results
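//exchange factor rows between the GPUs through the host matrix: copy back the rows this device owns (bounded by h_cissta->d_ref), then pull in the rows owned by the other device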
HANDLE_ERROR(hipMemcpy(mats[0]->values + (h_cissta->d_ref[cpu_thread_id] -1) * DEFAULT_NFACTORS, d_value_a[cpu_thread_id] + (h_cissta->d_ref[cpu_thread_id] -1) * DEFAULT_NFACTORS, (h_cissta->d_ref[cpu_thread_id + 1] - h_cissta->d_ref[cpu_thread_id]) * DEFAULT_NFACTORS * sizeof(double), hipMemcpyDeviceToHost));
HANDLE_ERROR(hipDeviceSynchronize());
HANDLE_ERROR(hipMemcpy(d_value_a[cpu_thread_id] + (h_cissta->d_ref[(cpu_thread_id + 1)% deviceCount] - 1) * DEFAULT_NFACTORS, mats[0]->values + (h_cissta->d_ref[(cpu_thread_id + 1) % deviceCount] -1 ) * DEFAULT_NFACTORS, (h_cissta->d_ref[(cpu_thread_id + 1) % deviceCount + 1] - h_cissta->d_ref[(cpu_thread_id + 1) % deviceCount]) * DEFAULT_NFACTORS * sizeof(double), hipMemcpyHostToDevice));
HANDLE_ERROR(hipDeviceSynchronize());
break;
}
case 1:
{
nnz = h_trainb->nnz;
tilenum = nnz/DEFAULT_T_TILE_LENGTH + 1;
blocknum_m = tilenum/(((idx_t)DEFAULT_BLOCKSIZE)/((idx_t)ALS_WARPSIZE)) + 1;
HANDLE_ERROR(hipMemset(d_value_b[cpu_thread_id], 0, mats[1]->I * DEFAULT_NFACTORS * sizeof(double)));
blocknum_u = h_trainb->dlength / DEFAULT_BLOCKSIZE + 1;
blocknum_h = h_trainb->dlength / (((idx_t)DEFAULT_BLOCKSIZE)/((idx_t)ALS_WARPSIZE)) + 1;
hipLaunchKernelGGL(( p_mttkrp_gpu_as), dim3(blocknum_m),dim3(DEFAULT_BLOCKSIZE),0, 0, d_trainb[cpu_thread_id], d_factorb[cpu_thread_id], d_factorc[cpu_thread_id], d_factora[cpu_thread_id], d_hbuffer[cpu_thread_id], tilenum);
HANDLE_ERROR(hipDeviceSynchronize());
//p_hth_update_as<<<blocknum_h,DEFAULT_BLOCKSIZE,0>>>(d_trainb[cpu_thread_id], d_hthbuffer[cpu_thread_id], d_value_b[cpu_thread_id], d_hbuffer[cpu_thread_id], d_hbufptr[cpu_thread_id], d_factptr[cpu_thread_id], h_trainb->dlength, regularization_index);
HANDLE_ERROR(hipDeviceSynchronize());
hipLaunchKernelGGL(( p_update_matrice), dim3(blocknum_u), dim3(DEFAULT_BLOCKSIZE), 0, 0, d_trainb[cpu_thread_id], d_value_b[cpu_thread_id], d_hbuffer[cpu_thread_id], d_hbufptr[cpu_thread_id], d_factptr[cpu_thread_id], h_trainb->dlength, regularization_index);
HANDLE_ERROR(hipDeviceSynchronize());
HANDLE_SOLVERERR(hipsolverDnDpotrfBatched(handle, uplo, DEFAULT_NFACTORS, d_hbufptr[cpu_thread_id], DEFAULT_NFACTORS, d_infoarray[cpu_thread_id], (int)h_trainb->dlength));
HANDLE_SOLVERERR(hipsolverDnDpotrsBatched(handle, uplo, DEFAULT_NFACTORS, 1, d_hbufptr[cpu_thread_id], DEFAULT_NFACTORS, d_factptr[cpu_thread_id],DEFAULT_NFACTORS, d_infoarray[cpu_thread_id], (int)h_trainb->dlength));
HANDLE_ERROR(hipDeviceSynchronize());
gettimeofday(&end,NULL);
diff = 1000000*(end.tv_sec-start.tv_sec) + end.tv_usec - start.tv_usec;
num_iterations += 1;
totaldiff += diff;
//printf("this time cost %ld\n",diff);
#pragma omp barrier
//update the final results
HANDLE_ERROR(hipMemcpy(mats[1]->values + (h_cisstb->d_ref[cpu_thread_id] - 1) * DEFAULT_NFACTORS, d_value_b[cpu_thread_id] + (h_cisstb->d_ref[cpu_thread_id] - 1)* DEFAULT_NFACTORS, (h_cisstb->d_ref[cpu_thread_id + 1] - h_cisstb->d_ref[cpu_thread_id]) * DEFAULT_NFACTORS * sizeof(double), hipMemcpyDeviceToHost));
HANDLE_ERROR(hipDeviceSynchronize());
HANDLE_ERROR(hipMemcpy(d_value_b[cpu_thread_id] + (h_cisstb->d_ref[(cpu_thread_id + 1)% deviceCount] - 1)* DEFAULT_NFACTORS, mats[1]->values + (h_cisstb->d_ref[(cpu_thread_id + 1) % deviceCount] - 1)* DEFAULT_NFACTORS, (h_cisstb->d_ref[(cpu_thread_id + 1) % deviceCount + 1] - h_cisstb->d_ref[(cpu_thread_id + 1) % deviceCount]) * DEFAULT_NFACTORS * sizeof(double), hipMemcpyHostToDevice));
HANDLE_ERROR(hipDeviceSynchronize());
break;
}
default:
{
nnz = h_trainc->nnz;
tilenum = nnz/DEFAULT_T_TILE_LENGTH + 1;
blocknum_m = tilenum/(((idx_t)DEFAULT_BLOCKSIZE)/((idx_t)ALS_WARPSIZE)) + 1;
HANDLE_ERROR(hipMemset(d_value_c[cpu_thread_id], 0, mats[2]->I * DEFAULT_NFACTORS * sizeof(double)));
blocknum_u = h_trainc->dlength / DEFAULT_BLOCKSIZE + 1;
blocknum_h = h_trainc->dlength / (((idx_t)DEFAULT_BLOCKSIZE)/((idx_t)ALS_WARPSIZE)) + 1;
hipLaunchKernelGGL(( p_mttkrp_gpu_as), dim3(blocknum_m),dim3(DEFAULT_BLOCKSIZE),0, 0, d_trainc[cpu_thread_id], d_factorc[cpu_thread_id], d_factora[cpu_thread_id], d_factorb[cpu_thread_id], d_hbuffer[cpu_thread_id], tilenum);
HANDLE_ERROR(hipDeviceSynchronize());
//p_hth_update_as<<<blocknum_h,DEFAULT_BLOCKSIZE,0>>>(d_trainc[cpu_thread_id], d_hthbuffer[cpu_thread_id], d_value_c[cpu_thread_id], d_hbuffer[cpu_thread_id], d_hbufptr[cpu_thread_id], d_factptr[cpu_thread_id], h_trainc->dlength, regularization_index);
hipLaunchKernelGGL(( p_update_matrice), dim3(blocknum_u), dim3(DEFAULT_BLOCKSIZE), 0, 0, d_trainc[cpu_thread_id], d_value_c[cpu_thread_id], d_hbuffer[cpu_thread_id], d_hbufptr[cpu_thread_id], d_factptr[cpu_thread_id], h_trainc->dlength, regularization_index);
HANDLE_ERROR(hipDeviceSynchronize());
HANDLE_SOLVERERR(hipsolverDnDpotrfBatched(handle, uplo, DEFAULT_NFACTORS, d_hbufptr[cpu_thread_id], DEFAULT_NFACTORS, d_infoarray[cpu_thread_id], (int)h_trainc->dlength));
HANDLE_SOLVERERR(hipsolverDnDpotrsBatched(handle, uplo, DEFAULT_NFACTORS, 1, d_hbufptr[cpu_thread_id], DEFAULT_NFACTORS, d_factptr[cpu_thread_id],DEFAULT_NFACTORS, d_infoarray[cpu_thread_id], (int)h_trainc->dlength));
HANDLE_ERROR(hipDeviceSynchronize());
gettimeofday(&end,NULL);
diff = 1000000*(end.tv_sec-start.tv_sec) + end.tv_usec - start.tv_usec;
num_iterations += 1;
totaldiff += diff;
//printf("this time cost %ld\n",diff);
#pragma omp barrier
//update the final results
HANDLE_ERROR(hipMemcpy(mats[2]->values + (h_cisstc->d_ref[cpu_thread_id] -1 ) * DEFAULT_NFACTORS, d_value_c[cpu_thread_id] + (h_cisstc->d_ref[cpu_thread_id] - 1) * DEFAULT_NFACTORS, (h_cisstc->d_ref[cpu_thread_id + 1] - h_cisstc->d_ref[cpu_thread_id]) * DEFAULT_NFACTORS * sizeof(double), hipMemcpyDeviceToHost));
HANDLE_ERROR(hipDeviceSynchronize());
HANDLE_ERROR(hipMemcpy(d_value_c[cpu_thread_id] + (h_cisstc->d_ref[(cpu_thread_id + 1)% deviceCount] -1) * DEFAULT_NFACTORS, mats[2]->values + (h_cisstc->d_ref[(cpu_thread_id + 1) % deviceCount] -1)* DEFAULT_NFACTORS, (h_cisstc->d_ref[(cpu_thread_id + 1) % deviceCount + 1] - h_cisstc->d_ref[(cpu_thread_id + 1) % deviceCount]) * DEFAULT_NFACTORS * sizeof(double), hipMemcpyHostToDevice));
HANDLE_ERROR(hipDeviceSynchronize());
break;
}
//p_update_als(train, mats, m, DEFAULT_NFACTORS, regularization_index);
}
}
}
gettimeofday(&end,NULL);
diff = 1000000*(end.tv_sec-start.tv_sec) + end.tv_usec - start.tv_usec;
num_iterations += 1;
totaldiff += diff;
//printf("this time cost %ld\n",diff);
#ifdef DEBUG
matrix_display(mats[0]);
matrix_display(mats[1]);
matrix_display(mats[2]);
#endif
gettimeofday(&end,NULL);
diff = 1000000*(end.tv_sec-start.tv_sec) + end.tv_usec - start.tv_usec;
num_iterations += 1;
totaldiff += diff;
//printf("this time cost %ld\n",diff);
/* compute new obj value, print stats, and exit if converged */
loss = tc_loss_sq(traina, mats, algorithm_index);
frobsq = tc_frob_sq(nmodes, regularization_index, mats);
if(tc_converge(traina, validation, mats, best_mats, algorithm_index, loss, frobsq, e, nmodes, best_rmse, tolerance, nbadepochs, bestepochs, max_badepochs)) {
break;
}
} /* foreach iteration */
hipSetDevice(0);
HANDLE_SOLVERERR(hipsolverDnDestroy(handle0));
hipSetDevice(1);
HANDLE_SOLVERERR(hipsolverDnDestroy(handle1));
#pragma omp parallel
{
unsigned int cpu_thread_id = omp_get_thread_num();
hipSetDevice(cpu_thread_id % deviceCount);
//end the cusolver
//HANDLE_SOLVERERR(hipsolverDnDestroy(handle));
//free the cudabuffer
hipFree(d_counter_a[cpu_thread_id]);
hipFree(d_directory_a[cpu_thread_id]);
hipFree(d_dims_a[cpu_thread_id]);
hipFree(d_entries_a[cpu_thread_id]);
hipFree(d_counter_b[cpu_thread_id]);
hipFree(d_directory_b[cpu_thread_id]);
hipFree(d_dims_b[cpu_thread_id]);
hipFree(d_entries_b[cpu_thread_id]);
hipFree(d_counter_c[cpu_thread_id]);
hipFree(d_directory_c[cpu_thread_id]);
hipFree(d_dims_c[cpu_thread_id]);
hipFree(d_entries_c[cpu_thread_id]);
hipFree(d_hbuffer[cpu_thread_id]);
hipFree(d_hbufptr[cpu_thread_id]);
//hipFree(d_hthbuffer[cpu_thread_id]);
hipFree(d_factptr[cpu_thread_id]);
hipFree(d_infoarray[cpu_thread_id]);
hipFree(d_value_a[cpu_thread_id]);
hipFree(d_value_b[cpu_thread_id]);
hipFree(d_value_c[cpu_thread_id]);
hipFree(d_traina[cpu_thread_id]);
hipFree(d_trainb[cpu_thread_id]);
hipFree(d_trainc[cpu_thread_id]);
hipFree(d_factora[cpu_thread_id]);
hipFree(d_factorb[cpu_thread_id]);
hipFree(d_factorc[cpu_thread_id]);
//hipFree(d_hthbuffer[cpu_thread_id]);
hipDeviceReset();
}
ciss_free(h_cissta, deviceCount);
ciss_free(h_cisstb, deviceCount);
ciss_free(h_cisstc, deviceCount);
free(d_traina);
free(d_trainb);
free(d_trainc);
free(d_directory_a);
free(d_directory_b);
free(d_directory_c);
free(d_counter_a);
free(d_counter_b);
free(d_counter_c);
free(d_dims_a);
free(d_dims_b);
free(d_dims_c);
free(d_entries_a);
free(d_entries_b);
free(d_entries_c);
free(d_hbuffer);
//free(d_hthbuffer);
free(d_hbufptr);
free(d_infoarray);
free(d_factptr);
//free(handle);
free(d_factora);
free(d_factorb);
free(d_factorc);
free(d_value_a);
free(d_value_b);
free(d_value_c);
free(maxdlength);
free(maxnnz);
return totaldiff/num_iterations;
}
}
|
d011658b0909614a53741a5a371f6f0bdd07886a.cu
|
extern "C"
{
#include "completion.h"
#include "base.h"
#include "ciss.h"
#include <stdio.h>
#include <sys/time.h>
#include <stdlib.h>
#include <time.h>
#include <stdint.h>
}
#include "als.cuh"
#include "loss.cuh"
#include <cuda.h>
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <cusparse_v2.h>
#include <cusolver_common.h>
#include <cusolverDn.h>
#include <omp.h>
#define HANDLE_SOLVERERR( err ) (HandleSolverErr( err, __FILE__, __LINE__ ))
static void HandleSolverErr( cusolverStatus_t err, const char *file, int line )
{
if(err != CUSOLVER_STATUS_SUCCESS)
{
fprintf(stderr, "ERROR: in %s at line %d (error-code %d)\n",
file, line, err );
fflush(stdout);
exit(-1);
}
}
/*
/**
* @brief Compute the Cholesky decomposition of the normal equations and solve
* for out_row. We only compute the upper-triangular portion of 'neqs',
* so work with the lower-triangular portion when column-major
* (for Fortran).
*
* @param neqs The NxN normal equations.
* @param[out] out_row The RHS of the equation. Updated in place.
* @param N The rank of the problem.
static inline void p_invert_row(
double * const restrict neqs,
double * const restrict out_row,
idx_t const N)
{
char uplo = 'L';
int order = (int) N;
int lda = (int) N;
int info;
LAPACK_DPOTRF(&uplo, &order, neqs, &lda, &info);
if(info) {
fprintf(stderr, "SPLATT: DPOTRF returned %d\n", info);
}
int nrhs = 1;
int ldb = (int) N;
LAPACK_DPOTRS(&uplo, &order, &nrhs, neqs, &lda, out_row, &ldb, &info);
if(info) {
fprintf(stderr, "SPLATT: DPOTRS returned %d\n", info);
}
}
/**
* @brief Compute DSYRK: out += A^T * A, a rank-k update. Only compute
* the upper-triangular portion.
*
* @param A The input row(s) to update with.
* @param N The length of 'A'.
* @param nvecs The number of rows in 'A'.
* @param nflush The number of times this has been performed (this slice).
* @param[out] out The NxN matrix to update.
static inline void p_vec_oprod(
double * const restrict A,
idx_t const N,
idx_t const nvecs,
idx_t const nflush,
double * const restrict out)
{
char uplo = 'L';
char trans = 'N';
int order = (int) N;
int k = (int) nvecs;
int lda = (int) N;
int ldc = (int) N;
double alpha = 1;
double beta = (nflush == 0) ? 0. : 1.;
LAPACK_DSYRK(&uplo, &trans, &order, &k, &alpha, A, &lda, &beta, out, &ldc);
}
static void p_process_slice3(
csf_sptensor * csf,
idx_t const tile,
idx_t const i,
double * A,
double * B,
idx_t const DEFAULT_NFACTORS,
double * out_row,
double * accum,
double * neqs,
double * neqs_buf,
idx_t * const nflush)
{
csf_sparsity const * const pt = csf->pt + tile;
idx_t const * const restrict sptr = pt->fptr[0];
idx_t const * const restrict fptr = pt->fptr[1];
idx_t const * const restrict fids = pt->fids[1];
idx_t const * const restrict inds = pt->fids[2];
double const * const restrict vals = pt->vals;
double * hada = neqs_buf;
idx_t bufsize = 0;
/* process each fiber
for(idx_t fib=sptr[i]; fib < sptr[i+1]; ++fib) {
double const * const restrict av = A + (fids[fib] * DEFAULT_NFACTORS);
/* first entry of the fiber is used to initialize accum
idx_t const jjfirst = fptr[fib];
double const vfirst = vals[jjfirst];
double const * const restrict bv = B + (inds[jjfirst] * DEFAULT_NFACTORS);
for(idx_t r=0; r < DEFAULT_NFACTORS; ++r) {
accum[r] = vfirst * bv[r];
hada[r] = av[r] * bv[r];
}
hada += DEFAULT_NFACTORS;
if(++bufsize == ALS_BUFSIZE) {
/* add to normal equations
p_vec_oprod(neqs_buf, DEFAULT_NFACTORS, bufsize, (*nflush)++, neqs);
bufsize = 0;
hada = neqs_buf;
}
/* foreach nnz in fiber
for(idx_t jj=fptr[fib]+1; jj < fptr[fib+1]; ++jj) {
double const v = vals[jj];
double const * const restrict bv = B + (inds[jj] * DEFAULT_NFACTORS);
for(idx_t r=0; r < DEFAULT_NFACTORS; ++r) {
accum[r] += v * bv[r];
hada[r] = av[r] * bv[r];
}
hada += DEFAULT_NFACTORS;
if(++bufsize == ALS_BUFSIZE) {
/* add to normal equations
p_vec_oprod(neqs_buf, DEFAULT_NFACTORS, bufsize, (*nflush)++, neqs);
bufsize = 0;
hada = neqs_buf;
}
}
/* accumulate into output row
for(idx_t r=0; r < DEFAULT_NFACTORS; ++r) {
out_row[r] += accum[r] * av[r];
}
} /* foreach fiber
/* final flush
p_vec_oprod(neqs_buf, DEFAULT_NFACTORS, bufsize, (*nflush)++, neqs);
}
//private function TODO: in gpu
/**
* @brief Compute the i-th row of the MTTKRP, form the normal equations, and
* store the new row.
* @param neq for inverse part
* @param out_row for mttkrp part
* @param i The row to update.
* @param reg Regularization parameter for the i-th row.
static void p_update_slice(
sptensor_t * train,
idx_t const i,
double const regularization_index,
idx_t DEFAULT_NFACTORS
)
{
idx_t const nmodes = train->nmodes;
/* fid is the row we are actually updating
idx_t const fid = (pt->fids[0] == NULL) ? i : pt->fids[0][i];
double * const restrict out_row = model->factors[csf->dim_perm[0]] +
(fid * DEFAULT_NFACTORS);
double * const restrict accum = ws->thds[tid].scratch[1];
double * const restrict neqs = ws->thds[tid].scratch[2];
idx_t bufsize = 0; /* how many hada vecs are in mat_accum
idx_t nflush = 0; /* how many times we have flushed to add to the neqs
double * const restrict mat_accum = ws->thds[tid].scratch[3];
double * hada = mat_accum;
double * const restrict hada_accum = ws->thds[tid].scratch[4];
/* clear out buffers
for(idx_t m=0; m < nmodes; ++m) {
for(idx_t f=0; f < DEFAULT_NFACTORS; ++f) {
accum[f + (m*DEFAULT_NFACTORS)] = 0.;
}
for(idx_t f=0; f < DEFAULT_NFACTORS; ++f) {
hada_accum[f + (m*DEFAULT_NFACTORS)] = 0.;
}
}
for(idx_t f=0; f < DEFAULT_NFACTORS; ++f) {
out_row[f] = 0;
}
/* grab factors
double * mats[MAX_NMODES];
for(idx_t m=0; m < nmodes; ++m) {
mats[m] = model->factors[csf->dim_perm[m]];
}
/* do MTTKRP + dsyrk
p_process_slice(csf, 0, i, mats, DEFAULT_NFACTORS, out_row, accum, neqs, mat_accum,
hada_accum, &nflush);
/* add regularization to the diagonal
for(idx_t f=0; f < DEFAULT_NFACTORS; ++f) {
neqs[f + (f * DEFAULT_NFACTORS)] += reg;
}
/* solve!
p_invert_row(neqs, out_row, DEFAULT_NFACTORS);
}
/**
* @brief To update the factor matrices in als
static void p_update_als(
sptensor_t * train,
ordi_matrix ** mats,
double regularization_index,
idx_t DEFAULT_NFACTORS
)
{
//for (i in all i number):
// p_update_slice();
} */
// gpu global function
/**
* @brief For computing the mttkrp in als
* @version Now only contains the atomic operation
*/
__global__ void p_mttkrp_gpu(cissbasic_t* d_traina,
ordi_matrix * d_factora,
ordi_matrix * d_factorb,
ordi_matrix * d_factorc,
double * d_hbuffer,
idx_t tilenum
)
{
//get thread and block index
idx_t bid = blockIdx.x;
idx_t tid = threadIdx.x;
idx_t tileid = bid * DEFAULT_BLOCKSIZE + tid;
uint8_t flag;
double * entries = d_traina -> entries;
idx_t localtile = tileid * ((DEFAULT_T_TILE_LENGTH + 1) * DEFAULT_T_TILE_WIDTH);
double __align__(256) localtbuffer[6];
double __align__(256) localmbuffer[2 * DEFAULT_NFACTORS];
//do the mttkrp
if(tileid < tilenum)
{
//get supportive information for tiles
idx_t f_id = (idx_t)(entries[localtile] * (-1)) ;
idx_t l_id = (idx_t)(entries[localtile+1] * (-1)) ;
idx_t bitmap = (idx_t)(entries[localtile+2]);
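//tile header: the first DEFAULT_T_TILE_WIDTH doubles of a tile store the starting fiber id f_id and the last fiber id l_id (negated), plus a 64-bit bitmap used to recover the fiber id of each entry; the payload entries follow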
//if(bitmap == 0) break;
#ifdef DEBUG
if(tileid == 0)
{
printf("f_id %ld, l_id %ld, bitmap %ld\n", f_id, l_id, bitmap);
}
#endif
bitmap = __brevll(bitmap);
while((bitmap & 1) == 0) {bitmap = bitmap >> 1;}
bitmap = bitmap >> 1;
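//the bitmap is bit-reversed so its low-order bits correspond to the first entries of the tile, then the leading sentinel bit is stripped; note this loop assumes a nonzero bitmap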
localtile += DEFAULT_T_TILE_WIDTH;
#ifdef DEBUG
if(tileid == 0)
{
printf("f_id %ld, l_id %ld, bitmap %ld\n", f_id, l_id, bitmap);
}
#endif
//load in vectorize
for(int m = 0; m < ((idx_t)DEFAULT_T_TILE_LENGTH) / 2; m++ )
{
//unroll loop and load
//((double2*)localtbuffer)[0] = ((double2*)(entries+localtile))[0];
//((double2*)localtbuffer)[1] = ((double2*)(entries+localtile))[1];
//((double2*)localtbuffer)[2] = ((double2*)(entries+localtile))[2];
localtbuffer[0] = entries[localtile];
localtbuffer[1] = entries[localtile + 1];
localtbuffer[2] = entries[localtile + 2];
localtbuffer[3] = entries[localtile + 3];
localtbuffer[4] = entries[localtile + 4];
localtbuffer[5] = entries[localtile + 5];
//do the mttkrp for the first
f_id = f_id + (!(bitmap & 1));
idx_t tmpi = d_traina->directory[f_id];
tmpi--;
#ifdef DEBUG
printf("the fid is %d\n", f_id);
#endif
bitmap = bitmap >> 1;
if((localtbuffer[0] == -1) && (localtbuffer[1] == -1)) break;
for(int j = 0; j < DEFAULT_NFACTORS; j++)
{
double b = d_factorb->values[((idx_t)localtbuffer[0]*DEFAULT_NFACTORS - DEFAULT_NFACTORS ) + j];
double c = d_factorc->values[((idx_t)localtbuffer[1]*DEFAULT_NFACTORS - DEFAULT_NFACTORS) + j];
localmbuffer[j] = b * c;
atomicAdd(&(d_factora->values[tmpi * DEFAULT_NFACTORS + j]), localmbuffer[j] * localtbuffer[2]);
}
//if(localtbuffer[0] == -1 && localtbuffer[1] == -1) break;
/*for(int j = 0; j < DEFAULT_NFACTORS; j++)
{
idx_t b = d_factorb->values[(idx_t)(localtbuffer[0]*DEFAULT_NFACTORS - DEFAULT_NFACTORS) + j];
idx_t c = d_factorc->values[(idx_t)(localtbuffer[1]*DEFAULT_NFACTORS - DEFAULT_NFACTORS) + j];
localmbuffer[j] = b * c;
atomicAdd(&(d_factora->values[tmpi * DEFAULT_NFACTORS + j]), localmbuffer[j] * localtbuffer[2]);
}*/
//do the mttkrp for the second
flag = !(bitmap & 1);
f_id = f_id + (!(bitmap & 1));
#ifdef DEBUG
printf("the fid is %d\n", f_id);
#endif
tmpi = d_traina->directory[f_id];
tmpi--;
bitmap = bitmap >> 1;
if((localtbuffer[3] == -1) && (localtbuffer[4] == -1)) break; //terminator check for the second entry of the pair
for(int j = 0; j < DEFAULT_NFACTORS; j++)
{
//factor values are doubles; declaring b and c as idx_t would truncate them
double b = d_factorb->values[((idx_t)localtbuffer[3]*DEFAULT_NFACTORS - DEFAULT_NFACTORS) + j];
double c = d_factorc->values[((idx_t)localtbuffer[4]*DEFAULT_NFACTORS - DEFAULT_NFACTORS) + j];
localmbuffer[DEFAULT_NFACTORS + j] = b * c;
atomicAdd(&(d_factora->values[tmpi * DEFAULT_NFACTORS + j]), localmbuffer[DEFAULT_NFACTORS + j] * localtbuffer[5]);
}
//compute the HTH for the first
//compute the HTH for the second
if(flag)
{
for(int i = 0; i < DEFAULT_NFACTORS; i++)
{
for(int j = 0; j <=i ; j++)
{
double presult1 = localmbuffer[i] * localmbuffer[j];
double presult2 = localmbuffer[DEFAULT_NFACTORS + i] * localmbuffer[DEFAULT_NFACTORS + j];
atomicAdd(&(d_hbuffer[(f_id - flag) * DEFAULT_NFACTORS * DEFAULT_NFACTORS + i * DEFAULT_NFACTORS + j]), presult1);
atomicAdd(&(d_hbuffer[f_id * DEFAULT_NFACTORS * DEFAULT_NFACTORS + i * DEFAULT_NFACTORS + j]), presult2);
}
}
}
else
{
for(int i = 0; i < DEFAULT_NFACTORS; i++)
{
for(int j = 0; j <=i ; j++)
{
double presult = localmbuffer[i] * localmbuffer[j] + localmbuffer[DEFAULT_NFACTORS + i] * localmbuffer[DEFAULT_NFACTORS + j];
atomicAdd(&(d_hbuffer[f_id * DEFAULT_NFACTORS * DEFAULT_NFACTORS + i * DEFAULT_NFACTORS + j]), presult);
}
}
}
localtile += 2*DEFAULT_T_TILE_WIDTH;
}
}
}
/**
* @brief Compute the MTTKRP for ALS, with one nonzero element per thread
* @version Atomic adds are reduced with a segmented warp scan
*/
__global__ void p_mttkrp_gpu_as(cissbasic_t* d_traina,
ordi_matrix * d_factora,
ordi_matrix * d_factorb,
ordi_matrix * d_factorc,
double * d_hbuffer,
//double * d_hthbuffer,
idx_t tilenum)
{
//get block, warp and thread index
__shared__ uint32_t warpmask[((idx_t)DEFAULT_BLOCKSIZE)/((idx_t)ALS_WARPSIZE)];
idx_t bid = blockIdx.x;
idx_t tid = threadIdx.x;
idx_t warpid = tid / ((idx_t)ALS_WARPSIZE);
idx_t laneid = tid % ((idx_t)ALS_WARPSIZE);
idx_t tileid = bid * ((idx_t)DEFAULT_BLOCKSIZE)/((idx_t)ALS_WARPSIZE) + warpid;
double * entries = d_traina -> entries;
idx_t localtile = tileid * ((DEFAULT_T_TILE_LENGTH + 1) * DEFAULT_T_TILE_WIDTH);
double __align__(256) localtbuffer[3];
double __align__(256) localmbuffer[DEFAULT_NFACTORS];
double mytmp = 0, myntmp = 0;
//initialize the warp mask
if(laneid == 0) warpmask[warpid] = 0xffffffff;
if((tileid * DEFAULT_T_TILE_LENGTH + laneid) == d_traina->nnz)
{
//redefine the mask
warpmask[warpid] = __brev((warpmask[warpid]<<(32-laneid)));
}
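//the warp holding the last nonzero truncates its mask so that the __syncwarp/__shfl_down_sync calls below only involve lanes that carry valid entries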
__syncwarp();
uint32_t mymask = warpmask[warpid];
#ifdef ALSAS_DEBUG
//printf("now the mymask and mynnz id in thread %ld are %x and %ld\n", tid, mymask, (tileid * DEFAULT_T_TILE_LENGTH + laneid));
#endif
if((tileid < tilenum) && ((tileid * DEFAULT_T_TILE_LENGTH + laneid)<d_traina->nnz))
{
//initialize the information for tile and local entry
idx_t f_id = (idx_t)(entries[localtile] * (-1)) ;
idx_t l_id = (idx_t)(entries[localtile+1] * (-1)) ;
idx_t bitmap = (idx_t)(entries[localtile+2]);
if(bitmap != 0)
{
bitmap = __brevll(bitmap);
while((bitmap & 1) == 0) {bitmap = bitmap >> 1;}
bitmap = bitmap >> 1;
idx_t itercounter = __popcll(bitmap) - (bitmap & 1);
#ifdef ALSAS_DEBUG
//if(laneid == 0)
//printf("now the itercounter is %ld\n", itercounter);
#endif
idx_t myfid = f_id + laneid - __popcll((bitmap << (63-laneid))) + 1;
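//recover this lane's fiber id from the bitmap: a cleared bit marks the start of a new fiber, so myfid is the tile's base f_id offset by the number of fiber starts up to this lane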
#ifdef ALSAS_DEBUG
//printf("now the myfid in thread %ld is %ld\n", tid, myfid);
#endif
idx_t mybit = ((bitmap >> (laneid)) & 1);
idx_t mylbit = mybit;
if(laneid == 0)
{
mylbit = 0;
mybit = 1;
}
//inter thread computation
localtbuffer[0] = entries[localtile + (laneid + 1) * DEFAULT_T_TILE_WIDTH];
localtbuffer[1] = entries[localtile + (laneid + 1) * DEFAULT_T_TILE_WIDTH + 1];
localtbuffer[2] = entries[localtile + (laneid + 1) * DEFAULT_T_TILE_WIDTH + 2];
idx_t tmpi = d_traina->directory[myfid] - 1;
idx_t b = (idx_t)localtbuffer[0] - 1;
idx_t c = (idx_t)localtbuffer[1] - 1;
//for the hadamard
#ifdef ALSAS_DEBUG
//printf("now the myposition for hthbuffer in thread %ld is %ld\n", tid, (tileid * DEFAULT_T_TILE_LENGTH + laneid));
#endif
for(int m = 0; m < DEFAULT_NFACTORS; m++)
{
localmbuffer[m] = d_factorb->values[b * DEFAULT_NFACTORS + m] * d_factorc->values[c * DEFAULT_NFACTORS + m];
//d_hthbuffer[(tileid * DEFAULT_T_TILE_LENGTH + laneid)*DEFAULT_NFACTORS + m] = localmbuffer[m];
}
__syncwarp(mymask);
//reduction in hth
//mytmp: final partial result; myntmp: messages
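//segmented reduction across the warp: partial products from entries of the same fiber are combined with __shfl_down_sync so only one lane per fiber issues the atomicAdd below, instead of every lane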
for(int m = 0; m < DEFAULT_NFACTORS; m++)
{
for(int j = 0; j <=m ; j++)
{
mytmp = localmbuffer[m] * localmbuffer[j];
myntmp = mybit * mytmp;
__syncwarp(mymask);
//now the reduction
for(int i = 0; i < itercounter; i++)
{
mytmp = (__shfl_down_sync(mymask, myntmp, 1, (int)ALS_WARPSIZE)) + (!(mylbit)) * mytmp;
myntmp = mybit * mytmp;
__syncwarp(mymask);
}
if(!mybit)
{
atomicAdd(&(d_hbuffer[myfid * DEFAULT_NFACTORS * DEFAULT_NFACTORS + m * DEFAULT_NFACTORS + j]), mytmp);
}
__syncwarp(mymask);
}
}
__syncwarp(mymask);
//reduction in mttkrp
for(int m = 0; m < DEFAULT_NFACTORS; m++)
{
mytmp = localmbuffer[m] * localtbuffer[2];
myntmp = mybit * mytmp;
__syncwarp(mymask);
//now the reduction
for(int i = 0; i < itercounter; i++)
{
mytmp = (__shfl_down_sync(mymask, myntmp, 1, (int)ALS_WARPSIZE)) + (!(mylbit)) * mytmp;
myntmp = mybit * mytmp;
__syncwarp(mymask);
}
if(!mybit)
{
atomicAdd(&(d_factora->values[tmpi * DEFAULT_NFACTORS + m]), mytmp);
}
__syncwarp(mymask);
}
}
}
__syncthreads();
}
/**
* @brief Update the HtH matrices and prepare the buffers for the batched inversion and equation solve
* @version Warp shuffle
**/
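//note: the calls to this kernel in tc_als are currently commented out; the HtH blocks are accumulated directly in p_mttkrp_gpu_as and finalized by p_update_matrice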
__global__ void p_hth_update_as(cissbasic_t * d_traina,
double * d_hthbuffer,
double * d_value_a,
double * d_hbuffer,
double ** d_hbufptr,
double ** d_factptr,
idx_t dlength,
double regularization_index)
{
__shared__ double blkmbuffer[((idx_t)DEFAULT_BLOCKSIZE)/((idx_t)ALS_WARPSIZE) * (idx_t)DEFAULT_NFACTORS];
//get block, warp and thread index
idx_t bid = blockIdx.x;
idx_t tid = threadIdx.x;
idx_t warpid = tid / ((idx_t)ALS_WARPSIZE);
idx_t laneid = tid % ((idx_t)ALS_WARPSIZE);
idx_t tileid = bid * ((idx_t)DEFAULT_BLOCKSIZE)/((idx_t)ALS_WARPSIZE) + warpid;
double __align__(256) localhthbuffer[DEFAULT_NFACTORS]={0};
if(tileid < dlength && laneid < DEFAULT_NFACTORS)
{
idx_t dcounter = d_traina->dcounter[tileid+1] - d_traina->dcounter[tileid];
#ifdef ALSAS_DEBUG
if(laneid == 0) printf("my dcounter is %ld\n and my tileid is %ld\n", dcounter, tileid);
#endif
idx_t basicposition = d_traina->dcounter[tileid];
idx_t basicsposition = warpid * DEFAULT_NFACTORS;
for(idx_t i = 0; i < dcounter; i++)
{
double localvalue = d_hthbuffer[(basicposition + i) * DEFAULT_NFACTORS + laneid];
blkmbuffer[basicsposition + laneid] = localvalue;
__syncwarp();
for(idx_t j = 0; j < DEFAULT_NFACTORS; j++)
{
localhthbuffer[j] += localvalue * blkmbuffer[basicsposition + j];
}
}
__syncwarp();
localhthbuffer[laneid] += regularization_index;
for(idx_t i = 0; i < DEFAULT_NFACTORS; i++)
{
d_hbuffer[tileid * DEFAULT_NFACTORS * DEFAULT_NFACTORS + laneid * DEFAULT_NFACTORS + i] = localhthbuffer[i];
}
__syncwarp();
//prepare for ptrs
if(laneid == 0)
{
idx_t fid = d_traina->directory[tileid] - 1;
d_factptr[tileid] = d_value_a + fid * DEFAULT_NFACTORS;
d_hbufptr[tileid] = d_hbuffer + tileid * DEFAULT_NFACTORS * DEFAULT_NFACTORS;
}
}
__syncwarp();
}
/**
* @brief Compute the inverse and finish the final update
* @version Now only with coarse grain
*/
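//note: this kernel solves the normal equations entirely on-device (in-register Cholesky, explicit inverse, then the row update); tc_als below does not appear to call it and relies on the cusolver batched routines instead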
__global__ void p_update_als_gpu(cissbasic_t * d_traina,
ordi_matrix * d_factora,
double * d_hbuffer,
idx_t dlength,
double regularization_index
)
{
idx_t bid = blockIdx.x;
idx_t tid = threadIdx.x;
idx_t tileid = bid * DEFAULT_BLOCKSIZE + tid;
idx_t basicposition = tileid * DEFAULT_NFACTORS * DEFAULT_NFACTORS;
double lv[DEFAULT_NFACTORS * DEFAULT_NFACTORS]={0};
if(tileid < dlength)
{
//compute the inverse
idx_t tmpi = d_traina->directory[tileid];
tmpi--;
double *av = d_hbuffer + basicposition;
idx_t i = 0;
idx_t j = 0;
idx_t k = 0;
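//in-register Cholesky factorization (lower triangular) of the DEFAULT_NFACTORS x DEFAULT_NFACTORS normal equations, with the regularization term folded into the diagonal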
for (i = 0; i < DEFAULT_NFACTORS; ++i)
{
for (j = 0; j <= i; ++j)
{
double inner = 0;
for (k = 0; k < j; ++k)
{
inner += lv[k+(i*DEFAULT_NFACTORS)] * lv[k+(j*DEFAULT_NFACTORS)];
}
if(i == j)
{
lv[j+(i*DEFAULT_NFACTORS)] = sqrt(av[i+(i*DEFAULT_NFACTORS)] - inner + regularization_index);
}
else
{
lv[j+(i*DEFAULT_NFACTORS)] = 1.0 / lv[j+(j*DEFAULT_NFACTORS)] * (av[j+(i*DEFAULT_NFACTORS)] - inner);
}
}
}
for(i = 0; i< DEFAULT_NFACTORS * DEFAULT_NFACTORS; i++)
{
av[i] = 0;
}
idx_t n = 0;
for(n=0; n<DEFAULT_NFACTORS; n++) //get identity matrix
{
av[n+(n*DEFAULT_NFACTORS)] = 1.0;
}
//forward solve
i = 1; //define counters outside the loop
j = 0;
idx_t f = 0;
for(j=0; j < DEFAULT_NFACTORS; ++j)
{
av[j] /= lv[0];
}
for(i=1; i < DEFAULT_NFACTORS; ++i)
{
/* X(i,f) = B(i,f) - \sum_{j=0}^{i-1} L(i,j)X(i,j) */
for(j=0; j < i; ++j)
{
for(f=0; f < DEFAULT_NFACTORS; ++f)
{
av[f+(i*DEFAULT_NFACTORS)] -= lv[j+(i*DEFAULT_NFACTORS)] * av[f+(j*DEFAULT_NFACTORS)];
}
}
for(f=0; f <DEFAULT_NFACTORS; ++f)
{
av[f+(i*DEFAULT_NFACTORS)] /= lv[i+(i*DEFAULT_NFACTORS)];
}
}
for(i=0; i < DEFAULT_NFACTORS; ++i)
{
for(j=i+1; j < DEFAULT_NFACTORS; ++j)
{
lv[j+(i*DEFAULT_NFACTORS)] = lv[i+(j*DEFAULT_NFACTORS)];
lv[i+(j*DEFAULT_NFACTORS)] = 0.0;
}
}
//backsolve
f = 0; //set counters
j = 0;
idx_t row = 2;
/* last row of X is easy */
for(f=0; f < DEFAULT_NFACTORS; ++f) {
i = DEFAULT_NFACTORS - 1;
av[f+(i*DEFAULT_NFACTORS)] /= lv[i+(i*DEFAULT_NFACTORS)];
}
/* now do backward substitution */
for(row=2; row <= DEFAULT_NFACTORS; ++row)
{
i = DEFAULT_NFACTORS - row;
/* X(i,f) = B(i,f) - \sum_{j=0}^{i-1} R(i,j)X(i,j) */
for( j=i+1; j < DEFAULT_NFACTORS; ++j)
{
for( f=0; f < DEFAULT_NFACTORS; ++f)
{
av[f+(i*DEFAULT_NFACTORS)] -= lv[j+(i*DEFAULT_NFACTORS)] * av[f+( j * DEFAULT_NFACTORS )];
}
}
for(f=0; f < DEFAULT_NFACTORS; ++f)
{
av[f+(i*DEFAULT_NFACTORS)] /= lv[i+(i*DEFAULT_NFACTORS)];
}
}
//now do the final update
double * mvals = d_factora->values + tmpi * DEFAULT_NFACTORS;
for(i = 0; i < DEFAULT_NFACTORS; i++)
{
lv[i] = 0;
for(j = 0; j < DEFAULT_NFACTORS; j++)
{
lv[i] += mvals[j] * av[i * DEFAULT_NFACTORS + j];
}
}
//the final transmission
for(i = 0; i < DEFAULT_NFACTORS/2; i++)
{
((double2*)mvals)[i] = ((double2*)lv)[i];
}
}
}
/**
* @brief Prepare the normal-equation matrices: add the regularization term and set up the pointers for the batched solve
* @version Now only with coarse grain
*/
__global__ void p_update_matrice(cissbasic_t * d_traina,
double * d_value_a,
double * d_hbuffer,
double ** d_hbufptr,
double ** d_factptr,
idx_t dlength,
double regularization_index)
{
idx_t bid = blockIdx.x;
idx_t tid = threadIdx.x;
idx_t tileid = bid * DEFAULT_BLOCKSIZE + tid;
idx_t basicposition = tileid * DEFAULT_NFACTORS * DEFAULT_NFACTORS;
if(tileid < dlength)
{
idx_t tmpi = d_traina->directory[tileid] - 1;
for(idx_t f = 0; f < DEFAULT_NFACTORS; f++)
{
d_hbuffer[basicposition + f*DEFAULT_NFACTORS + f] += regularization_index;
}
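//record per-row pointers into the HtH buffer and the factor values so cusolverDnDpotrfBatched/cusolverDnDpotrsBatched can factor and solve all rows of this mode in a single batched call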
d_hbufptr[tileid] = d_hbuffer + basicposition;
d_factptr[tileid] = d_value_a + tmpi * DEFAULT_NFACTORS;
}
}
void p_cholecheck(double * d_factora,
double * d_hbuffer,
double ** d_hbufptr,
double ** d_factptr,
idx_t dlength)
{
}
extern "C"{
/**
* @brief The main function for tensor completion in als
* @param train The tensor for generating factor matrices
* @param validation The tensor for validation(RMSE)
* @param test The tensor for testing the quality
* @param regularization_index Lambda
*/
idx_t tc_als(sptensor_t * traina,
sptensor_t * trainb,
sptensor_t * trainc,
sptensor_t * validation,
sptensor_t * test,
ordi_matrix ** mats,
ordi_matrix ** best_mats,
idx_t algorithm_index,
double regularization_index,
double * best_rmse,
double * tolerance,
int SGD_DEFAULT_BLOCKSIZE,
int SGD_DEFAULT_T_TILE_LENGTH,
idx_t * nbadepochs,
idx_t * bestepochs,
idx_t * max_badepochs)
{
idx_t const nmodes = traina->nmodes;
#ifdef CISS_DEBUG
printf("enter the als\n");
#endif
//initialize the devices
int deviceCount;
cudaGetDeviceCount(&deviceCount);
int n;
//print the GPU status
for(n = 0; n < deviceCount; n++)
{
cudaDeviceProp dprop;
cudaGetDeviceProperties(&dprop, n);
printf(" %d: %s\n", n, dprop.name);
}
omp_set_num_threads(deviceCount);
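//one OpenMP host thread per visible GPU: each thread owns one partition of the TB-COO tensors and its own set of device buffers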
//prepare the tensor in TB-COO
ciss_t * h_cissta = ciss_alloc(traina, 1, deviceCount, SGD_DEFAULT_T_TILE_LENGTH);
ciss_t * h_cisstb = ciss_alloc(trainb, 2, deviceCount, SGD_DEFAULT_T_TILE_LENGTH);
ciss_t * h_cisstc = ciss_alloc(trainc, 3, deviceCount, SGD_DEFAULT_T_TILE_LENGTH);
#ifdef MCISS_DEBUG
fprintf(stdout, "the new tensors for mode 0\n");
cissbasic_display(h_cissta->cissunits[0]);
cissbasic_display(h_cissta->cissunits[1]);
#endif
struct timeval start;
struct timeval end;
idx_t diff;
cissbasic_t ** d_traina = (cissbasic_t**)malloc(deviceCount * sizeof(cissbasic_t*));
cissbasic_t ** d_trainb = (cissbasic_t**)malloc(deviceCount * sizeof(cissbasic_t*));
cissbasic_t ** d_trainc = (cissbasic_t**)malloc(deviceCount * sizeof(cissbasic_t*));
idx_t ** d_directory_a = (idx_t**)malloc(deviceCount * sizeof(idx_t*));
idx_t ** d_directory_b = (idx_t**)malloc(deviceCount * sizeof(idx_t*));
idx_t ** d_directory_c = (idx_t**)malloc(deviceCount * sizeof(idx_t*));
idx_t ** d_counter_a = (idx_t**)malloc(deviceCount * sizeof(idx_t*));
idx_t ** d_counter_b = (idx_t**)malloc(deviceCount * sizeof(idx_t*));
idx_t ** d_counter_c = (idx_t**)malloc(deviceCount * sizeof(idx_t*));
idx_t ** d_dims_a = (idx_t**)malloc(deviceCount * sizeof(idx_t*));
idx_t ** d_dims_b = (idx_t**)malloc(deviceCount * sizeof(idx_t*));
idx_t ** d_dims_c = (idx_t**)malloc(deviceCount * sizeof(idx_t*));
double ** d_entries_a = (double**)malloc(deviceCount * sizeof(double*));
double ** d_entries_b = (double**)malloc(deviceCount * sizeof(double*));
double ** d_entries_c = (double**)malloc(deviceCount * sizeof(double*));
double ** d_hbuffer = (double**)malloc(deviceCount * sizeof(double*));
//double ** d_hthbuffer = (double**)malloc(deviceCount * sizeof(double*));
int ** d_infoarray = (int**)malloc(deviceCount * sizeof(int*));
double *** d_hbufptr = (double***)malloc(deviceCount * sizeof(double**));
double *** d_factptr = (double***)malloc(deviceCount * sizeof(double**));
ordi_matrix ** d_factora = (ordi_matrix**)malloc(deviceCount * sizeof(ordi_matrix*));
ordi_matrix ** d_factorb = (ordi_matrix**)malloc(deviceCount * sizeof(ordi_matrix*));
ordi_matrix ** d_factorc = (ordi_matrix**)malloc(deviceCount * sizeof(ordi_matrix*));
double ** d_value_a = (double**)malloc(deviceCount * sizeof(double*));
double ** d_value_b = (double**)malloc(deviceCount * sizeof(double*));
double ** d_value_c = (double**)malloc(deviceCount * sizeof(double*));
idx_t * maxdlength = (idx_t*)malloc(deviceCount * sizeof(idx_t));
idx_t * maxnnz = (idx_t*)malloc(deviceCount * sizeof(idx_t));
cusolverDnHandle_t handle0, handle1;
cudaSetDevice(0);
HANDLE_SOLVERERR(cusolverDnCreate((&handle0)));
cudaSetDevice(1);
HANDLE_SOLVERERR(cusolverDnCreate((&handle1)));
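//note: handle creation above is hard-coded for two devices (0 and 1), so deviceCount is effectively assumed to be 2 here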
#pragma omp parallel
{
//prepare the threads
unsigned int cpu_thread_id = omp_get_thread_num();
unsigned int num_cpu_threads = omp_get_num_threads();
//set gpus
int gpu_id = -1;
cudaSetDevice(cpu_thread_id % deviceCount); // "% num_gpus" allows more CPU threads than GPU devices
cudaGetDevice(&gpu_id);
idx_t * d_itemp1, *d_itemp2, *d_itemp3;
double * d_ftemp;
//initialize the cusolver
//HANDLE_SOLVERERR(cusolverDnCreate((&(handle[gpu_id]))));
//malloc and copy the tensors + matrices to gpu
cissbasic_t * h_traina = h_cissta->cissunits[gpu_id];
cissbasic_t * h_trainb = h_cisstb->cissunits[gpu_id];
cissbasic_t * h_trainc = h_cisstc->cissunits[gpu_id];
//copy tensor for mode-1
HANDLE_ERROR(cudaMalloc((void**)&(d_traina[gpu_id]), sizeof(cissbasic_t)));
HANDLE_ERROR(cudaMalloc((void**)&(d_directory_a[gpu_id]), h_traina->dlength * sizeof(idx_t)));
HANDLE_ERROR(cudaMalloc((void**)&(d_counter_a[gpu_id]), (h_traina->dlength + 1) * sizeof(idx_t)));
HANDLE_ERROR(cudaMalloc((void**)&(d_entries_a[gpu_id]), h_traina->size * DEFAULT_T_TILE_WIDTH * sizeof(double)));
HANDLE_ERROR(cudaMalloc((void**)&(d_dims_a[gpu_id]), nmodes * sizeof(idx_t)));
HANDLE_ERROR(cudaMemcpy(d_counter_a[gpu_id], h_traina->dcounter, (h_traina->dlength + 1)*sizeof(idx_t), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_directory_a[gpu_id], h_traina->directory, h_traina->dlength*sizeof(idx_t), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_entries_a[gpu_id], h_traina->entries, h_traina->size * DEFAULT_T_TILE_WIDTH * sizeof(double), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_dims_a[gpu_id], h_traina->dims, nmodes*sizeof(idx_t), cudaMemcpyHostToDevice));
d_itemp1 = h_traina->directory;
d_itemp2 = h_traina->dims;
d_itemp3 = h_traina->dcounter;
d_ftemp = h_traina->entries;
h_traina->directory = d_directory_a[gpu_id];
h_traina->dims = d_dims_a[gpu_id];
h_traina->entries = d_entries_a[gpu_id];
h_traina->dcounter = d_counter_a[gpu_id];
HANDLE_ERROR(cudaMemcpy(d_traina[gpu_id], h_traina, sizeof(cissbasic_t), cudaMemcpyHostToDevice));
h_traina->directory = d_itemp1;
h_traina->dims = d_itemp2;
h_traina->entries = d_ftemp;
h_traina->dcounter = d_itemp3;
//copy tensor for mode-2
HANDLE_ERROR(cudaMalloc((void**)&(d_trainb[gpu_id]), sizeof(cissbasic_t)));
HANDLE_ERROR(cudaMalloc((void**)&(d_directory_b[gpu_id]), h_trainb->dlength * sizeof(idx_t)));
HANDLE_ERROR(cudaMalloc((void**)&(d_counter_b[gpu_id]), (h_trainb->dlength + 1) * sizeof(idx_t)));
HANDLE_ERROR(cudaMalloc((void**)&(d_entries_b[gpu_id]), h_trainb->size * DEFAULT_T_TILE_WIDTH * sizeof(double)));
HANDLE_ERROR(cudaMalloc((void**)&(d_dims_b[gpu_id]), nmodes * sizeof(idx_t)));
HANDLE_ERROR(cudaMemcpy(d_directory_b[gpu_id], h_trainb->directory, h_trainb->dlength*sizeof(idx_t), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_counter_b[gpu_id], h_trainb->dcounter, (h_trainb->dlength + 1)*sizeof(idx_t), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_entries_b[gpu_id], h_trainb->entries, h_trainb->size * DEFAULT_T_TILE_WIDTH * sizeof(double), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_dims_b[gpu_id], h_trainb->dims, nmodes*sizeof(idx_t), cudaMemcpyHostToDevice));
d_itemp1 = h_trainb->directory;
d_itemp2 = h_trainb->dims;
d_itemp3 = h_trainb->dcounter;
d_ftemp = h_trainb->entries;
h_trainb->directory = d_directory_b[gpu_id];
h_trainb->dims = d_dims_b[gpu_id];
h_trainb->entries = d_entries_b[gpu_id];
h_trainb->dcounter = d_counter_b[gpu_id];
HANDLE_ERROR(cudaMemcpy(d_trainb[gpu_id], h_trainb, sizeof(cissbasic_t), cudaMemcpyHostToDevice));
h_trainb->directory = d_itemp1;
h_trainb->dims = d_itemp2;
h_trainb->entries = d_ftemp;
h_trainb->dcounter = d_itemp3;
//copy tensor for mode-3
HANDLE_ERROR(cudaMalloc((void**)&(d_trainc[gpu_id]), sizeof(cissbasic_t)));
HANDLE_ERROR(cudaMalloc((void**)&(d_directory_c[gpu_id]), h_trainc->dlength * sizeof(idx_t)));
HANDLE_ERROR(cudaMalloc((void**)&(d_counter_c[gpu_id]), (h_trainc->dlength + 1) * sizeof(idx_t)));
HANDLE_ERROR(cudaMalloc((void**)&(d_entries_c[gpu_id]), h_trainc->size * DEFAULT_T_TILE_WIDTH * sizeof(double)));
HANDLE_ERROR(cudaMalloc((void**)&(d_dims_c[gpu_id]), nmodes * sizeof(idx_t)));
HANDLE_ERROR(cudaMemcpy(d_directory_c[gpu_id], h_trainc->directory, h_trainc->dlength*sizeof(idx_t), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_counter_c[gpu_id], h_trainc->dcounter, (h_trainc->dlength + 1)*sizeof(idx_t), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_entries_c[gpu_id], h_trainc->entries, h_trainc->size * DEFAULT_T_TILE_WIDTH * sizeof(double), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_dims_c[gpu_id], h_trainc->dims, nmodes*sizeof(idx_t), cudaMemcpyHostToDevice));
d_itemp1 = h_trainc->directory;
d_itemp2 = h_trainc->dims;
d_ftemp = h_trainc->entries;
d_itemp3 = h_trainc->dcounter;
h_trainc->directory = d_directory_c[gpu_id];
h_trainc->dims = d_dims_c[gpu_id];
h_trainc->entries = d_entries_c[gpu_id];
h_trainc->dcounter = d_counter_c[gpu_id];
HANDLE_ERROR(cudaMemcpy(d_trainc[gpu_id], h_trainc, sizeof(cissbasic_t), cudaMemcpyHostToDevice));
h_trainc->directory = d_itemp1;
h_trainc->dims = d_itemp2;
h_trainc->entries = d_ftemp;
h_trainc->dcounter = d_itemp3;
//buffer for HTH
maxdlength[gpu_id] = SS_MAX(SS_MAX(h_traina->dlength, h_trainb->dlength),h_trainc->dlength);
maxnnz[gpu_id] = SS_MAX(SS_MAX(h_traina->nnz, h_trainb->nnz),h_trainc->nnz);
#ifdef ALSAS_DEBUG
fprintf(stdout, "now in thread %d the cpu maxnnz is %ld\n", cpu_thread_id,maxnnz[gpu_id]);
#endif
HANDLE_ERROR(cudaMalloc((void**)&(d_hbuffer[gpu_id]), DEFAULT_NFACTORS * DEFAULT_NFACTORS * maxdlength[gpu_id] * sizeof(double)));
//HANDLE_ERROR(cudaMalloc((void**)&(d_hthbuffer[gpu_id]), DEFAULT_NFACTORS * maxnnz[gpu_id] * sizeof(double)));
//HANDLE_ERROR(cudaMalloc((void**)&d_invbuffer, DEFAULT_NFACTORS * DEFAULT_NFACTORS * maxdlength * sizeof(double)));
//buffer for inversion
HANDLE_ERROR(cudaMalloc((void**)&(d_hbufptr[gpu_id]), maxdlength[gpu_id] * sizeof(double*)));
HANDLE_ERROR(cudaMalloc((void**)&(d_factptr[gpu_id]), maxdlength[gpu_id] * sizeof(double*)));
HANDLE_ERROR(cudaMalloc((void**)&(d_infoarray[gpu_id]), maxdlength[gpu_id] * sizeof(int)));
HANDLE_ERROR(cudaMalloc((void**)&(d_factora[gpu_id]), sizeof(ordi_matrix)));
HANDLE_ERROR(cudaMalloc((void**)&(d_value_a[gpu_id]), mats[0]->I * DEFAULT_NFACTORS * sizeof(double)));
HANDLE_ERROR(cudaMemcpy(d_value_a[gpu_id], mats[0]->values, mats[0]->I * DEFAULT_NFACTORS * sizeof(double), cudaMemcpyHostToDevice));
#pragma omp critical
{
d_ftemp = mats[0]->values;
mats[0]->values = d_value_a[gpu_id];
HANDLE_ERROR(cudaMemcpy(d_factora[gpu_id], mats[0], sizeof(ordi_matrix), cudaMemcpyHostToDevice));
mats[0]->values = d_ftemp;
}
#pragma omp barrier
HANDLE_ERROR(cudaMalloc((void**)&(d_factorb[gpu_id]), sizeof(ordi_matrix)));
HANDLE_ERROR(cudaMalloc((void**)&(d_value_b[gpu_id]), mats[1]->I * DEFAULT_NFACTORS * sizeof(double)));
HANDLE_ERROR(cudaMemcpy(d_value_b[gpu_id], mats[1]->values, mats[1]->I * DEFAULT_NFACTORS * sizeof(double), cudaMemcpyHostToDevice));
#pragma omp critical
{
d_ftemp = mats[1]->values;
mats[1]->values = d_value_b[gpu_id];
HANDLE_ERROR(cudaMemcpy(d_factorb[gpu_id], mats[1], sizeof(ordi_matrix), cudaMemcpyHostToDevice));
mats[1]->values = d_ftemp;
}
#pragma omp barrier
HANDLE_ERROR(cudaMalloc((void**)&(d_factorc[gpu_id]), sizeof(ordi_matrix)));
HANDLE_ERROR(cudaMalloc((void**)&(d_value_c[gpu_id]), mats[2]->I * DEFAULT_NFACTORS * sizeof(double)));
HANDLE_ERROR(cudaMemcpy(d_value_c[gpu_id], mats[2]->values, mats[2]->I * DEFAULT_NFACTORS * sizeof(double), cudaMemcpyHostToDevice));
#pragma omp critical
{
d_ftemp = mats[2]->values;
mats[2]->values = d_value_c[gpu_id];
HANDLE_ERROR(cudaMemcpy(d_factorc[gpu_id], mats[2], sizeof(ordi_matrix), cudaMemcpyHostToDevice));
mats[2]->values = d_ftemp;
}
}
#ifdef CUDA_LOSS //to be done
sptensor_gpu_t * d_test, * d_validate;
#else
double loss = tc_loss_sq(traina, mats, algorithm_index);
double frobsq = tc_frob_sq(nmodes, regularization_index, mats);
#endif
tc_converge(traina, validation, mats, best_mats, algorithm_index, loss, frobsq, 0, nmodes, best_rmse, tolerance, nbadepochs, bestepochs, max_badepochs);
//step into the kernel
idx_t mode_i, mode_n, m;
cublasFillMode_t uplo = CUBLAS_FILL_MODE_UPPER;
idx_t totaldiff = 0;
idx_t num_iterations = 0;
for(idx_t e=1; e < DEFAULT_MAX_ITERATE+1; ++e) {
gettimeofday(&start,NULL);
//can set random variables
srand(time(0));
mode_i = rand()%3;
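//pick at random which mode is updated first in this epoch; the loop over m below still updates all three modes in turn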
#ifdef ALSAS_DEBUG
mode_i = 0;
fprintf(stdout, "now the mode_i is %d\n", mode_i);
#endif
for(m=0; m < 3; m++) {
#pragma omp parallel
{
unsigned int cpu_thread_id = omp_get_thread_num();
cudaSetDevice(cpu_thread_id % deviceCount); // "% num_gpus" allows more CPU threads than GPU devices
cusolverDnHandle_t handle;
if(!cpu_thread_id) handle = handle1;
else handle = handle0;
cissbasic_t * h_traina = h_cissta->cissunits[cpu_thread_id];
cissbasic_t * h_trainb = h_cisstb->cissunits[cpu_thread_id];
cissbasic_t * h_trainc = h_cisstc->cissunits[cpu_thread_id];
idx_t mymode_n = (mode_i + m)%3;
idx_t blocknum_u, blocknum_h, nnz, tilenum, blocknum_m;
HANDLE_ERROR(cudaMemset(d_hbuffer[cpu_thread_id], 0, DEFAULT_NFACTORS * DEFAULT_NFACTORS * maxdlength[cpu_thread_id] * sizeof(double)));
//HANDLE_ERROR(cudaMemcpy(d_invbuffer, h_invbuffer, DEFAULT_NFACTORS * DEFAULT_NFACTORS * maxdlength * sizeof(double)),cudaMemcpyHostToDevice);
switch (mymode_n)
{
case 0:
{
nnz = h_traina->nnz;
tilenum = nnz/DEFAULT_T_TILE_LENGTH + 1;
blocknum_m = tilenum/(((idx_t)DEFAULT_BLOCKSIZE)/((idx_t)ALS_WARPSIZE)) + 1;
#ifdef ALSAS_DEBUG
fprintf(stdout, "now in thread %d, nnz is %d, blocknum_m is %d, tilenum is %d\n", cpu_thread_id, nnz, blocknum_m, tilenum);
#endif
HANDLE_ERROR(cudaMemset(d_value_a[cpu_thread_id], 0, mats[0]->I * DEFAULT_NFACTORS * sizeof(double)));
blocknum_u = h_traina->dlength / DEFAULT_BLOCKSIZE + 1;
blocknum_h = h_traina->dlength / (((idx_t)DEFAULT_BLOCKSIZE)/((idx_t)ALS_WARPSIZE)) + 1;
p_mttkrp_gpu_as<<<blocknum_m,DEFAULT_BLOCKSIZE,0>>>(d_traina[cpu_thread_id], d_factora[cpu_thread_id], d_factorb[cpu_thread_id], d_factorc[cpu_thread_id], d_hbuffer[cpu_thread_id], tilenum);
HANDLE_ERROR(cudaDeviceSynchronize());
#ifdef ALSAS_DEBUG
fprintf(stdout, "now in thread %d ends mttkrp\n", cpu_thread_id);
fprintf(stdout, "now in thread %d the blocknum for hth update is %ld and the dlength is %ld\n", cpu_thread_id, blocknum_h, h_traina->dlength);
#endif
//p_hth_update_as<<<blocknum_h,DEFAULT_BLOCKSIZE,0>>>(d_traina[cpu_thread_id], d_hthbuffer[cpu_thread_id], d_value_a[cpu_thread_id], d_hbuffer[cpu_thread_id], d_hbufptr[cpu_thread_id], d_factptr[cpu_thread_id], h_traina->dlength, regularization_index);
p_update_matrice<<<blocknum_u, DEFAULT_BLOCKSIZE, 0>>>(d_traina[cpu_thread_id], d_value_a[cpu_thread_id], d_hbuffer[cpu_thread_id], d_hbufptr[cpu_thread_id], d_factptr[cpu_thread_id], h_traina->dlength, regularization_index);
HANDLE_ERROR(cudaDeviceSynchronize());
#ifdef ALS_DEBUG
p_cholecheck(d_value_a[cpu_thread_id], d_hbuffer[cpu_thread_id], d_hbufptr[cpu_thread_id], d_factptr[cpu_thread_id], h_traina->dlength);
#endif
HANDLE_SOLVERERR(cusolverDnDpotrfBatched(handle, uplo, DEFAULT_NFACTORS, d_hbufptr[cpu_thread_id], DEFAULT_NFACTORS, d_infoarray[cpu_thread_id], (int)h_traina->dlength));
HANDLE_SOLVERERR(cusolverDnDpotrsBatched(handle, uplo, DEFAULT_NFACTORS, 1, d_hbufptr[cpu_thread_id], DEFAULT_NFACTORS, d_factptr[cpu_thread_id], DEFAULT_NFACTORS, d_infoarray[cpu_thread_id], (int)h_traina->dlength));
HANDLE_ERROR(cudaDeviceSynchronize());
gettimeofday(&end,NULL);
diff = 1000000*(end.tv_sec-start.tv_sec) + end.tv_usec - start.tv_usec;
num_iterations += 1;
totaldiff += diff;
//printf("this time cost %ld\n",diff);
#pragma omp barrier
//update the final results
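//exchange factor rows between the GPUs through the host matrix: copy back the rows this device owns (bounded by h_cissta->d_ref), then pull in the rows owned by the other device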
HANDLE_ERROR(cudaMemcpy(mats[0]->values + (h_cissta->d_ref[cpu_thread_id] -1) * DEFAULT_NFACTORS, d_value_a[cpu_thread_id] + (h_cissta->d_ref[cpu_thread_id] -1) * DEFAULT_NFACTORS, (h_cissta->d_ref[cpu_thread_id + 1] - h_cissta->d_ref[cpu_thread_id]) * DEFAULT_NFACTORS * sizeof(double), cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaDeviceSynchronize());
HANDLE_ERROR(cudaMemcpy(d_value_a[cpu_thread_id] + (h_cissta->d_ref[(cpu_thread_id + 1)% deviceCount] - 1) * DEFAULT_NFACTORS, mats[0]->values + (h_cissta->d_ref[(cpu_thread_id + 1) % deviceCount] -1 ) * DEFAULT_NFACTORS, (h_cissta->d_ref[(cpu_thread_id + 1) % deviceCount + 1] - h_cissta->d_ref[(cpu_thread_id + 1) % deviceCount]) * DEFAULT_NFACTORS * sizeof(double), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaDeviceSynchronize());
break;
}
case 1:
{
nnz = h_trainb->nnz;
tilenum = nnz/DEFAULT_T_TILE_LENGTH + 1;
blocknum_m = tilenum/(((idx_t)DEFAULT_BLOCKSIZE)/((idx_t)ALS_WARPSIZE)) + 1;
HANDLE_ERROR(cudaMemset(d_value_b[cpu_thread_id], 0, mats[1]->I * DEFAULT_NFACTORS * sizeof(double)));
blocknum_u = h_trainb->dlength / DEFAULT_BLOCKSIZE + 1;
blocknum_h = h_trainb->dlength / (((idx_t)DEFAULT_BLOCKSIZE)/((idx_t)ALS_WARPSIZE)) + 1;
p_mttkrp_gpu_as<<<blocknum_m,DEFAULT_BLOCKSIZE,0>>>(d_trainb[cpu_thread_id], d_factorb[cpu_thread_id], d_factorc[cpu_thread_id], d_factora[cpu_thread_id], d_hbuffer[cpu_thread_id], tilenum);
HANDLE_ERROR(cudaDeviceSynchronize());
//p_hth_update_as<<<blocknum_h,DEFAULT_BLOCKSIZE,0>>>(d_trainb[cpu_thread_id], d_hthbuffer[cpu_thread_id], d_value_b[cpu_thread_id], d_hbuffer[cpu_thread_id], d_hbufptr[cpu_thread_id], d_factptr[cpu_thread_id], h_trainb->dlength, regularization_index);
HANDLE_ERROR(cudaDeviceSynchronize());
p_update_matrice<<<blocknum_u, DEFAULT_BLOCKSIZE, 0>>>(d_trainb[cpu_thread_id], d_value_b[cpu_thread_id], d_hbuffer[cpu_thread_id], d_hbufptr[cpu_thread_id], d_factptr[cpu_thread_id], h_trainb->dlength, regularization_index);
HANDLE_ERROR(cudaDeviceSynchronize());
HANDLE_SOLVERERR(cusolverDnDpotrfBatched(handle, uplo, DEFAULT_NFACTORS, d_hbufptr[cpu_thread_id], DEFAULT_NFACTORS, d_infoarray[cpu_thread_id], (int)h_trainb->dlength));
HANDLE_SOLVERERR(cusolverDnDpotrsBatched(handle, uplo, DEFAULT_NFACTORS, 1, d_hbufptr[cpu_thread_id], DEFAULT_NFACTORS, d_factptr[cpu_thread_id],DEFAULT_NFACTORS, d_infoarray[cpu_thread_id], (int)h_trainb->dlength));
HANDLE_ERROR(cudaDeviceSynchronize());
gettimeofday(&end,NULL);
diff = 1000000*(end.tv_sec-start.tv_sec) + end.tv_usec - start.tv_usec;
num_iterations += 1;
totaldiff += diff;
//printf("this time cost %ld\n",diff);
#pragma omp barrier
//update the final results
HANDLE_ERROR(cudaMemcpy(mats[1]->values + (h_cisstb->d_ref[cpu_thread_id] - 1) * DEFAULT_NFACTORS, d_value_b[cpu_thread_id] + (h_cisstb->d_ref[cpu_thread_id] - 1)* DEFAULT_NFACTORS, (h_cisstb->d_ref[cpu_thread_id + 1] - h_cisstb->d_ref[cpu_thread_id]) * DEFAULT_NFACTORS * sizeof(double), cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaDeviceSynchronize());
HANDLE_ERROR(cudaMemcpy(d_value_b[cpu_thread_id] + (h_cisstb->d_ref[(cpu_thread_id + 1)% deviceCount] - 1)* DEFAULT_NFACTORS, mats[1]->values + (h_cisstb->d_ref[(cpu_thread_id + 1) % deviceCount] - 1)* DEFAULT_NFACTORS, (h_cisstb->d_ref[(cpu_thread_id + 1) % deviceCount + 1] - h_cisstb->d_ref[(cpu_thread_id + 1) % deviceCount]) * DEFAULT_NFACTORS * sizeof(double), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaDeviceSynchronize());
break;
}
default:
{
nnz = h_trainc->nnz;
tilenum = nnz/DEFAULT_T_TILE_LENGTH + 1;
blocknum_m = tilenum/(((idx_t)DEFAULT_BLOCKSIZE)/((idx_t)ALS_WARPSIZE)) + 1;
HANDLE_ERROR(cudaMemset(d_value_c[cpu_thread_id], 0, mats[2]->I * DEFAULT_NFACTORS * sizeof(double)));
blocknum_u = h_trainc->dlength / DEFAULT_BLOCKSIZE + 1;
blocknum_h = h_trainc->dlength / (((idx_t)DEFAULT_BLOCKSIZE)/((idx_t)ALS_WARPSIZE)) + 1;
p_mttkrp_gpu_as<<<blocknum_m,DEFAULT_BLOCKSIZE,0>>>(d_trainc[cpu_thread_id], d_factorc[cpu_thread_id], d_factora[cpu_thread_id], d_factorb[cpu_thread_id], d_hbuffer[cpu_thread_id], tilenum);
HANDLE_ERROR(cudaDeviceSynchronize());
//p_hth_update_as<<<blocknum_h,DEFAULT_BLOCKSIZE,0>>>(d_trainc[cpu_thread_id], d_hthbuffer[cpu_thread_id], d_value_c[cpu_thread_id], d_hbuffer[cpu_thread_id], d_hbufptr[cpu_thread_id], d_factptr[cpu_thread_id], h_trainc->dlength, regularization_index);
p_update_matrice<<<blocknum_u, DEFAULT_BLOCKSIZE, 0>>>(d_trainc[cpu_thread_id], d_value_c[cpu_thread_id], d_hbuffer[cpu_thread_id], d_hbufptr[cpu_thread_id], d_factptr[cpu_thread_id], h_trainc->dlength, regularization_index);
HANDLE_ERROR(cudaDeviceSynchronize());
HANDLE_SOLVERERR(cusolverDnDpotrfBatched(handle, uplo, DEFAULT_NFACTORS, d_hbufptr[cpu_thread_id], DEFAULT_NFACTORS, d_infoarray[cpu_thread_id], (int)h_trainc->dlength));
HANDLE_SOLVERERR(cusolverDnDpotrsBatched(handle, uplo, DEFAULT_NFACTORS, 1, d_hbufptr[cpu_thread_id], DEFAULT_NFACTORS, d_factptr[cpu_thread_id],DEFAULT_NFACTORS, d_infoarray[cpu_thread_id], (int)h_trainc->dlength));
HANDLE_ERROR(cudaDeviceSynchronize());
gettimeofday(&end,NULL);
diff = 1000000*(end.tv_sec-start.tv_sec) + end.tv_usec - start.tv_usec;
num_iterations += 1;
totaldiff += diff;
//printf("this time cost %ld\n",diff);
#pragma omp barrier
//update the final results
HANDLE_ERROR(cudaMemcpy(mats[2]->values + (h_cisstc->d_ref[cpu_thread_id] -1 ) * DEFAULT_NFACTORS, d_value_c[cpu_thread_id] + (h_cisstc->d_ref[cpu_thread_id] - 1) * DEFAULT_NFACTORS, (h_cisstc->d_ref[cpu_thread_id + 1] - h_cisstc->d_ref[cpu_thread_id]) * DEFAULT_NFACTORS * sizeof(double), cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaDeviceSynchronize());
HANDLE_ERROR(cudaMemcpy(d_value_c[cpu_thread_id] + (h_cisstc->d_ref[(cpu_thread_id + 1)% deviceCount] -1) * DEFAULT_NFACTORS, mats[2]->values + (h_cisstc->d_ref[(cpu_thread_id + 1) % deviceCount] -1)* DEFAULT_NFACTORS, (h_cisstc->d_ref[(cpu_thread_id + 1) % deviceCount + 1] - h_cisstc->d_ref[(cpu_thread_id + 1) % deviceCount]) * DEFAULT_NFACTORS * sizeof(double), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaDeviceSynchronize());
break;
}
//p_update_als(train, mats, m, DEFAULT_NFACTORS, regularization_index);
}
}
}
gettimeofday(&end,NULL);
diff = 1000000*(end.tv_sec-start.tv_sec) + end.tv_usec - start.tv_usec;
num_iterations += 1;
totaldiff += diff;
//printf("this time cost %ld\n",diff);
#ifdef DEBUG
matrix_display(mats[0]);
matrix_display(mats[1]);
matrix_display(mats[2]);
#endif
gettimeofday(&end,NULL);
diff = 1000000*(end.tv_sec-start.tv_sec) + end.tv_usec - start.tv_usec;
num_iterations += 1;
totaldiff += diff;
//printf("this time cost %ld\n",diff);
/* compute new obj value, print stats, and exit if converged */
loss = tc_loss_sq(traina, mats, algorithm_index);
frobsq = tc_frob_sq(nmodes, regularization_index, mats);
if(tc_converge(traina, validation, mats, best_mats, algorithm_index, loss, frobsq, e, nmodes, best_rmse, tolerance, nbadepochs, bestepochs, max_badepochs)) {
break;
}
} /* foreach iteration */
cudaSetDevice(0);
HANDLE_SOLVERERR(cusolverDnDestroy(handle0));
cudaSetDevice(1);
HANDLE_SOLVERERR(cusolverDnDestroy(handle1));
#pragma omp parallel
{
unsigned int cpu_thread_id = omp_get_thread_num();
cudaSetDevice(cpu_thread_id % deviceCount);
//end the cusolver
//HANDLE_SOLVERERR(cusolverDnDestroy(handle));
//free the cudabuffer
cudaFree(d_counter_a[cpu_thread_id]);
cudaFree(d_directory_a[cpu_thread_id]);
cudaFree(d_dims_a[cpu_thread_id]);
cudaFree(d_entries_a[cpu_thread_id]);
cudaFree(d_counter_b[cpu_thread_id]);
cudaFree(d_directory_b[cpu_thread_id]);
cudaFree(d_dims_b[cpu_thread_id]);
cudaFree(d_entries_b[cpu_thread_id]);
cudaFree(d_counter_c[cpu_thread_id]);
cudaFree(d_directory_c[cpu_thread_id]);
cudaFree(d_dims_c[cpu_thread_id]);
cudaFree(d_entries_c[cpu_thread_id]);
cudaFree(d_hbuffer[cpu_thread_id]);
cudaFree(d_hbufptr[cpu_thread_id]);
//cudaFree(d_hthbuffer[cpu_thread_id]);
cudaFree(d_factptr[cpu_thread_id]);
cudaFree(d_infoarray[cpu_thread_id]);
cudaFree(d_value_a[cpu_thread_id]);
cudaFree(d_value_b[cpu_thread_id]);
cudaFree(d_value_c[cpu_thread_id]);
cudaFree(d_traina[cpu_thread_id]);
cudaFree(d_trainb[cpu_thread_id]);
cudaFree(d_trainc[cpu_thread_id]);
cudaFree(d_factora[cpu_thread_id]);
cudaFree(d_factorb[cpu_thread_id]);
cudaFree(d_factorc[cpu_thread_id]);
//cudaFree(d_hthbuffer[cpu_thread_id]);
cudaDeviceReset();
}
ciss_free(h_cissta, deviceCount);
ciss_free(h_cisstb, deviceCount);
ciss_free(h_cisstc, deviceCount);
free(d_traina);
free(d_trainb);
free(d_trainc);
free(d_directory_a);
free(d_directory_b);
free(d_directory_c);
free(d_counter_a);
free(d_counter_b);
free(d_counter_c);
free(d_dims_a);
free(d_dims_b);
free(d_dims_c);
free(d_entries_a);
free(d_entries_b);
free(d_entries_c);
free(d_hbuffer);
//free(d_hthbuffer);
free(d_hbufptr);
free(d_infoarray);
free(d_factptr);
//free(handle);
free(d_factora);
free(d_factorb);
free(d_factorc);
free(d_value_a);
free(d_value_b);
free(d_value_c);
free(maxdlength);
free(maxnnz);
return totaldiff/num_iterations;
}
}
|
781e2ce64c6586da85ec27b8ecee1bee21d6571e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <unistd.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <time.h>
#include <math.h>
#include <time.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "internal.h"
#define LAMBDA 0.1
#define ALFA 0.5
#define BETA_MAX 1.0
#define BETA_MIN 0.0
#define INITIAL_LOUDNESS 1.0
int dimensions;
int bats_count;
int evaluation_function;
int iterations;
__device__ int devaluation_function;
__device__ int dbats_count;
__device__ int diterations;
enum { N = 624 }; // length of state vector
enum { M = 397 }; // period parameter
__device__ unsigned long state[N]; // internal state
__device__ unsigned long *pNext; // next value to get from state
__device__ int left; // number of values left before reload needed
__device__ unsigned long MT_randInt(unsigned long n);
__device__ unsigned long randInt();
__device__ void reload();
__device__ unsigned long twist(unsigned long m, unsigned long s0, unsigned long s1);
__device__ unsigned long hiBit(unsigned long u);
__device__ unsigned long loBit(unsigned long u);
__device__ unsigned long loBits(unsigned long u);
__device__ unsigned long mixBits(unsigned long u, unsigned long v );
__device__ void MT_seed(time_t time, clock_t clock);
__device__ unsigned long MT_hash(time_t t, clock_t c);
__device__ void MT_seedfinal(unsigned long oneSeed);
__device__ void MT_initialize(unsigned long seed);
__device__ float MT_randfloat();
__device__ double MT_randExc(const double *n );
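//device-side Mersenne Twister (MT19937) state and helpers, kept in device globals; presumably seeded from the time/clock values passed into local_bat_run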
#define CUDA_CALL(cuda_function, ...) { \
hipError_t status = cuda_function(__VA_ARGS__); \
cudaEnsureSuccess(status, #cuda_function, false, __FILE__, __LINE__); \
}
bool cudaEnsureSuccess(hipError_t status, const char* status_context_description,
bool die_on_error, const char* filename, unsigned line_number) {
if (status_context_description == NULL)
status_context_description = "";
if (status == hipSuccess) {
return true;
}
const char* errorString = hipGetErrorString(status);
fprintf(stderr, "CUDA Error: ");
if (status_context_description != NULL) {
fprintf(stderr, "%s\n", status_context_description);
}
if (errorString != NULL) {
fprintf(stderr,"%s\n", errorString);
} else {
fprintf(stderr, "(Unknown CUDA status code %i", status);
}
fprintf(stderr, "Filename: %s, Line: %i\n", filename, line_number);
if(die_on_error) {
exit(EXIT_FAILURE);
}
return false;
}
typedef struct Bat {
double pulse_rate;
double loudness;
double fitness;
double frequency;
double position[1000];
double velocity[1000];
} Bat;
__device__ int BOUNDRY_MAX;
__device__ int BOUNDRY_MIN;
__device__ int FREQUENCY_MIN;
__device__ int FREQUENCY_MAX;
__device__ double (*objective_function)(double[], int);
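// Benchmark objective functions selectable via initialize_function(). All are
// standard continuous minimization test functions over `dimensions` variables:
// sphere, Rastrigin, Griewank and Ackley attain their global minimum of 0 at
// the origin; Rosenbrock attains its global minimum of 0 at (1, ..., 1).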
__device__ double griewank(double solution[], int dimensions)
{
double total = 0;
double top1=0;
double top2=1;
for(int i=0;i<dimensions;i++)
{
top1=top1+pow((solution[i]),(double)2);
top2=top2*cos((((solution[i])/sqrt((double)(i+1)))*M_PI)/180);
}
total=(1/(double)4000)*top1-top2+1;
return total;
}
__device__ double rastringin (double solution[], int dimensions)
{
double total = 0;
for(int i=0;i<dimensions;i++)
{
total=total+(pow(solution[i],(double)2)-10*cos(2*M_PI*solution[i])+10);
}
return total;
}
__device__ double ackley(double solution[], int dimensions)
{
int i;
double aux = 0.0, aux1 = 0.0, result;
for (i = 0; i < dimensions; i++)
{
aux += solution[i]*solution[i];
}
for (i = 0; i < dimensions; i++)
{
aux1 += cos(2.0*M_PI*solution[i]);
}
result = -20.0*(exp(-0.2*sqrt(1.0/(float)dimensions*aux)))-exp(1.0/(float)dimensions*aux1)+20.0+exp(1.0);
return result;
}
__device__ double rosenbrock (double solution[], int dimensions)
{
double total = 0;
for (int i = 0; i < dimensions-1; i++)
{
total=total+100.*pow((solution[i+1] - pow(solution[i],2.)),2) + pow((1. - solution[i]),2);
}
return total;
}
__device__ double sphere (double *solution, int dimensions)
{
double total = 0;
for (int i = 0; i < dimensions; i++) {
total+= solution[i] * solution[i];
}
return total;
}
__device__ void copy_bat(struct Bat *from, struct Bat *to)
{
memcpy(to, from, sizeof(struct Bat));
}
__device__ void get_best(struct Bat *bats, struct Bat *best)
{
double current_best_val;
int best_indice;
current_best_val = bats[0].fitness;
best_indice = 0;
for (int i = 0; i < dbats_count; i++) {
if (bats[i].fitness < current_best_val) {
current_best_val = bats[i].fitness;
best_indice = i;
}
}
copy_bat(&bats[best_indice], best);
}
__device__ void log_bat_stdout(struct Bat *bat, int dimensions)
{
double position_average = 0;
for (int i = 0; i < dimensions; i++) {
position_average+=bat->position[i];
}
printf("ITERATIONS: %d\n", diterations);
printf("BATS_COUNT: %d\n", dbats_count);
printf("DIMENSIONS: %d\n", dimensions);
printf("POPULATION: %d\n", dbats_count);
printf("Fitness E: %E\n", bat->fitness);
}
__device__ double my_rand(double inferior, double superior)
{
double result = (double)inferior + ((superior - inferior)*MT_randInt(RAND_MAX)/(RAND_MAX+1.0));
return result;
}
__device__ void initialize_function(void)
{
switch(devaluation_function) {
case SPHERE:
BOUNDRY_MIN = 0.0;
BOUNDRY_MAX = 100.0;
objective_function = &sphere;
break;
case RASTRINGIN:
BOUNDRY_MIN = -5.12;
BOUNDRY_MAX = 5.12;
objective_function = &rastringin;
break;
case GRIEWANK:
BOUNDRY_MIN = -600.0;
BOUNDRY_MAX = 600.0;
objective_function = &griewank;
break;
case ACKLEY:
BOUNDRY_MIN = -32.0;
BOUNDRY_MAX = 32.0;
objective_function = &ackley;
break;
case ROSENBROOK:
BOUNDRY_MIN = -30.0;
BOUNDRY_MAX = 30.0;
objective_function = &rosenbrock;
break;
}
}
__global__ void local_bat_run(time_t time, clock_t clock, struct Bat *bats, struct Bat *candidates, int iterations, int bats_count, int evaluation_function, int dimensions)
{
MT_seed(time, clock);
dbats_count = bats_count;
devaluation_function = evaluation_function;
diterations = iterations;
initialize_function();
__shared__ struct Bat *best;
__shared__ int iteration;
__shared__ double loudness_average;
best = (struct Bat *) malloc(sizeof(struct Bat));
loudness_average = 1.0;
bats[threadIdx.x].pulse_rate = 0.0;
bats[threadIdx.x].frequency = 0.0;
bats[threadIdx.x].loudness = INITIAL_LOUDNESS;
for (int j = 0; j < dimensions; j++) {
bats[threadIdx.x].velocity[j] = 0;
bats[threadIdx.x].position[j] = my_rand(BOUNDRY_MIN, BOUNDRY_MAX);
}
bats[threadIdx.x].fitness = objective_function(bats[threadIdx.x].position, dimensions);
__syncthreads();
get_best(bats, best);
iteration = 0;
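// Main bat-algorithm loop: each thread owns one bat. Per iteration it
// (1) draws a new frequency from [FREQUENCY_MIN, FREQUENCY_MAX],
// (2) updates its velocity toward the current global best,
// (3) moves a candidate position (clamped to the search boundaries),
// (4) performs a local random walk around the best, scaled by the average
//     loudness, when the pulse-rate test fires,
// (5) perturbs one randomly chosen dimension, and
// (6) greedily accepts the candidate, gated by loudness, updating pulse rate
//     and loudness on acceptance.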
while(iteration < iterations) {
//frequency
double beta = my_rand(BETA_MIN, BETA_MAX);
bats[threadIdx.x].frequency = FREQUENCY_MIN + (FREQUENCY_MAX - FREQUENCY_MIN) * beta;
//velocity
for (int i = 0; i < dimensions; ++i) {
bats[threadIdx.x].velocity[i]+= (bats[threadIdx.x].position[i] - best->position[i]) * bats[threadIdx.x].frequency;
if (bats[threadIdx.x].velocity[i] > BOUNDRY_MAX) {
bats[threadIdx.x].velocity[i] = BOUNDRY_MAX;
} else if (bats[threadIdx.x].velocity[i] < BOUNDRY_MIN) {
bats[threadIdx.x].velocity[i] = BOUNDRY_MIN;
}
}
copy_bat(&bats[threadIdx.x], &candidates[threadIdx.x]);
//update position
for (int i = 0; i < dimensions; ++i) {
candidates[threadIdx.x].position[i] += candidates[threadIdx.x].velocity[i];
if (candidates[threadIdx.x].position[i] > BOUNDRY_MAX) {
candidates[threadIdx.x].position[i] = BOUNDRY_MAX;
} else if (candidates[threadIdx.x].position[i] < BOUNDRY_MIN) {
candidates[threadIdx.x].position[i] = BOUNDRY_MIN;
}
}
//local search
if (my_rand(0.0, 1.0) < candidates[threadIdx.x].pulse_rate) {
for (int i = 0; i < dimensions; i++ ) {
candidates[threadIdx.x].position[i] = best->position[i] + loudness_average * my_rand(-1.0, 1.0);
}
}
//position perturbation
int dimension = my_rand(0, dimensions);
candidates[threadIdx.x].position[dimension] = candidates[threadIdx.x].position[dimension] * my_rand(0.0,1.0);
bats[threadIdx.x].fitness = objective_function(bats[threadIdx.x].position, dimensions);
candidates[threadIdx.x].fitness = objective_function(candidates[threadIdx.x].position, dimensions);
if (my_rand(0.0,1.0) < bats[threadIdx.x].loudness && candidates[threadIdx.x].fitness < bats[threadIdx.x].fitness) {
copy_bat(&candidates[threadIdx.x], &bats[threadIdx.x]);
bats[threadIdx.x].pulse_rate = 1 - exp(-LAMBDA*iteration);
bats[threadIdx.x].loudness = INITIAL_LOUDNESS*pow(ALFA, iteration);
}
loudness_average=0;
loudness_average+=bats[threadIdx.x].loudness;
get_best(bats, best);
__syncthreads();
loudness_average/= dbats_count;
iteration++;
}
if (threadIdx.x == 0) {
log_bat_stdout(best, dimensions);
}
__syncthreads();
}
void bat_run(void)
{
struct Bat *bats;
struct Bat *candidates;
int size_of_bats = bats_count * sizeof(struct Bat) ;
CUDA_CALL(hipMalloc, (void **)&bats, size_of_bats);
CUDA_CALL(hipMalloc, (void **)&candidates, size_of_bats);
clock_t begin = clock();
hiprandState_t *deviceStates;
CUDA_CALL(hipMalloc, (void **)&deviceStates, bats_count *sizeof(hiprandState_t));
clock_t end = clock();
double time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
double gpu_time = time_spent;
printf("GPU time = %f\n",time_spent);
begin = clock();
printf("Iteration = %d \nBats count = %d\n",iterations,bats_count);
hipLaunchKernelGGL(( local_bat_run), dim3(1),dim3(bats_count), 0, 0, time(NULL), clock(), bats, candidates, iterations, bats_count, evaluation_function, dimensions);
end = clock();
time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
double cpu_time = time_spent;
printf("CPU time = %f\n",time_spent);
printf("Acceleration = %f\n", cpu_time/gpu_time);
CUDA_CALL(hipDeviceSynchronize);
CUDA_CALL(hipFree, bats);
CUDA_CALL(hipFree, candidates);
CUDA_CALL(hipFree, deviceStates);
}
__device__ unsigned long MT_randInt(unsigned long n)
{
unsigned long used = n;
used |= used >> 1;
used |= used >> 2;
used |= used >> 4;
used |= used >> 8;
used |= used >> 16;
unsigned long i;
do{
i = randInt() & used;
}while( i > n );
return i;
}
__device__ unsigned long randInt()
{
register unsigned long s1;
if( left == 0 ) reload();
--left;
s1 = *pNext++;
s1 ^= (s1 >> 11);
s1 ^= (s1 << 7) & 0x9d2c5680UL;
s1 ^= (s1 << 15) & 0xefc60000UL;
return ( s1 ^ (s1 >> 18) );
}
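// reload() below regenerates all N words of the Mersenne Twister state in one
// pass (the standard MT19937 twist step) and resets the output pointer pNext;
// randInt() above then tempers and returns one 32-bit word per call.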
__device__ void reload()
{
register unsigned long *p = state;
register int i;
for( i = N - M; i--; ++p )
*p = twist( p[M], p[0], p[1] );
for( i = M; --i; ++p )
*p = twist( p[M-N], p[0], p[1] );
*p = twist( p[M-N], p[0], state[0] );
left = N, pNext = state;
}
__device__ unsigned long twist(unsigned long m, unsigned long s0, unsigned long s1 )
{
return m ^ (mixBits(s0,s1)>>1) ^ (-loBit(s1) & 0x9908b0dfUL);
}
__device__ void MT_seed(time_t time, clock_t clock)
{
MT_seedfinal( MT_hash( time, clock ) );
}
__device__ unsigned long MT_hash(time_t t, clock_t c)
{
size_t i, j;
static unsigned long differ = 0;
unsigned long h1 = 0;
unsigned char *p = (unsigned char *) &t;
for(i = 0; i < sizeof(t); ++i)
{
h1 *= UCHAR_MAX + 2U;
h1 += p[i];
}
unsigned long h2 = 0;
p = (unsigned char *) &c;
for(j = 0; j < sizeof(c); ++j)
{
h2 *= UCHAR_MAX + 2U;
h2 += p[j];
}
return ( h1 + differ++ ) ^ h2;
}
__device__ void MT_seedfinal(unsigned long oneSeed)
{
MT_initialize(oneSeed);
reload();
}
__device__ void MT_initialize(unsigned long seed)
{
register unsigned long *s = state;
register unsigned long *r = state;
register int i = 1;
*s++ = seed & 0xffffffffUL;
for( ; i < N; ++i )
{
*s++ = ( 1812433253UL * ( *r ^ (*r >> 30) ) + i ) & 0xffffffffUL;
r++;
}
}
__device__ float MT_randfloat()
{
return (float)(randInt()) * (1.0/4294967295.0);
}
__device__ double MT_rand()
{ return (double) (randInt()) * (1.0/4294967296.0);
}
__device__ double MT_randExc(const double *n )
{ return MT_rand() * *n;
}
__device__ unsigned long hiBit(unsigned long u) { return u & 0x80000000UL; }
__device__ unsigned long loBit(unsigned long u) { return u & 0x00000001UL; }
__device__ unsigned long loBits(unsigned long u){ return u & 0x7fffffffUL; }
__device__ unsigned long mixBits(unsigned long u, unsigned long v ) { return hiBit(u) | loBits(v); }
int main(){
bat_run();
}
|
781e2ce64c6586da85ec27b8ecee1bee21d6571e.cu
|
#include <unistd.h>
#include <curand.h>
#include <curand_kernel.h>
#include <time.h>
#include <math.h>
#include <time.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "internal.h"
#define LAMBDA 0.1
#define ALFA 0.5
#define BETA_MAX 1.0
#define BETA_MIN 0.0
#define INITIAL_LOUDNESS 1.0
int dimensions;
int bats_count;
int evaluation_function;
int iterations;
__device__ int devaluation_function;
__device__ int dbats_count;
__device__ int diterations;
enum { N = 624 }; // length of state vector
enum { M = 397 }; // period parameter
__device__ unsigned long state[N]; // internal state
__device__ unsigned long *pNext; // next value to get from state
__device__ int left; // number of values left before reload needed
__device__ unsigned long MT_randInt(unsigned long n);
__device__ unsigned long randInt();
__device__ void reload();
__device__ unsigned long twist(unsigned long m, unsigned long s0, unsigned long s1);
__device__ unsigned long hiBit(unsigned long u);
__device__ unsigned long loBit(unsigned long u);
__device__ unsigned long loBits(unsigned long u);
__device__ unsigned long mixBits(unsigned long u, unsigned long v );
__device__ void MT_seed(time_t time, clock_t clock);
__device__ unsigned long MT_hash(time_t t, clock_t c);
__device__ void MT_seedfinal(unsigned long oneSeed);
__device__ void MT_initialize(unsigned long seed);
__device__ float MT_randfloat();
__device__ double MT_randExc(const double *n );
#define CUDA_CALL(cuda_function, ...) { \
cudaError_t status = cuda_function(__VA_ARGS__); \
cudaEnsureSuccess(status, #cuda_function, false, __FILE__, __LINE__); \
}
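// Illustrative usage (mirrors the calls made in bat_run() below):
//   CUDA_CALL(cudaMalloc, (void **)&bats, size_of_bats);
// The wrapped status is passed to cudaEnsureSuccess; since die_on_error is
// fixed to false here, failures are logged to stderr but execution continues.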
bool cudaEnsureSuccess(cudaError_t status, const char* status_context_description,
bool die_on_error, const char* filename, unsigned line_number) {
if (status_context_description == NULL)
status_context_description = "";
if (status == cudaSuccess) {
return true;
}
const char* errorString = cudaGetErrorString(status);
fprintf(stderr, "CUDA Error: ");
if (status_context_description != NULL) {
fprintf(stderr, "%s\n", status_context_description);
}
if (errorString != NULL) {
fprintf(stderr,"%s\n", errorString);
} else {
fprintf(stderr, "(Unknown CUDA status code %i", status);
}
fprintf(stderr, "Filename: %s, Line: %i\n", filename, line_number);
if(die_on_error) {
exit(EXIT_FAILURE);
}
return false;
}
typedef struct Bat {
double pulse_rate;
double loudness;
double fitness;
double frequency;
double position[1000];
double velocity[1000];
} Bat;
__device__ int BOUNDRY_MAX;
__device__ int BOUNDRY_MIN;
__device__ int FREQUENCY_MIN;
__device__ int FREQUENCY_MAX;
__device__ double (*objective_function)(double[], int);
__device__ double griewank(double solution[], int dimensions)
{
double total = 0;
double top1=0;
double top2=1;
for(int i=0;i<dimensions;i++)
{
top1=top1+pow((solution[i]),(double)2);
top2=top2*cos((((solution[i])/sqrt((double)(i+1)))*M_PI)/180);
}
total=(1/(double)4000)*top1-top2+1;
return total;
}
__device__ double rastringin (double solution[], int dimensions)
{
double total = 0;
for(int i=0;i<dimensions;i++)
{
total=total+(pow(solution[i],(double)2)-10*cos(2*M_PI*solution[i])+10);
}
return total;
}
__device__ double ackley(double solution[], int dimensions)
{
int i;
double aux = 0.0, aux1 = 0.0, result;
for (i = 0; i < dimensions; i++)
{
aux += solution[i]*solution[i];
}
for (i = 0; i < dimensions; i++)
{
aux1 += cos(2.0*M_PI*solution[i]);
}
result = -20.0*(exp(-0.2*sqrt(1.0/(float)dimensions*aux)))-exp(1.0/(float)dimensions*aux1)+20.0+exp(1.0);
return result;
}
__device__ double rosenbrock (double solution[], int dimensions)
{
double total = 0;
for (int i = 0; i < dimensions-1; i++)
{
total=total+100.*pow((solution[i+1] - pow(solution[i],2.)),2) + pow((1. - solution[i]),2);
}
return total;
}
__device__ double sphere (double *solution, int dimensions)
{
double total = 0;
for (int i = 0; i < dimensions; i++) {
total+= solution[i] * solution[i];
}
return total;
}
__device__ void copy_bat(struct Bat *from, struct Bat *to)
{
memcpy(to, from, sizeof(struct Bat));
}
__device__ void get_best(struct Bat *bats, struct Bat *best)
{
double current_best_val;
int best_indice;
current_best_val = bats[0].fitness;
best_indice = 0;
for (int i = 0; i < dbats_count; i++) {
if (bats[i].fitness < current_best_val) {
current_best_val = bats[i].fitness;
best_indice = i;
}
}
copy_bat(&bats[best_indice], best);
}
__device__ void log_bat_stdout(struct Bat *bat, int dimensions)
{
double position_average = 0;
for (int i = 0; i < dimensions; i++) {
position_average+=bat->position[i];
}
printf("ITERATIONS: %d\n", diterations);
printf("BATS_COUNT: %d\n", dbats_count);
printf("DIMENSIONS: %d\n", dimensions);
printf("POPULATION: %d\n", dbats_count);
printf("Fitness E: %E\n", bat->fitness);
}
__device__ double my_rand(double inferior, double superior)
{
double result = (double)inferior + ((superior - inferior)*MT_randInt(RAND_MAX)/(RAND_MAX+1.0));
return result;
}
__device__ void initialize_function(void)
{
switch(devaluation_function) {
case SPHERE:
BOUNDRY_MIN = 0.0;
BOUNDRY_MAX = 100.0;
objective_function = &sphere;
break;
case RASTRINGIN:
BOUNDRY_MIN = -5.12;
BOUNDRY_MAX = 5.12;
objective_function = &rastringin;
break;
case GRIEWANK:
BOUNDRY_MIN = -600.0;
BOUNDRY_MAX = 600.0;
objective_function = &griewank;
break;
case ACKLEY:
BOUNDRY_MIN = -32.0;
BOUNDRY_MAX = 32.0;
objective_function = &ackley;
break;
case ROSENBROOK:
BOUNDRY_MIN = -30.0;
BOUNDRY_MAX = 30.0;
objective_function = &rosenbrock;
break;
}
}
__global__ void local_bat_run(time_t time, clock_t clock, struct Bat *bats, struct Bat *candidates, int iterations, int bats_count, int evaluation_function, int dimensions)
{
MT_seed(time, clock);
dbats_count = bats_count;
devaluation_function = evaluation_function;
diterations = iterations;
initialize_function();
__shared__ struct Bat *best;
__shared__ int iteration;
__shared__ double loudness_average;
best = (struct Bat *) malloc(sizeof(struct Bat));
loudness_average = 1.0;
bats[threadIdx.x].pulse_rate = 0.0;
bats[threadIdx.x].frequency = 0.0;
bats[threadIdx.x].loudness = INITIAL_LOUDNESS;
for (int j = 0; j < dimensions; j++) {
bats[threadIdx.x].velocity[j] = 0;
bats[threadIdx.x].position[j] = my_rand(BOUNDRY_MIN, BOUNDRY_MAX);
}
bats[threadIdx.x].fitness = objective_function(bats[threadIdx.x].position, dimensions);
__syncthreads();
get_best(bats, best);
iteration = 0;
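// Main bat-algorithm loop: each thread owns one bat. Per iteration it
// (1) draws a new frequency from [FREQUENCY_MIN, FREQUENCY_MAX],
// (2) updates its velocity toward the current global best,
// (3) moves a candidate position (clamped to the search boundaries),
// (4) performs a local random walk around the best, scaled by the average
//     loudness, when the pulse-rate test fires,
// (5) perturbs one randomly chosen dimension, and
// (6) greedily accepts the candidate, gated by loudness, updating pulse rate
//     and loudness on acceptance.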
while(iteration < iterations) {
//frequency
double beta = my_rand(BETA_MIN, BETA_MAX);
bats[threadIdx.x].frequency = FREQUENCY_MIN + (FREQUENCY_MAX - FREQUENCY_MIN) * beta;
//velocity
for (int i = 0; i < dimensions; ++i) {
bats[threadIdx.x].velocity[i]+= (bats[threadIdx.x].position[i] - best->position[i]) * bats[threadIdx.x].frequency;
if (bats[threadIdx.x].velocity[i] > BOUNDRY_MAX) {
bats[threadIdx.x].velocity[i] = BOUNDRY_MAX;
} else if (bats[threadIdx.x].velocity[i] < BOUNDRY_MIN) {
bats[threadIdx.x].velocity[i] = BOUNDRY_MIN;
}
}
copy_bat(&bats[threadIdx.x], &candidates[threadIdx.x]);
//update position
for (int i = 0; i < dimensions; ++i) {
candidates[threadIdx.x].position[i] += candidates[threadIdx.x].velocity[i];
if (candidates[threadIdx.x].position[i] > BOUNDRY_MAX) {
candidates[threadIdx.x].position[i] = BOUNDRY_MAX;
} else if (candidates[threadIdx.x].position[i] < BOUNDRY_MIN) {
candidates[threadIdx.x].position[i] = BOUNDRY_MIN;
}
}
//local search
if (my_rand(0.0, 1.0) < candidates[threadIdx.x].pulse_rate) {
for (int i = 0; i < dimensions; i++ ) {
candidates[threadIdx.x].position[i] = best->position[i] + loudness_average * my_rand(-1.0, 1.0);
}
}
//position perturbation
int dimension = my_rand(0, dimensions);
candidates[threadIdx.x].position[dimension] = candidates[threadIdx.x].position[dimension] * my_rand(0.0,1.0);
bats[threadIdx.x].fitness = objective_function(bats[threadIdx.x].position, dimensions);
candidates[threadIdx.x].fitness = objective_function(candidates[threadIdx.x].position, dimensions);
if (my_rand(0.0,1.0) < bats[threadIdx.x].loudness && candidates[threadIdx.x].fitness < bats[threadIdx.x].fitness) {
copy_bat(&candidates[threadIdx.x], &bats[threadIdx.x]);
bats[threadIdx.x].pulse_rate = 1 - exp(-LAMBDA*iteration);
bats[threadIdx.x].loudness = INITIAL_LOUDNESS*pow(ALFA, iteration);
}
loudness_average=0;
loudness_average+=bats[threadIdx.x].loudness;
get_best(bats, best);
__syncthreads();
loudness_average/= dbats_count;
iteration++;
}
if (threadIdx.x == 0) {
log_bat_stdout(best, dimensions);
}
__syncthreads();
}
void bat_run(void)
{
struct Bat *bats;
struct Bat *candidates;
int size_of_bats = bats_count * sizeof(struct Bat) ;
CUDA_CALL(cudaMalloc, (void **)&bats, size_of_bats);
CUDA_CALL(cudaMalloc, (void **)&candidates, size_of_bats);
clock_t begin = clock();
curandState *deviceStates;
CUDA_CALL(cudaMalloc, (void **)&deviceStates, bats_count *sizeof(curandState));
clock_t end = clock();
double time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
double gpu_time = time_spent;
printf("GPU time = %f\n",time_spent);
begin = clock();
printf("Iteration = %d \nBats count = %d\n",iterations,bats_count);
local_bat_run<<<1,bats_count>>>(time(NULL), clock(), bats, candidates, iterations, bats_count, evaluation_function, dimensions);
end = clock();
time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
double cpu_time = time_spent;
printf("CPU time = %f\n",time_spent);
printf("Acceleration = %f\n", cpu_time/gpu_time);
CUDA_CALL(cudaDeviceSynchronize);
CUDA_CALL(cudaFree, bats);
CUDA_CALL(cudaFree, candidates);
CUDA_CALL(cudaFree, deviceStates);
}
__device__ unsigned long MT_randInt(unsigned long n)
{
unsigned long used = n;
used |= used >> 1;
used |= used >> 2;
used |= used >> 4;
used |= used >> 8;
used |= used >> 16;
unsigned long i;
do{
i = randInt() & used;
}while( i > n );
return i;
}
__device__ unsigned long randInt()
{
register unsigned long s1;
if( left == 0 ) reload();
--left;
s1 = *pNext++;
s1 ^= (s1 >> 11);
s1 ^= (s1 << 7) & 0x9d2c5680UL;
s1 ^= (s1 << 15) & 0xefc60000UL;
return ( s1 ^ (s1 >> 18) );
}
__device__ void reload()
{
register unsigned long *p = state;
register int i;
for( i = N - M; i--; ++p )
*p = twist( p[M], p[0], p[1] );
for( i = M; --i; ++p )
*p = twist( p[M-N], p[0], p[1] );
*p = twist( p[M-N], p[0], state[0] );
left = N, pNext = state;
}
__device__ unsigned long twist(unsigned long m, unsigned long s0, unsigned long s1 )
{
return m ^ (mixBits(s0,s1)>>1) ^ (-loBit(s1) & 0x9908b0dfUL);
}
__device__ void MT_seed(time_t time, clock_t clock)
{
MT_seedfinal( MT_hash( time, clock ) );
}
__device__ unsigned long MT_hash(time_t t, clock_t c)
{
size_t i, j;
static unsigned long differ = 0;
unsigned long h1 = 0;
unsigned char *p = (unsigned char *) &t;
for(i = 0; i < sizeof(t); ++i)
{
h1 *= UCHAR_MAX + 2U;
h1 += p[i];
}
unsigned long h2 = 0;
p = (unsigned char *) &c;
for(j = 0; j < sizeof(c); ++j)
{
h2 *= UCHAR_MAX + 2U;
h2 += p[j];
}
return ( h1 + differ++ ) ^ h2;
}
__device__ void MT_seedfinal(unsigned long oneSeed)
{
MT_initialize(oneSeed);
reload();
}
__device__ void MT_initialize(unsigned long seed)
{
register unsigned long *s = state;
register unsigned long *r = state;
register int i = 1;
*s++ = seed & 0xffffffffUL;
for( ; i < N; ++i )
{
*s++ = ( 1812433253UL * ( *r ^ (*r >> 30) ) + i ) & 0xffffffffUL;
r++;
}
}
__device__ float MT_randfloat()
{
return (float)(randInt()) * (1.0/4294967295.0);
}
__device__ double MT_rand()
{ return (double) (randInt()) * (1.0/4294967296.0);
}
__device__ double MT_randExc(const double *n )
{ return MT_rand() * *n;
}
__device__ unsigned long hiBit(unsigned long u) { return u & 0x80000000UL; }
__device__ unsigned long loBit(unsigned long u) { return u & 0x00000001UL; }
__device__ unsigned long loBits(unsigned long u){ return u & 0x7fffffffUL; }
__device__ unsigned long mixBits(unsigned long u, unsigned long v ) { return hiBit(u) | loBits(v); }
int main(){
bat_run();
}
|
0b11c27a1b4dee2959644e04dcec0e698085e9a5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "caffe/util/math_functions.hpp"
#include "caffe/common.hpp"
#include "GeneralizedPatchMatch.cuh"
#include "DeepAnalogy.cuh"
#include "WLS.h"
#include "Deconv.h"
struct Parameters
{
std::vector<std::string> layers; //which layers used as content
int patch_size0;
int iter;
};
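// norm(): channel-wise L2 normalization of a C x H x W feature map on the GPU.
// dst receives src divided, per spatial location, by the L2 norm taken across
// channels; when `smooth` is non-NULL it additionally receives the per-pixel
// squared-norm response map rescaled to [0, 1], which is later used by the
// blend kernel as a weighting map.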
__host__ void norm(float* &dst, float* src, float* smooth, Dim dim){
int count = dim.channel*dim.height*dim.width;
float* x = src;
float* x2;
hipMalloc(&x2, count*sizeof(float));
caffe_gpu_mul(count, x, x, x2);
//calculate the per-pixel distance (L2 norm across channels)
float*sum;
float* ones;
hipMalloc(&sum, dim.height*dim.width*sizeof(float));
hipMalloc(&ones, dim.channel*sizeof(float));
caffe_gpu_set(dim.channel, 1.0f, ones);
caffe_gpu_gemv(CblasTrans, dim.channel, dim.height*dim.width, 1.0f, x2, ones, 0.0f, sum);
float *dis;
hipMalloc(&dis, dim.height*dim.width*sizeof(float));
caffe_gpu_powx(dim.height*dim.width, sum, 0.5f, dis);
if (smooth != NULL)
{
hipMemcpy(smooth, sum, dim.height*dim.width*sizeof(float), hipMemcpyDeviceToDevice);
int index;
float minv, maxv;
hipblasIsamin(Caffe::cublas_handle(), dim.height*dim.width, sum, 1, &index);
hipMemcpy(&minv, sum + index - 1, sizeof(float), hipMemcpyDeviceToHost);
hipblasIsamax(Caffe::cublas_handle(), dim.height*dim.width, sum, 1, &index);
hipMemcpy(&maxv, sum + index - 1, sizeof(float), hipMemcpyDeviceToHost);
caffe_gpu_add_scalar(dim.height*dim.width, -minv, smooth);
caffe_gpu_scal(dim.height*dim.width, 1.0f / (maxv - minv), smooth);
}
//norm
caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, dim.channel, dim.width*dim.height, 1, 1.0f, ones, dis, 0.0f, x2);
caffe_gpu_div(count, src, x2, dst);
hipFree(x2);
hipFree(ones);
hipFree(dis);
hipFree(sum);
}
DeepAnalogy::DeepAnalogy(){
resizeRatio = 1;
weightLevel = 3;
photoTransfer = false;
file_A = "";
file_BP = "";
path_output = "";
path_model = "";
}
DeepAnalogy::~DeepAnalogy(){
}
void DeepAnalogy::SetRatio(float ratio){
resizeRatio = ratio;
}
void DeepAnalogy::SetBlendWeight(int level){
weightLevel = level;
}
void DeepAnalogy::UsePhotoTransfer(bool flag){
photoTransfer = flag;
}
void DeepAnalogy::SetModel(string path){
path_model =path;
}
void DeepAnalogy::SetA(string f_a){
file_A = f_a;
}
void DeepAnalogy::SetBPrime(string f_bp){
file_BP = f_bp;
}
void DeepAnalogy::SetOutputDir(string f_o){
path_output = f_o;
}
void DeepAnalogy::SetGPU(int no){
hipSetDevice(no);
}
void DeepAnalogy::LoadInputs(){
float ratio;
Mat ori_AL = imread(file_A);
Mat ori_BPL = imread(file_BP);
if (ori_AL.empty() || ori_BPL.empty())
{
cout << "image cannot read!" << endl;
waitKey();
return;
}
ori_A_cols = ori_AL.cols;
ori_A_rows = ori_AL.rows;
ori_BP_cols = ori_BPL.cols;
ori_BP_rows = ori_BPL.rows;
if (ori_AL.rows > 700)
{
ratio = 700.f / ori_AL.rows;
cv::resize(ori_AL, img_AL, Size(), ratio, ratio, INTER_CUBIC);
ori_AL = img_AL.clone();
}
if (ori_AL.cols > 700)
{
ratio = 700.f / ori_AL.cols;
cv::resize(ori_AL, img_AL, Size(), ratio, ratio, INTER_CUBIC);
ori_AL = img_AL.clone();
}
if (ori_AL.rows < 200)
{
ratio = 200.f / ori_AL.rows;
cv::resize(ori_AL, img_AL, Size(), ratio, ratio, INTER_CUBIC);
ori_AL = img_AL.clone();
}
if (ori_AL.cols < 200)
{
ratio = 200.f / ori_AL.cols;
cv::resize(ori_AL, img_AL, Size(), ratio, ratio, INTER_CUBIC);
ori_AL = img_AL.clone();
}
if (ori_BPL.rows > 700)
{
ratio = 700.f / ori_BPL.rows;
cv::resize(ori_BPL, img_BPL, Size(), ratio, ratio, INTER_CUBIC);
ori_BPL = img_BPL.clone();
}
if (ori_BPL.cols > 700)
{
ratio = 700.f / ori_BPL.cols;
cv::resize(ori_BPL, img_BPL, Size(), ratio, ratio, INTER_CUBIC);
ori_BPL = img_BPL.clone();
}
if (ori_BPL.rows < 200)
{
ratio = 200.f / ori_BPL.rows;
cv::resize(ori_BPL, img_BPL, Size(), ratio, ratio, INTER_CUBIC);
ori_BPL = img_BPL.clone();
}
if (ori_BPL.cols < 200)
{
ratio = 200.f / ori_BPL.cols;
cv::resize(ori_BPL, img_BPL, Size(), ratio, ratio, INTER_CUBIC);
ori_BPL = img_BPL.clone();
}
if ((ori_AL.cols*ori_AL.rows) > 350000)
{
ratio = sqrt((float)(350000) / (float)(ori_AL.cols*ori_AL.rows));
cv::resize(ori_AL, img_AL, Size(), ratio, ratio, INTER_CUBIC);
ori_AL = img_AL.clone();
}
if ((ori_BPL.cols*ori_BPL.rows) > 350000)
{
ratio = sqrt((float)(350000) / (float)(ori_BPL.cols*ori_BPL.rows));
cv::resize(ori_BPL, img_BPL, Size(), ratio, ratio, INTER_CUBIC);
ori_BPL = img_BPL.clone();
}
int maxLateral, minLateral;
maxLateral = max(max(ori_AL.rows, ori_AL.cols), max(ori_BPL.rows, ori_BPL.cols));
minLateral = min(min(ori_AL.rows, ori_AL.cols), min(ori_BPL.rows, ori_BPL.cols));
if (maxLateral > 700 || minLateral < 200)
{
cout << "The sizes of images are not permitted. (One side cannot be larger than 700 or smaller than 200 and the area should not be larger than 350000)" << endl;
waitKey();
return;
}
cur_A_cols = ori_AL.cols;
cur_A_rows = ori_AL.rows;
cur_BP_cols = ori_BPL.cols;
cur_BP_rows = ori_BPL.rows;
if (ori_A_cols != ori_AL.cols)
{
cout << "The input image A has been resized to " << cur_A_cols << " x " << cur_A_rows << ".\n";
}
if (ori_BP_cols != ori_BPL.cols)
{
cout << "The input image B prime has been resized to " << cur_BP_cols << " x " << cur_BP_rows << ".\n";
}
cv::resize(ori_AL, img_AL, Size(), (float)cur_A_cols / ori_AL.cols, (float)cur_A_rows / ori_AL.rows, INTER_CUBIC);
cv::resize(ori_BPL, img_BPL, Size(), (float)cur_BP_cols / ori_BPL.cols, (float)cur_BP_rows / ori_BPL.rows, INTER_CUBIC);
}
void DeepAnalogy::ComputeAnn() {
if (img_BPL.empty()||img_AL.empty())
{
waitKey();
return;
}
const int param_size = 8;
int ann_size_AB, ann_size_BA;//should be assigned later
int *params_host, *params_device_AB, *params_device_BA;
unsigned int *ann_device_AB, *ann_host_AB, *ann_device_BA, *ann_host_BA;
float *annd_device_AB, *annd_host_AB, *annd_device_BA, *annd_host_BA;
char fname[256];
//set parameters
Parameters params;
params.layers.push_back("conv5_1");
params.layers.push_back("conv4_1");
params.layers.push_back("conv3_1");
params.layers.push_back("conv2_1");
params.layers.push_back("conv1_1");
params.layers.push_back("data");
std::vector<float> weight;
weight.push_back(1.0);
switch (weightLevel)
{
case 1:
weight.push_back(0.7);
weight.push_back(0.6);
weight.push_back(0.5);
weight.push_back(0.0);
break;
case 2:
weight.push_back(0.8);
weight.push_back(0.7);
weight.push_back(0.6);
weight.push_back(0.1);
break;
case 3:
weight.push_back(0.9);
weight.push_back(0.8);
weight.push_back(0.7);
weight.push_back(0.2);
break;
default:
weight.push_back(0.9);
weight.push_back(0.8);
weight.push_back(0.7);
weight.push_back(0.2);
break;
}
weight.push_back(0.0);
std::vector<int> sizes;
sizes.push_back(3);
sizes.push_back(3);
sizes.push_back(3);
sizes.push_back(5);
sizes.push_back(5);
sizes.push_back(3);
params.iter = 10;
//scale and enhance
float ratio = resizeRatio;
Mat img_BP, img_A;
cv::resize(img_AL, img_A, Size(), ratio, ratio, INTER_CUBIC);
cv::resize(img_BPL, img_BP, Size(), ratio, ratio, INTER_CUBIC);
std::vector<int> range;
if (img_A.cols > img_A.rows)
{
range.push_back(img_A.cols / 16);
}
else
{
range.push_back(img_A.rows / 16);
}
range.push_back(6);
range.push_back(6);
range.push_back(4);
range.push_back(4);
range.push_back(2);
//load caffe
::google::InitGoogleLogging("deepanalogy");
string model_file = "vgg19/VGG_ILSVRC_19_layers_deploy.prototxt";
string trained_file = "vgg19/VGG_ILSVRC_19_layers.caffemodel";
Classifier classifier_A(path_model + model_file, path_model + trained_file);
Classifier classifier_B(path_model + model_file, path_model + trained_file);
std::vector<float *> data_A, data_AP;
data_A.resize(params.layers.size());
data_AP.resize(params.layers.size());
std::vector<Dim> data_A_size;
data_A_size.resize(params.layers.size());
classifier_A.Predict(img_A, params.layers, data_AP, data_A, data_A_size);
std::vector<float *> data_B, data_BP;
data_B.resize(params.layers.size());
data_BP.resize(params.layers.size());
std::vector<Dim> data_B_size;
data_B_size.resize(params.layers.size());
classifier_B.Predict(img_BP, params.layers, data_B, data_BP, data_B_size);
clock_t start, finish;
double duration;
start = clock();
ann_size_AB = img_AL.cols*img_AL.rows;
ann_size_BA = img_BPL.cols*img_BPL.rows;
params_host = (int *)malloc(param_size * sizeof(int));
ann_host_AB = (unsigned int *)malloc(ann_size_AB * sizeof(unsigned int));
annd_host_AB = (float *)malloc(ann_size_AB * sizeof(float));
ann_host_BA = (unsigned int *)malloc(ann_size_BA * sizeof(unsigned int));
annd_host_BA = (float *)malloc(ann_size_BA * sizeof(float));
hipMalloc(&params_device_AB, param_size * sizeof(int));
hipMalloc(&params_device_BA, param_size * sizeof(int));
hipMalloc(&ann_device_AB, ann_size_AB * sizeof(unsigned int));
hipMalloc(&annd_device_AB, ann_size_AB * sizeof(float));
hipMalloc(&ann_device_BA, ann_size_BA * sizeof(unsigned int));
hipMalloc(&annd_device_BA, ann_size_BA * sizeof(float));
int numlayer = params.layers.size();
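// Coarse-to-fine pass over the selected VGG-19 layers (conv5_1 down to
// conv1_1): at each layer the nearest-neighbor fields A->B and B->A are either
// initialized (top layer) or upsampled from the previous layer, the features
// are L2-normalized and blended with their reconstructed counterparts,
// PatchMatch refines both fields, and the result is propagated to a finer
// layer via average voting followed by deconvolution.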
//feature match
for (int curr_layer = 0; curr_layer < numlayer - 1; curr_layer++)//from 32 to 512
{
//set parameters
params_host[0] = data_A_size[curr_layer].channel;//channels
params_host[1] = data_A_size[curr_layer].height;
params_host[2] = data_A_size[curr_layer].width;
params_host[3] = data_B_size[curr_layer].height;
params_host[4] = data_B_size[curr_layer].width;
params_host[5] = sizes[curr_layer];
params_host[6] = params.iter;
params_host[7] = range[curr_layer];
//copy to device
hipMemcpy(params_device_AB, params_host, param_size * sizeof(int), hipMemcpyHostToDevice);
//set parameters
params_host[0] = data_B_size[curr_layer].channel;//channels
params_host[1] = data_B_size[curr_layer].height;
params_host[2] = data_B_size[curr_layer].width;
params_host[3] = data_A_size[curr_layer].height;
params_host[4] = data_A_size[curr_layer].width;
//copy to device
hipMemcpy(params_device_BA, params_host, param_size * sizeof(int), hipMemcpyHostToDevice);
////set device pa, device pb, device ann and device annd
dim3 blocksPerGridAB(data_A_size[curr_layer].width / 20 + 1, data_A_size[curr_layer].height / 20 + 1, 1);
dim3 threadsPerBlockAB(20, 20, 1);
ann_size_AB = data_A_size[curr_layer].width* data_A_size[curr_layer].height;
dim3 blocksPerGridBA(data_B_size[curr_layer].width / 20 + 1, data_B_size[curr_layer].height / 20 + 1, 1);
dim3 threadsPerBlockBA(20, 20, 1);
ann_size_BA = data_B_size[curr_layer].width* data_B_size[curr_layer].height;
//initialize ann if needed
if (curr_layer == 0)//initialize, rows and cols both less than 32, just use one block
{
initialAnn_kernel << <blocksPerGridAB, threadsPerBlockAB >> >(ann_device_AB, params_device_AB);
initialAnn_kernel << <blocksPerGridBA, threadsPerBlockBA >> >(ann_device_BA, params_device_BA);
}
else {//upsampling, notice this block's dimension is twice the ann at this point
unsigned int * ann_tmp;
hipMalloc(&ann_tmp, ann_size_AB * sizeof(unsigned int));
upSample_kernel << <blocksPerGridAB, threadsPerBlockAB >> >(ann_device_AB, ann_tmp, params_device_AB,
data_A_size[curr_layer - 1].width, data_A_size[curr_layer - 1].height);//get new ann_device
hipMemcpy(ann_device_AB, ann_tmp, ann_size_AB * sizeof(unsigned int), hipMemcpyDeviceToDevice);
hipFree(ann_tmp);
hipMalloc(&ann_tmp, ann_size_BA * sizeof(unsigned int));
upSample_kernel << <blocksPerGridBA, threadsPerBlockBA >> >(ann_device_BA, ann_tmp, params_device_BA,
data_B_size[curr_layer - 1].width, data_B_size[curr_layer - 1].height);//get new ann_device
hipMemcpy(ann_device_BA, ann_tmp, ann_size_BA * sizeof(unsigned int), hipMemcpyDeviceToDevice);
hipFree(ann_tmp);
}
//normalize the two feature maps
float *Ndata_A, *Ndata_AP, *Ndata_B, *Ndata_BP;
float *response_A, *response_BP;
hipMalloc(&Ndata_A, data_A_size[curr_layer].channel*data_A_size[curr_layer].width*data_A_size[curr_layer].height*sizeof(float));
hipMalloc(&Ndata_AP, data_A_size[curr_layer].channel*data_A_size[curr_layer].width*data_A_size[curr_layer].height*sizeof(float));
hipMalloc(&response_A, data_A_size[curr_layer].width*data_A_size[curr_layer].height*sizeof(float));
hipMalloc(&Ndata_B, data_B_size[curr_layer].channel*data_B_size[curr_layer].width*data_B_size[curr_layer].height*sizeof(float));
hipMalloc(&Ndata_BP, data_B_size[curr_layer].channel*data_B_size[curr_layer].width*data_B_size[curr_layer].height*sizeof(float));
hipMalloc(&response_BP, data_B_size[curr_layer].width*data_B_size[curr_layer].height*sizeof(float));
norm(Ndata_A, data_A[curr_layer], response_A, data_A_size[curr_layer]);
norm(Ndata_BP, data_BP[curr_layer], response_BP, data_B_size[curr_layer]);
Mat temp1, temp2;
cv::resize(img_AL, temp1, cv::Size(data_A_size[curr_layer].width, data_A_size[curr_layer].height));
cv::resize(img_BPL, temp2, cv::Size(data_B_size[curr_layer].width, data_B_size[curr_layer].height));
Mat response1, response2;
response1 = Mat(temp1.size(), CV_32FC1);
response2 = Mat(temp2.size(), CV_32FC1);
hipMemcpy(response1.data, response_A, data_A_size[curr_layer].width*data_A_size[curr_layer].height*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(response2.data, response_BP, data_B_size[curr_layer].width*data_B_size[curr_layer].height*sizeof(float), hipMemcpyDeviceToHost);
Mat response_byte1, response_byte2;
response1.convertTo(response_byte1, CV_8UC1, 255);
response2.convertTo(response_byte2, CV_8UC1, 255);
blend << <blocksPerGridAB, threadsPerBlockAB >> >(response_A, data_A[curr_layer], data_AP[curr_layer], weight[curr_layer], params_device_AB);
blend << <blocksPerGridBA, threadsPerBlockBA >> >(response_BP, data_BP[curr_layer], data_B[curr_layer], weight[curr_layer], params_device_BA);
norm(Ndata_AP, data_AP[curr_layer], NULL, data_A_size[curr_layer]);
norm(Ndata_B, data_B[curr_layer], NULL, data_B_size[curr_layer]);
//patchmatch
cout << "Finding nearest neighbor field using PatchMatch Algorithm at layer:" << params.layers[curr_layer] << ".\n";
patchmatch << <blocksPerGridAB, threadsPerBlockAB >> >(Ndata_AP, Ndata_BP, Ndata_A, Ndata_B, ann_device_AB, annd_device_AB, params_device_AB);
patchmatch << <blocksPerGridBA, threadsPerBlockBA >> >(Ndata_B, Ndata_A, Ndata_BP, Ndata_AP, ann_device_BA, annd_device_BA, params_device_BA);
hipFree(Ndata_A);
hipFree(Ndata_AP);
hipFree(Ndata_B);
hipFree(Ndata_BP);
hipFree(response_A);
hipFree(response_BP);
//deconv
if (curr_layer < numlayer - 2)
{
int next_layer = curr_layer + 2;
/***************upsample***********************/
// for better deconvolution
params_host[0] = data_A_size[next_layer].channel;//channels
params_host[1] = data_A_size[next_layer].height;
params_host[2] = data_A_size[next_layer].width;
params_host[3] = data_B_size[next_layer].height;
params_host[4] = data_B_size[next_layer].width;
params_host[5] = sizes[next_layer];
params_host[6] = params.iter;
params_host[7] = range[next_layer];
//copy to device
hipMemcpy(params_device_AB, params_host, param_size * sizeof(int), hipMemcpyHostToDevice);
//set parameters
params_host[0] = data_B_size[next_layer].channel;//channels
params_host[1] = data_B_size[next_layer].height;
params_host[2] = data_B_size[next_layer].width;
params_host[3] = data_A_size[next_layer].height;
params_host[4] = data_A_size[next_layer].width;
//copy to device
hipMemcpy(params_device_BA, params_host, param_size * sizeof(int), hipMemcpyHostToDevice);
////set device pa, device pb, device ann and device annd
dim3 blocksPerGridAB(data_A_size[next_layer].width / 20 + 1, data_A_size[next_layer].height / 20 + 1, 1);
dim3 threadsPerBlockAB(20, 20, 1);
ann_size_AB = data_A_size[next_layer].width* data_A_size[next_layer].height;
dim3 blocksPerGridBA(data_B_size[next_layer].width / 20 + 1, data_B_size[next_layer].height / 20 + 1, 1);
dim3 threadsPerBlockBA(20, 20, 1);
ann_size_BA = data_B_size[next_layer].width* data_B_size[next_layer].height;
unsigned int * ann_tmp;
hipMalloc(&ann_tmp, ann_size_AB * sizeof(unsigned int));
upSample_kernel << <blocksPerGridAB, threadsPerBlockAB >> >(ann_device_AB, ann_tmp, params_device_AB,
data_A_size[curr_layer].width, data_A_size[curr_layer].height);//get new ann_device
avg_vote << <blocksPerGridAB, threadsPerBlockAB >> >(ann_tmp, data_BP[next_layer], data_AP[next_layer], params_device_AB);
hipFree(ann_tmp);
hipMalloc(&ann_tmp, ann_size_BA * sizeof(unsigned int));
upSample_kernel << <blocksPerGridBA, threadsPerBlockBA >> >(ann_device_BA, ann_tmp, params_device_BA,
data_B_size[curr_layer].width, data_B_size[curr_layer].height);//get new ann_devices
avg_vote << <blocksPerGridBA, threadsPerBlockBA >> >(ann_tmp, data_A[next_layer], data_B[next_layer], params_device_BA);
hipFree(ann_tmp);
/***********************************************/
//set parameters
params_host[0] = data_A_size[curr_layer].channel;//channels
params_host[1] = data_A_size[curr_layer].height;
params_host[2] = data_A_size[curr_layer].width;
params_host[3] = data_B_size[curr_layer].height;
params_host[4] = data_B_size[curr_layer].width;
params_host[5] = sizes[curr_layer];
params_host[6] = params.iter;
params_host[7] = range[curr_layer];
//copy to device
hipMemcpy(params_device_AB, params_host, param_size * sizeof(int), hipMemcpyHostToDevice);
//set parameters
params_host[0] = data_B_size[curr_layer].channel;//channels
params_host[1] = data_B_size[curr_layer].height;
params_host[2] = data_B_size[curr_layer].width;
params_host[3] = data_A_size[curr_layer].height;
params_host[4] = data_A_size[curr_layer].width;
//copy to device
hipMemcpy(params_device_BA, params_host, param_size * sizeof(int), hipMemcpyHostToDevice);
////set device pa, device pb, device ann and device annd
blocksPerGridAB = dim3(data_A_size[curr_layer].width / 20 + 1, data_A_size[curr_layer].height / 20 + 1, 1);
threadsPerBlockAB = dim3(20, 20, 1);
ann_size_AB = data_A_size[curr_layer].width* data_A_size[curr_layer].height;
blocksPerGridBA = dim3(data_B_size[curr_layer].width / 20 + 1, data_B_size[curr_layer].height / 20 + 1, 1);
threadsPerBlockBA = dim3(20, 20, 1);
ann_size_BA = data_B_size[curr_layer].width* data_B_size[curr_layer].height;
int num1 = data_A_size[curr_layer].channel*data_A_size[curr_layer].width*data_A_size[curr_layer].height;
int num2 = data_A_size[next_layer].channel*data_A_size[next_layer].width*data_A_size[next_layer].height;
float *target;
hipMalloc(&target, num1 * sizeof(float));
avg_vote << <blocksPerGridAB, threadsPerBlockAB >> >(ann_device_AB, data_BP[curr_layer], target, params_device_AB);
deconv(&classifier_A, params.layers[curr_layer], target, data_A_size[curr_layer], params.layers[next_layer], data_AP[next_layer], data_A_size[next_layer]);
hipFree(target);
num1 = data_B_size[curr_layer].channel*data_B_size[curr_layer].width*data_B_size[curr_layer].height;
num2 = data_B_size[next_layer].channel*data_B_size[next_layer].width*data_B_size[next_layer].height;
hipMalloc(&target, num1 * sizeof(float));
avg_vote << <blocksPerGridBA, threadsPerBlockBA >> >(ann_device_BA, data_A[curr_layer], target, params_device_BA);
deconv(&classifier_B, params.layers[curr_layer], target, data_B_size[curr_layer], params.layers[next_layer], data_B[next_layer], data_B_size[next_layer]);
hipFree(target);
}
}
//upsample the final NNFs to full image resolution and reconstruct the output images
int curr_layer = numlayer - 1;
{
//set parameters
params_host[0] = 3;//channels
params_host[1] = img_AL.rows;
params_host[2] = img_AL.cols;
params_host[3] = img_BPL.rows;
params_host[4] = img_BPL.cols;
params_host[5] = sizes[curr_layer];
params_host[6] = params.iter;
params_host[7] = range[curr_layer];
//copy to device
hipMemcpy(params_device_AB, params_host, param_size * sizeof(int), hipMemcpyHostToDevice);
//set parameters
params_host[0] = 3;//channels
params_host[1] = img_BPL.rows;
params_host[2] = img_BPL.cols;
params_host[3] = img_AL.rows;
params_host[4] = img_AL.cols;
//copy to device
hipMemcpy(params_device_BA, params_host, param_size * sizeof(int), hipMemcpyHostToDevice);
////set device pa, device pb, device ann and device annd
dim3 blocksPerGridAB(img_AL.cols / 20 + 1, img_AL.rows / 20 + 1, 1);
dim3 threadsPerBlockAB(20, 20, 1);
ann_size_AB = img_AL.cols* img_AL.rows;
dim3 blocksPerGridBA(img_BPL.cols / 20 + 1, img_BPL.rows / 20 + 1, 1);
dim3 threadsPerBlockBA(20, 20, 1);
ann_size_BA = img_BPL.rows* img_BPL.cols;
//upsample
unsigned int * ann_tmp;
hipMalloc(&ann_tmp, ann_size_AB * sizeof(unsigned int));
upSample_kernel << <blocksPerGridAB, threadsPerBlockAB >> >(ann_device_AB, ann_tmp, params_device_AB,
data_A_size[curr_layer - 1].width, data_A_size[curr_layer - 1].height);//get new ann_device
hipMemcpy(ann_device_AB, ann_tmp, ann_size_AB * sizeof(unsigned int), hipMemcpyDeviceToDevice);
hipFree(ann_tmp);
hipMalloc(&ann_tmp, ann_size_BA * sizeof(unsigned int));
upSample_kernel << <blocksPerGridBA, threadsPerBlockBA >> >(ann_device_BA, ann_tmp, params_device_BA,
data_B_size[curr_layer - 1].width, data_B_size[curr_layer - 1].height);//get new ann_device
hipMemcpy(ann_device_BA, ann_tmp, ann_size_BA * sizeof(unsigned int), hipMemcpyDeviceToDevice);
hipFree(ann_tmp);
hipMemcpy(ann_host_AB, ann_device_AB, ann_size_AB * sizeof(unsigned int), hipMemcpyDeviceToHost);
hipMemcpy(ann_host_BA, ann_device_BA, ann_size_BA * sizeof(unsigned int), hipMemcpyDeviceToHost);
//free space in device, only need to free pa and pb which are created temporarily
//image downscale
Mat flow, result_AB, result_BA, err, out, normal;
flow = reconstruct_dflow(img_AL, img_BPL, ann_host_AB, sizes[curr_layer]);
result_AB = reconstruct_avg(img_AL, img_BPL, ann_host_AB, sizes[curr_layer]);
cv::resize(result_AB, out, Size(), (float)ori_A_cols / cur_A_cols, (float)ori_A_rows / cur_A_rows, INTER_CUBIC);
sprintf(fname, "resultAB.png");
imwrite(path_output + fname, out);
flow = reconstruct_dflow(img_BPL, img_AL, ann_host_BA, sizes[curr_layer]);
result_BA = reconstruct_avg(img_BPL, img_AL, ann_host_BA, sizes[curr_layer]);
cv::resize(result_BA, out, Size(), (float)ori_BP_cols / cur_BP_cols, (float)ori_BP_rows / cur_BP_rows, INTER_CUBIC);
sprintf(fname, "resultBA.png");
imwrite(path_output + fname, out);
if (photoTransfer)
{
cout << "Refining photo transfer." << endl;
Mat filtered_AB, filtered_BA, filtered_A, filtered_B, refine_AB, refine_BA;
Mat origin_A, origin_B, res_AB, res_BA;
img_AL.convertTo(origin_A, CV_32FC3, 1/255.0);
img_BPL.convertTo(origin_B, CV_32FC3, 1 / 255.0);
result_AB.convertTo(res_AB, CV_32FC3, 1 / 255.0);
result_BA.convertTo(res_BA, CV_32FC3, 1 / 255.0);
WeightedLeastSquare(filtered_AB, origin_A, res_AB);
WeightedLeastSquare(filtered_BA, origin_B, res_BA);
WeightedLeastSquare(filtered_A, origin_A, origin_A);
WeightedLeastSquare(filtered_B, origin_B, origin_B);
refine_AB = origin_A + filtered_AB - filtered_A;
refine_BA = origin_B + filtered_BA - filtered_B;
sprintf(fname, "refineAB.png");
refine_AB.convertTo(normal, CV_32FC3, 255.0);
cv::resize(normal, out, Size(), (float)ori_A_cols / cur_A_cols, (float)ori_A_rows / cur_A_rows, INTER_CUBIC);
imwrite(path_output + fname, out);
sprintf(fname, "refineBA.png");
refine_BA.convertTo(normal, CV_32FC3, 255.0);
cv::resize(normal, out, Size(), (float)ori_BP_cols / cur_BP_cols, (float)ori_BP_rows / cur_BP_rows, INTER_CUBIC);
imwrite(path_output + fname, out);
}
}
cout << "Saving flow result." << "\n";
//save ann
{
ofstream output1;
char fname[256];
sprintf(fname, "flowAB.txt");
output1.open(path_output + fname);
for (int y = 0; y < img_AL.rows; y++)
for (int x = 0; x < img_AL.cols; x++)
{
unsigned int v = ann_host_AB[y*img_AL.cols + x];
int xbest = INT_TO_X(v);
int ybest = INT_TO_Y(v);
output1 << xbest - x << " " << ybest - y << endl;
}
output1.close();
ofstream output2;
sprintf(fname, "flowBA.txt");
output2.open(path_output + fname);
for (int y = 0; y < img_BPL.rows; y++){
for (int x = 0; x < img_BPL.cols; x++)
{
unsigned int v = ann_host_BA[y*img_BPL.cols + x];
int xbest = INT_TO_X(v);
int ybest = INT_TO_Y(v);
output2 << xbest - x << " " << ybest - y << endl;
}
}
output2.close();
}
hipFree(params_device_AB);
hipFree(ann_device_AB);
hipFree(annd_device_AB);
hipFree(params_device_BA);
hipFree(ann_device_BA);
hipFree(annd_device_BA);
free(ann_host_AB);
free(annd_host_AB);
free(ann_host_BA);
free(annd_host_BA);
free(params_host);
for (int i = 0; i < numlayer; i++)
{
hipFree(data_A[i]);
hipFree(data_BP[i]);
}
finish = clock();
duration = (double)(finish - start) / CLOCKS_PER_SEC;
cout << "Finished finding ann. Time : " << duration << endl;
google::ShutdownGoogleLogging();
classifier_A.DeleteNet();
classifier_B.DeleteNet();
}
|
0b11c27a1b4dee2959644e04dcec0e698085e9a5.cu
|
#include "caffe/util/math_functions.hpp"
#include "caffe/common.hpp"
#include "GeneralizedPatchMatch.cuh"
#include "DeepAnalogy.cuh"
#include "WLS.h"
#include "Deconv.h"
struct Parameters
{
std::vector<std::string> layers; //which layers used as content
int patch_size0;
int iter;
};
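// norm(): channel-wise L2 normalization of a C x H x W feature map on the GPU.
// dst receives src divided, per spatial location, by the L2 norm taken across
// channels; when `smooth` is non-NULL it additionally receives the per-pixel
// squared-norm response map rescaled to [0, 1], which is later used by the
// blend kernel as a weighting map.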
__host__ void norm(float* &dst, float* src, float* smooth, Dim dim){
int count = dim.channel*dim.height*dim.width;
float* x = src;
float* x2;
cudaMalloc(&x2, count*sizeof(float));
caffe_gpu_mul(count, x, x, x2);
//calculate the per-pixel distance (L2 norm across channels)
float*sum;
float* ones;
cudaMalloc(&sum, dim.height*dim.width*sizeof(float));
cudaMalloc(&ones, dim.channel*sizeof(float));
caffe_gpu_set(dim.channel, 1.0f, ones);
caffe_gpu_gemv(CblasTrans, dim.channel, dim.height*dim.width, 1.0f, x2, ones, 0.0f, sum);
float *dis;
cudaMalloc(&dis, dim.height*dim.width*sizeof(float));
caffe_gpu_powx(dim.height*dim.width, sum, 0.5f, dis);
if (smooth != NULL)
{
cudaMemcpy(smooth, sum, dim.height*dim.width*sizeof(float), cudaMemcpyDeviceToDevice);
int index;
float minv, maxv;
cublasIsamin(Caffe::cublas_handle(), dim.height*dim.width, sum, 1, &index);
cudaMemcpy(&minv, sum + index - 1, sizeof(float), cudaMemcpyDeviceToHost);
cublasIsamax(Caffe::cublas_handle(), dim.height*dim.width, sum, 1, &index);
cudaMemcpy(&maxv, sum + index - 1, sizeof(float), cudaMemcpyDeviceToHost);
caffe_gpu_add_scalar(dim.height*dim.width, -minv, smooth);
caffe_gpu_scal(dim.height*dim.width, 1.0f / (maxv - minv), smooth);
}
//norm
caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, dim.channel, dim.width*dim.height, 1, 1.0f, ones, dis, 0.0f, x2);
caffe_gpu_div(count, src, x2, dst);
cudaFree(x2);
cudaFree(ones);
cudaFree(dis);
cudaFree(sum);
}
DeepAnalogy::DeepAnalogy(){
resizeRatio = 1;
weightLevel = 3;
photoTransfer = false;
file_A = "";
file_BP = "";
path_output = "";
path_model = "";
}
DeepAnalogy::~DeepAnalogy(){
}
void DeepAnalogy::SetRatio(float ratio){
resizeRatio = ratio;
}
void DeepAnalogy::SetBlendWeight(int level){
weightLevel = level;
}
void DeepAnalogy::UsePhotoTransfer(bool flag){
photoTransfer = flag;
}
void DeepAnalogy::SetModel(string path){
path_model =path;
}
void DeepAnalogy::SetA(string f_a){
file_A = f_a;
}
void DeepAnalogy::SetBPrime(string f_bp){
file_BP = f_bp;
}
void DeepAnalogy::SetOutputDir(string f_o){
path_output = f_o;
}
void DeepAnalogy::SetGPU(int no){
cudaSetDevice(no);
}
void DeepAnalogy::LoadInputs(){
float ratio;
Mat ori_AL = imread(file_A);
Mat ori_BPL = imread(file_BP);
if (ori_AL.empty() || ori_BPL.empty())
{
cout << "image cannot read!" << endl;
waitKey();
return;
}
ori_A_cols = ori_AL.cols;
ori_A_rows = ori_AL.rows;
ori_BP_cols = ori_BPL.cols;
ori_BP_rows = ori_BPL.rows;
if (ori_AL.rows > 700)
{
ratio = 700.f / ori_AL.rows;
cv::resize(ori_AL, img_AL, Size(), ratio, ratio, INTER_CUBIC);
ori_AL = img_AL.clone();
}
if (ori_AL.cols > 700)
{
ratio = 700.f / ori_AL.cols;
cv::resize(ori_AL, img_AL, Size(), ratio, ratio, INTER_CUBIC);
ori_AL = img_AL.clone();
}
if (ori_AL.rows < 200)
{
ratio = 200.f / ori_AL.rows;
cv::resize(ori_AL, img_AL, Size(), ratio, ratio, INTER_CUBIC);
ori_AL = img_AL.clone();
}
if (ori_AL.cols < 200)
{
ratio = 200.f / ori_AL.cols;
cv::resize(ori_AL, img_AL, Size(), ratio, ratio, INTER_CUBIC);
ori_AL = img_AL.clone();
}
if (ori_BPL.rows > 700)
{
ratio = 700.f / ori_BPL.rows;
cv::resize(ori_BPL, img_BPL, Size(), ratio, ratio, INTER_CUBIC);
ori_BPL = img_BPL.clone();
}
if (ori_BPL.cols > 700)
{
ratio = 700.f / ori_BPL.cols;
cv::resize(ori_BPL, img_BPL, Size(), ratio, ratio, INTER_CUBIC);
ori_BPL = img_BPL.clone();
}
if (ori_BPL.rows < 200)
{
ratio = 200.f / ori_BPL.rows;
cv::resize(ori_BPL, img_BPL, Size(), ratio, ratio, INTER_CUBIC);
ori_BPL = img_BPL.clone();
}
if (ori_BPL.cols < 200)
{
ratio = 200.f / ori_BPL.cols;
cv::resize(ori_BPL, img_BPL, Size(), ratio, ratio, INTER_CUBIC);
ori_BPL = img_BPL.clone();
}
if ((ori_AL.cols*ori_AL.rows) > 350000)
{
ratio = sqrt((float)(350000) / (float)(ori_AL.cols*ori_AL.rows));
cv::resize(ori_AL, img_AL, Size(), ratio, ratio, INTER_CUBIC);
ori_AL = img_AL.clone();
}
if ((ori_BPL.cols*ori_BPL.rows) > 350000)
{
ratio = sqrt((float)(350000) / (float)(ori_BPL.cols*ori_BPL.rows));
cv::resize(ori_BPL, img_BPL, Size(), ratio, ratio, INTER_CUBIC);
ori_BPL = img_BPL.clone();
}
int maxLateral, minLateral;
maxLateral = max(max(ori_AL.rows, ori_AL.cols), max(ori_BPL.rows, ori_BPL.cols));
minLateral = min(min(ori_AL.rows, ori_AL.cols), min(ori_BPL.rows, ori_BPL.cols));
if (maxLateral > 700 || minLateral < 200)
{
cout << "The sizes of images are not permitted. (One side cannot be larger than 700 or smaller than 200 and the area should not be larger than 350000)" << endl;
waitKey();
return;
}
cur_A_cols = ori_AL.cols;
cur_A_rows = ori_AL.rows;
cur_BP_cols = ori_BPL.cols;
cur_BP_rows = ori_BPL.rows;
if (ori_A_cols != ori_AL.cols)
{
cout << "The input image A has been resized to " << cur_A_cols << " x " << cur_A_rows << ".\n";
}
if (ori_BP_cols != ori_BPL.cols)
{
cout << "The input image B prime has been resized to " << cur_BP_cols << " x " << cur_BP_rows << ".\n";
}
cv::resize(ori_AL, img_AL, Size(), (float)cur_A_cols / ori_AL.cols, (float)cur_A_rows / ori_AL.rows, INTER_CUBIC);
cv::resize(ori_BPL, img_BPL, Size(), (float)cur_BP_cols / ori_BPL.cols, (float)cur_BP_rows / ori_BPL.rows, INTER_CUBIC);
}
void DeepAnalogy::ComputeAnn() {
if (img_BPL.empty()||img_AL.empty())
{
waitKey();
return;
}
const int param_size = 8;
int ann_size_AB, ann_size_BA;//should be assigned later
int *params_host, *params_device_AB, *params_device_BA;
unsigned int *ann_device_AB, *ann_host_AB, *ann_device_BA, *ann_host_BA;
float *annd_device_AB, *annd_host_AB, *annd_device_BA, *annd_host_BA;
char fname[256];
//set parameters
Parameters params;
params.layers.push_back("conv5_1");
params.layers.push_back("conv4_1");
params.layers.push_back("conv3_1");
params.layers.push_back("conv2_1");
params.layers.push_back("conv1_1");
params.layers.push_back("data");
std::vector<float> weight;
weight.push_back(1.0);
switch (weightLevel)
{
case 1:
weight.push_back(0.7);
weight.push_back(0.6);
weight.push_back(0.5);
weight.push_back(0.0);
break;
case 2:
weight.push_back(0.8);
weight.push_back(0.7);
weight.push_back(0.6);
weight.push_back(0.1);
break;
case 3:
weight.push_back(0.9);
weight.push_back(0.8);
weight.push_back(0.7);
weight.push_back(0.2);
break;
default:
weight.push_back(0.9);
weight.push_back(0.8);
weight.push_back(0.7);
weight.push_back(0.2);
break;
}
weight.push_back(0.0);
std::vector<int> sizes;
sizes.push_back(3);
sizes.push_back(3);
sizes.push_back(3);
sizes.push_back(5);
sizes.push_back(5);
sizes.push_back(3);
params.iter = 10;
//scale and enhance
float ratio = resizeRatio;
Mat img_BP, img_A;
cv::resize(img_AL, img_A, Size(), ratio, ratio, INTER_CUBIC);
cv::resize(img_BPL, img_BP, Size(), ratio, ratio, INTER_CUBIC);
std::vector<int> range;
if (img_A.cols > img_A.rows)
{
range.push_back(img_A.cols / 16);
}
else
{
range.push_back(img_A.rows / 16);
}
range.push_back(6);
range.push_back(6);
range.push_back(4);
range.push_back(4);
range.push_back(2);
//load caffe
::google::InitGoogleLogging("deepanalogy");
string model_file = "vgg19/VGG_ILSVRC_19_layers_deploy.prototxt";
string trained_file = "vgg19/VGG_ILSVRC_19_layers.caffemodel";
Classifier classifier_A(path_model + model_file, path_model + trained_file);
Classifier classifier_B(path_model + model_file, path_model + trained_file);
std::vector<float *> data_A, data_AP;
data_A.resize(params.layers.size());
data_AP.resize(params.layers.size());
std::vector<Dim> data_A_size;
data_A_size.resize(params.layers.size());
classifier_A.Predict(img_A, params.layers, data_AP, data_A, data_A_size);
std::vector<float *> data_B, data_BP;
data_B.resize(params.layers.size());
data_BP.resize(params.layers.size());
std::vector<Dim> data_B_size;
data_B_size.resize(params.layers.size());
classifier_B.Predict(img_BP, params.layers, data_B, data_BP, data_B_size);
clock_t start, finish;
double duration;
start = clock();
ann_size_AB = img_AL.cols*img_AL.rows;
ann_size_BA = img_BPL.cols*img_BPL.rows;
params_host = (int *)malloc(param_size * sizeof(int));
ann_host_AB = (unsigned int *)malloc(ann_size_AB * sizeof(unsigned int));
annd_host_AB = (float *)malloc(ann_size_AB * sizeof(float));
ann_host_BA = (unsigned int *)malloc(ann_size_BA * sizeof(unsigned int));
annd_host_BA = (float *)malloc(ann_size_BA * sizeof(float));
cudaMalloc(&params_device_AB, param_size * sizeof(int));
cudaMalloc(&params_device_BA, param_size * sizeof(int));
cudaMalloc(&ann_device_AB, ann_size_AB * sizeof(unsigned int));
cudaMalloc(&annd_device_AB, ann_size_AB * sizeof(float));
cudaMalloc(&ann_device_BA, ann_size_BA * sizeof(unsigned int));
cudaMalloc(&annd_device_BA, ann_size_BA * sizeof(float));
int numlayer = params.layers.size();
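// Coarse-to-fine pass over the selected VGG-19 layers (conv5_1 down to
// conv1_1): at each layer the nearest-neighbor fields A->B and B->A are either
// initialized (top layer) or upsampled from the previous layer, the features
// are L2-normalized and blended with their reconstructed counterparts,
// PatchMatch refines both fields, and the result is propagated to a finer
// layer via average voting followed by deconvolution.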
//feature match
for (int curr_layer = 0; curr_layer < numlayer - 1; curr_layer++)//from 32 to 512
{
//set parameters
params_host[0] = data_A_size[curr_layer].channel;//channels
params_host[1] = data_A_size[curr_layer].height;
params_host[2] = data_A_size[curr_layer].width;
params_host[3] = data_B_size[curr_layer].height;
params_host[4] = data_B_size[curr_layer].width;
params_host[5] = sizes[curr_layer];
params_host[6] = params.iter;
params_host[7] = range[curr_layer];
//copy to device
cudaMemcpy(params_device_AB, params_host, param_size * sizeof(int), cudaMemcpyHostToDevice);
//set parameters
params_host[0] = data_B_size[curr_layer].channel;//channels
params_host[1] = data_B_size[curr_layer].height;
params_host[2] = data_B_size[curr_layer].width;
params_host[3] = data_A_size[curr_layer].height;
params_host[4] = data_A_size[curr_layer].width;
//copy to device
cudaMemcpy(params_device_BA, params_host, param_size * sizeof(int), cudaMemcpyHostToDevice);
////set device pa, device pb, device ann and device annd
dim3 blocksPerGridAB(data_A_size[curr_layer].width / 20 + 1, data_A_size[curr_layer].height / 20 + 1, 1);
dim3 threadsPerBlockAB(20, 20, 1);
ann_size_AB = data_A_size[curr_layer].width* data_A_size[curr_layer].height;
dim3 blocksPerGridBA(data_B_size[curr_layer].width / 20 + 1, data_B_size[curr_layer].height / 20 + 1, 1);
dim3 threadsPerBlockBA(20, 20, 1);
ann_size_BA = data_B_size[curr_layer].width* data_B_size[curr_layer].height;
//initialize ann if needed
if (curr_layer == 0)//initialize, rows and cols both less than 32, just use one block
{
initialAnn_kernel << <blocksPerGridAB, threadsPerBlockAB >> >(ann_device_AB, params_device_AB);
initialAnn_kernel << <blocksPerGridBA, threadsPerBlockBA >> >(ann_device_BA, params_device_BA);
}
else {//upsampling, notice this block's dimension is twice the ann at this point
unsigned int * ann_tmp;
cudaMalloc(&ann_tmp, ann_size_AB * sizeof(unsigned int));
upSample_kernel << <blocksPerGridAB, threadsPerBlockAB >> >(ann_device_AB, ann_tmp, params_device_AB,
data_A_size[curr_layer - 1].width, data_A_size[curr_layer - 1].height);//get new ann_device
cudaMemcpy(ann_device_AB, ann_tmp, ann_size_AB * sizeof(unsigned int), cudaMemcpyDeviceToDevice);
cudaFree(ann_tmp);
cudaMalloc(&ann_tmp, ann_size_BA * sizeof(unsigned int));
upSample_kernel << <blocksPerGridBA, threadsPerBlockBA >> >(ann_device_BA, ann_tmp, params_device_BA,
data_B_size[curr_layer - 1].width, data_B_size[curr_layer - 1].height);//get new ann_device
cudaMemcpy(ann_device_BA, ann_tmp, ann_size_BA * sizeof(unsigned int), cudaMemcpyDeviceToDevice);
cudaFree(ann_tmp);
}
//normalize the two feature maps
float *Ndata_A, *Ndata_AP, *Ndata_B, *Ndata_BP;
float *response_A, *response_BP;
cudaMalloc(&Ndata_A, data_A_size[curr_layer].channel*data_A_size[curr_layer].width*data_A_size[curr_layer].height*sizeof(float));
cudaMalloc(&Ndata_AP, data_A_size[curr_layer].channel*data_A_size[curr_layer].width*data_A_size[curr_layer].height*sizeof(float));
cudaMalloc(&response_A, data_A_size[curr_layer].width*data_A_size[curr_layer].height*sizeof(float));
cudaMalloc(&Ndata_B, data_B_size[curr_layer].channel*data_B_size[curr_layer].width*data_B_size[curr_layer].height*sizeof(float));
cudaMalloc(&Ndata_BP, data_B_size[curr_layer].channel*data_B_size[curr_layer].width*data_B_size[curr_layer].height*sizeof(float));
cudaMalloc(&response_BP, data_B_size[curr_layer].width*data_B_size[curr_layer].height*sizeof(float));
norm(Ndata_A, data_A[curr_layer], response_A, data_A_size[curr_layer]);
norm(Ndata_BP, data_BP[curr_layer], response_BP, data_B_size[curr_layer]);
Mat temp1, temp2;
cv::resize(img_AL, temp1, cv::Size(data_A_size[curr_layer].width, data_A_size[curr_layer].height));
cv::resize(img_BPL, temp2, cv::Size(data_B_size[curr_layer].width, data_B_size[curr_layer].height));
Mat response1, response2;
response1 = Mat(temp1.size(), CV_32FC1);
response2 = Mat(temp2.size(), CV_32FC1);
cudaMemcpy(response1.data, response_A, data_A_size[curr_layer].width*data_A_size[curr_layer].height*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(response2.data, response_BP, data_B_size[curr_layer].width*data_B_size[curr_layer].height*sizeof(float), cudaMemcpyDeviceToHost);
Mat response_byte1, response_byte2;
response1.convertTo(response_byte1, CV_8UC1, 255);
response2.convertTo(response_byte2, CV_8UC1, 255);
blend << <blocksPerGridAB, threadsPerBlockAB >> >(response_A, data_A[curr_layer], data_AP[curr_layer], weight[curr_layer], params_device_AB);
blend << <blocksPerGridBA, threadsPerBlockBA >> >(response_BP, data_BP[curr_layer], data_B[curr_layer], weight[curr_layer], params_device_BA);
norm(Ndata_AP, data_AP[curr_layer], NULL, data_A_size[curr_layer]);
norm(Ndata_B, data_B[curr_layer], NULL, data_B_size[curr_layer]);
//patchmatch
cout << "Finding nearest neighbor field using PatchMatch Algorithm at layer:" << params.layers[curr_layer] << ".\n";
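//ann_device_AB holds, for each position in A/A', the best matching position in B/B';
//ann_device_BA holds the reverse mapping from B/B' back to A/A'.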
patchmatch << <blocksPerGridAB, threadsPerBlockAB >> >(Ndata_AP, Ndata_BP, Ndata_A, Ndata_B, ann_device_AB, annd_device_AB, params_device_AB);
patchmatch << <blocksPerGridBA, threadsPerBlockBA >> >(Ndata_B, Ndata_A, Ndata_BP, Ndata_AP, ann_device_BA, annd_device_BA, params_device_BA);
cudaFree(Ndata_A);
cudaFree(Ndata_AP);
cudaFree(Ndata_B);
cudaFree(Ndata_BP);
cudaFree(response_A);
cudaFree(response_BP);
//deconv
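//For all but the last two layers, warp the matched features with avg_vote and deconvolve
//them to provide an initial guess for data_AP / data_B at layer params.layers[next_layer].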
if (curr_layer < numlayer - 2)
{
int next_layer = curr_layer + 2;
/***************upsample***********************/
// for better deconvolution
params_host[0] = data_A_size[next_layer].channel;//channels
params_host[1] = data_A_size[next_layer].height;
params_host[2] = data_A_size[next_layer].width;
params_host[3] = data_B_size[next_layer].height;
params_host[4] = data_B_size[next_layer].width;
params_host[5] = sizes[next_layer];
params_host[6] = params.iter;
params_host[7] = range[next_layer];
//copy to device
cudaMemcpy(params_device_AB, params_host, param_size * sizeof(int), cudaMemcpyHostToDevice);
//set parameters
params_host[0] = data_B_size[next_layer].channel;//channels
params_host[1] = data_B_size[next_layer].height;
params_host[2] = data_B_size[next_layer].width;
params_host[3] = data_A_size[next_layer].height;
params_host[4] = data_A_size[next_layer].width;
//copy to device
cudaMemcpy(params_device_BA, params_host, param_size * sizeof(int), cudaMemcpyHostToDevice);
////set device pa, device pb, device ann and device annd
dim3 blocksPerGridAB(data_A_size[next_layer].width / 20 + 1, data_A_size[next_layer].height / 20 + 1, 1);
dim3 threadsPerBlockAB(20, 20, 1);
ann_size_AB = data_A_size[next_layer].width* data_A_size[next_layer].height;
dim3 blocksPerGridBA(data_B_size[next_layer].width / 20 + 1, data_B_size[next_layer].height / 20 + 1, 1);
dim3 threadsPerBlockBA(20, 20, 1);
ann_size_BA = data_B_size[next_layer].width* data_B_size[next_layer].height;
unsigned int * ann_tmp;
cudaMalloc(&ann_tmp, ann_size_AB * sizeof(unsigned int));
upSample_kernel << <blocksPerGridAB, threadsPerBlockAB >> >(ann_device_AB, ann_tmp, params_device_AB,
data_A_size[curr_layer].width, data_A_size[curr_layer].height);//get new ann_device
avg_vote << <blocksPerGridAB, threadsPerBlockAB >> >(ann_tmp, data_BP[next_layer], data_AP[next_layer], params_device_AB);
cudaFree(ann_tmp);
cudaMalloc(&ann_tmp, ann_size_BA * sizeof(unsigned int));
upSample_kernel << <blocksPerGridBA, threadsPerBlockBA >> >(ann_device_BA, ann_tmp, params_device_BA,
data_B_size[curr_layer].width, data_B_size[curr_layer].height);//get new ann_devices
avg_vote << <blocksPerGridBA, threadsPerBlockBA >> >(ann_tmp, data_A[next_layer], data_B[next_layer], params_device_BA);
cudaFree(ann_tmp);
/***********************************************/
//set parameters
params_host[0] = data_A_size[curr_layer].channel;//channels
params_host[1] = data_A_size[curr_layer].height;
params_host[2] = data_A_size[curr_layer].width;
params_host[3] = data_B_size[curr_layer].height;
params_host[4] = data_B_size[curr_layer].width;
params_host[5] = sizes[curr_layer];
params_host[6] = params.iter;
params_host[7] = range[curr_layer];
//copy to device
cudaMemcpy(params_device_AB, params_host, param_size * sizeof(int), cudaMemcpyHostToDevice);
//set parameters
params_host[0] = data_B_size[curr_layer].channel;//channels
params_host[1] = data_B_size[curr_layer].height;
params_host[2] = data_B_size[curr_layer].width;
params_host[3] = data_A_size[curr_layer].height;
params_host[4] = data_A_size[curr_layer].width;
//copy to device
cudaMemcpy(params_device_BA, params_host, param_size * sizeof(int), cudaMemcpyHostToDevice);
////set device pa, device pb, device ann and device annd
blocksPerGridAB = dim3(data_A_size[curr_layer].width / 20 + 1, data_A_size[curr_layer].height / 20 + 1, 1);
threadsPerBlockAB = dim3(20, 20, 1);
ann_size_AB = data_A_size[curr_layer].width* data_A_size[curr_layer].height;
blocksPerGridBA = dim3(data_B_size[curr_layer].width / 20 + 1, data_B_size[curr_layer].height / 20 + 1, 1);
threadsPerBlockBA = dim3(20, 20, 1);
ann_size_BA = data_B_size[curr_layer].width* data_B_size[curr_layer].height;
int num1 = data_A_size[curr_layer].channel*data_A_size[curr_layer].width*data_A_size[curr_layer].height;
int num2 = data_A_size[next_layer].channel*data_A_size[next_layer].width*data_A_size[next_layer].height;
float *target;
cudaMalloc(&target, num1 * sizeof(float));
avg_vote << <blocksPerGridAB, threadsPerBlockAB >> >(ann_device_AB, data_BP[curr_layer], target, params_device_AB);
deconv(&classifier_A, params.layers[curr_layer], target, data_A_size[curr_layer], params.layers[next_layer], data_AP[next_layer], data_A_size[next_layer]);
cudaFree(target);
num1 = data_B_size[curr_layer].channel*data_B_size[curr_layer].width*data_B_size[curr_layer].height;
num2 = data_B_size[next_layer].channel*data_B_size[next_layer].width*data_B_size[next_layer].height;
cudaMalloc(&target, num1 * sizeof(float));
avg_vote << <blocksPerGridBA, threadsPerBlockBA >> >(ann_device_BA, data_A[curr_layer], target, params_device_BA);
deconv(&classifier_B, params.layers[curr_layer], target, data_B_size[curr_layer], params.layers[next_layer], data_B[next_layer], data_B_size[next_layer]);
cudaFree(target);
}
}
//upsample
int curr_layer = numlayer - 1;
{
//set parameters
params_host[0] = 3;//channels
params_host[1] = img_AL.rows;
params_host[2] = img_AL.cols;
params_host[3] = img_BPL.rows;
params_host[4] = img_BPL.cols;
params_host[5] = sizes[curr_layer];
params_host[6] = params.iter;
params_host[7] = range[curr_layer];
//copy to device
cudaMemcpy(params_device_AB, params_host, param_size * sizeof(int), cudaMemcpyHostToDevice);
//set parameters
params_host[0] = 3;//channels
params_host[1] = img_BPL.rows;
params_host[2] = img_BPL.cols;
params_host[3] = img_AL.rows;
params_host[4] = img_AL.cols;
//copy to device
cudaMemcpy(params_device_BA, params_host, param_size * sizeof(int), cudaMemcpyHostToDevice);
////set device pa, device pb, device ann and device annd
dim3 blocksPerGridAB(img_AL.cols / 20 + 1, img_AL.rows / 20 + 1, 1);
dim3 threadsPerBlockAB(20, 20, 1);
ann_size_AB = img_AL.cols* img_AL.rows;
dim3 blocksPerGridBA(img_BPL.cols / 20 + 1, img_BPL.rows / 20 + 1, 1);
dim3 threadsPerBlockBA(20, 20, 1);
ann_size_BA = img_BPL.rows* img_BPL.cols;
//upsample
unsigned int * ann_tmp;
cudaMalloc(&ann_tmp, ann_size_AB * sizeof(unsigned int));
upSample_kernel << <blocksPerGridAB, threadsPerBlockAB >> >(ann_device_AB, ann_tmp, params_device_AB,
data_A_size[curr_layer - 1].width, data_A_size[curr_layer - 1].height);//get new ann_device
cudaMemcpy(ann_device_AB, ann_tmp, ann_size_AB * sizeof(unsigned int), cudaMemcpyDeviceToDevice);
cudaFree(ann_tmp);
cudaMalloc(&ann_tmp, ann_size_BA * sizeof(unsigned int));
upSample_kernel << <blocksPerGridBA, threadsPerBlockBA >> >(ann_device_BA, ann_tmp, params_device_BA,
data_B_size[curr_layer - 1].width, data_B_size[curr_layer - 1].height);//get new ann_device
cudaMemcpy(ann_device_BA, ann_tmp, ann_size_BA * sizeof(unsigned int), cudaMemcpyDeviceToDevice);
cudaFree(ann_tmp);
cudaMemcpy(ann_host_AB, ann_device_AB, ann_size_AB * sizeof(unsigned int), cudaMemcpyDeviceToHost);
cudaMemcpy(ann_host_BA, ann_device_BA, ann_size_BA * sizeof(unsigned int), cudaMemcpyDeviceToHost);
//free space in device, only need to free pa and pb which are created temporarily
//image downscale
Mat flow, result_AB, result_BA, err, out, normal;
flow = reconstruct_dflow(img_AL, img_BPL, ann_host_AB, sizes[curr_layer]);
result_AB = reconstruct_avg(img_AL, img_BPL, ann_host_AB, sizes[curr_layer]);
cv::resize(result_AB, out, Size(), (float)ori_A_cols / cur_A_cols, (float)ori_A_rows / cur_A_rows, INTER_CUBIC);
sprintf(fname, "resultAB.png");
imwrite(path_output + fname, out);
flow = reconstruct_dflow(img_BPL, img_AL, ann_host_BA, sizes[curr_layer]);
result_BA = reconstruct_avg(img_BPL, img_AL, ann_host_BA, sizes[curr_layer]);
cv::resize(result_BA, out, Size(), (float)ori_BP_cols / cur_BP_cols, (float)ori_BP_rows / cur_BP_rows, INTER_CUBIC);
sprintf(fname, "resultBA.png");
imwrite(path_output + fname, out);
if (photoTransfer)
{
cout << "Refining photo transfer." << endl;
Mat filtered_AB, filtered_BA, filtered_A, filtered_B, refine_AB, refine_BA;
Mat origin_A, origin_B, res_AB, res_BA;
img_AL.convertTo(origin_A, CV_32FC3, 1/255.0);
img_BPL.convertTo(origin_B, CV_32FC3, 1 / 255.0);
result_AB.convertTo(res_AB, CV_32FC3, 1 / 255.0);
result_BA.convertTo(res_BA, CV_32FC3, 1 / 255.0);
WeightedLeastSquare(filtered_AB, origin_A, res_AB);
WeightedLeastSquare(filtered_BA, origin_B, res_BA);
WeightedLeastSquare(filtered_A, origin_A, origin_A);
WeightedLeastSquare(filtered_B, origin_B, origin_B);
refine_AB = origin_A + filtered_AB - filtered_A;
refine_BA = origin_B + filtered_BA - filtered_B;
sprintf(fname, "refineAB.png");
refine_AB.convertTo(normal, CV_32FC3, 255.0);
cv::resize(normal, out, Size(), (float)ori_A_cols / cur_A_cols, (float)ori_A_rows / cur_A_rows, INTER_CUBIC);
imwrite(path_output + fname, out);
sprintf(fname, "refineBA.png");
refine_BA.convertTo(normal, CV_32FC3, 255.0);
cv::resize(normal, out, Size(), (float)ori_BP_cols / cur_BP_cols, (float)ori_BP_rows / cur_BP_rows, INTER_CUBIC);
imwrite(path_output + fname, out);
}
}
cout << "Saving flow result." << "\n";
//save ann
{
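//flowAB.txt / flowBA.txt contain one "dx dy" pair per pixel in row-major order,
//where (dx, dy) is the offset from a pixel to its matched position in the other image.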
ofstream output1;
char fname[256];
sprintf(fname, "flowAB.txt");
output1.open(path_output + fname);
for (int y = 0; y < img_AL.rows; y++)
for (int x = 0; x < img_AL.cols; x++)
{
unsigned int v = ann_host_AB[y*img_AL.cols + x];
int xbest = INT_TO_X(v);
int ybest = INT_TO_Y(v);
output1 << xbest - x << " " << ybest - y << endl;
}
output1.close();
ofstream output2;
sprintf(fname, "flowBA.txt");
output2.open(path_output + fname);
for (int y = 0; y < img_BPL.rows; y++){
for (int x = 0; x < img_BPL.cols; x++)
{
unsigned int v = ann_host_BA[y*img_BPL.cols + x];
int xbest = INT_TO_X(v);
int ybest = INT_TO_Y(v);
output2 << xbest - x << " " << ybest - y << endl;
}
}
output2.close();
}
cudaFree(params_device_AB);
cudaFree(ann_device_AB);
cudaFree(annd_device_AB);
cudaFree(params_device_BA);
cudaFree(ann_device_BA);
cudaFree(annd_device_BA);
free(ann_host_AB);
free(annd_host_AB);
free(ann_host_BA);
free(annd_host_BA);
free(params_host);
for (int i = 0; i < numlayer; i++)
{
cudaFree(data_A[i]);
cudaFree(data_BP[i]);
}
finish = clock();
duration = (double)(finish - start) / CLOCKS_PER_SEC;
cout << "Finished finding ann. Time : " << duration << endl;
google::ShutdownGoogleLogging();
classifier_A.DeleteNet();
classifier_B.DeleteNet();
}
|
f0284707de32ed50a3693c5a775c8e2277fb54c4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <math.h>
#include <assert.h>
__global__ void randWork(unsigned int *seed, hiprandState_t* states, int *d_rnumbs)
{
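//hiprand_init(seed, subsequence, offset, state): each thread seeds its own generator state
//with a host-provided seed and uses its thread index as the subsequence.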
hiprand_init(seed[threadIdx.x],threadIdx.x,0,&states[threadIdx.x]);
d_rnumbs[threadIdx.x] = hiprand(&states[threadIdx.x])% 100;
}
int main(){
int nThreads = 10;
hiprandState_t* states;
unsigned int *h_seed = (unsigned int*)malloc(sizeof(unsigned int)*nThreads);
srand(time(NULL));
for(int i=0;i<nThreads;i++)
{
h_seed[i] = rand()%100000;
}
int *rnumbs = (int*)malloc(sizeof(int)*nThreads);
int *d_rnumbs; // device buffer, allocated with hipMalloc below
hipMalloc((void**)&d_rnumbs, sizeof(int)*nThreads);
hipMalloc((void**) &states, nThreads * sizeof(hiprandState_t));
unsigned int *d_seed;
hipMalloc((void**)&d_seed, sizeof(unsigned int)*nThreads);
hipMemcpy(d_seed, h_seed, sizeof(unsigned int)*nThreads,hipMemcpyHostToDevice);
// veja somente parametros d_seed e states
hipLaunchKernelGGL(( randWork), dim3(1),dim3(nThreads), 0, 0, d_seed ,states,d_rnumbs);
hipMemcpy(rnumbs, d_rnumbs, sizeof(int)*nThreads,hipMemcpyDeviceToHost);
hipFree(states); hipFree(d_seed); hipFree(d_rnumbs);
free(h_seed);
printf("Random Numbers:\n");
for (int i = 0; i < nThreads; i++)
printf("%d: %d\n", i, rnumbs[i]);
return 0;
}
|
f0284707de32ed50a3693c5a775c8e2277fb54c4.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <curand.h>
#include <curand_kernel.h>
#include <math.h>
#include <assert.h>
__global__ void randWork(unsigned int *seed, curandState_t* states, int *d_rnumbs)
{
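//curand_init(seed, subsequence, offset, state): each thread seeds its own generator state
//with a host-provided seed and uses its thread index as the subsequence.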
curand_init(seed[threadIdx.x],threadIdx.x,0,&states[threadIdx.x]);
d_rnumbs[threadIdx.x] = curand(&states[threadIdx.x])% 100;
}
int main(){
int nThreads = 10;
curandState_t* states;
unsigned int *h_seed = (unsigned int*)malloc(sizeof(unsigned int)*nThreads);
srand(time(NULL));
for(int i=0;i<nThreads;i++)
{
h_seed[i] = rand()%100000;
}
int *rnumbs = (int*)malloc(sizeof(int)*nThreads);
int *d_rnumbs; // device buffer, allocated with cudaMalloc below
cudaMalloc((void**)&d_rnumbs, sizeof(int)*nThreads);
cudaMalloc((void**) &states, nThreads * sizeof(curandState_t));
unsigned int *d_seed;
cudaMalloc((void**)&d_seed, sizeof(unsigned int)*nThreads);
cudaMemcpy(d_seed, h_seed, sizeof(unsigned int)*nThreads,cudaMemcpyHostToDevice);
// veja somente parametros d_seed e states
randWork<<<1,nThreads>>>(d_seed ,states,d_rnumbs);
cudaMemcpy(rnumbs, d_rnumbs, sizeof(int)*nThreads,cudaMemcpyDeviceToHost);
cudaFree(states); cudaFree(d_seed); cudaFree(d_rnumbs);
free(h_seed);
printf("Random Numbers:\n");
for (int i = 0; i < nThreads; i++)
printf("%d: %d\n", i, rnumbs[i]);
return 0;
}
|
76b581917a16959ce8b781e35bd83cd00224d184.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/////////////////////////////////////////////////////////////////////////
//
// Note: this code multiplies two 2-D matrices (dot product) and measures the execution time.
//
/////////////////////////////////////////////////////////////////////////
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
////////////////////////
// Matrix struct definition
////////////////////////
typedef struct matrix{
int col;
int row;
double* element;
}Matrix;
//////////////////////////////////
// Kernel that computes the matrix product
//////////////////////////////////
__global__
void gpu_dot(const double* x,const double* w,double* out,const int* x_row,const int* w_row, const int* w_col){
int i,j,k;
double sum = 0;
//i holds this thread's unique index along y (there are as many threads as rows of the result matrix)
//j holds this thread's unique index along x (there are as many threads as columns of the result matrix)
i = blockIdx.y * blockDim.y + threadIdx.y;
j = blockIdx.x * blockDim.x + threadIdx.x;
for(k=0; k<(*w_col); k++){
sum += x[i*(*x_row)+k] * w[k*(*w_row)+j];
}
out[i*(*w_row)+j] = sum;
}
////////////////////////////////////////////////////
// Prototypes of the functions that operate on the Matrix struct
////////////////////////////////////////////////////
//constructor
void Matrix_constructor(Matrix* self,const int col,const int row);
//fill each matrix element with its element index
void Matrix_init(Matrix* self);
//zero-clear the matrix
void Matrix_zeros(Matrix* self);
//print the whole contents of the matrix
void Matrix_print(Matrix* self);
//free the elements held by the Matrix struct
void Matrix_free(Matrix* self);
///////////////////////
// main function
///////////////////////
int main(){
//
time_t start,stop;
//
Matrix x;
Matrix w;
Matrix out;//
//
Matrix_constructor(&x,2000,3000);
Matrix_constructor(&w,3000,5000);
Matrix_constructor(&out,2000,5000);
//
Matrix_init(&x);
Matrix_init(&w);
//0
Matrix_zeros(&out);
//()
double* gpu_x;
double* gpu_w;
double* gpu_out;
//()
int* x_row;
int* w_row;
int* w_col;
//cudaMalloc
start = clock();
//
hipMalloc(&gpu_x,sizeof(double)*x.col*x.row);
hipMalloc(&gpu_w,sizeof(double)*w.col*w.row);
hipMalloc(&gpu_out,sizeof(double)*out.col*out.row);
//
hipMalloc(&x_row,sizeof(int));
hipMalloc(&w_row,sizeof(int));
hipMalloc(&w_col,sizeof(int));
//cudaMalloc
stop = clock();
//cudaMalloc
printf("hipMalloc:%lf s\n",(double)(stop-start)/CLOCKS_PER_SEC);
//cudaMemcpy
start = clock();
//
hipMemcpy(gpu_x,x.element,sizeof(double)*x.col*x.row,hipMemcpyHostToDevice);
hipMemcpy(gpu_w,w.element,sizeof(double)*w.col*w.row,hipMemcpyHostToDevice);
//
hipMemcpy(x_row,&(x.row),sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(w_row,&(w.row),sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(w_col,&(w.col),sizeof(int),hipMemcpyHostToDevice);
//cudaMemcpy
stop = clock();
//cudaMemcpy
printf("hipMemcpy(Host_to_Device):%lf s\n",(double)(stop-start)/CLOCKS_PER_SEC);
//
start = clock();
//
hipLaunchKernelGGL(( gpu_dot), dim3(dim3(out.row,out.col,1)),dim3(dim3(1,1,1)), 0, 0, gpu_x,gpu_w,gpu_out,x_row,w_row,w_col);
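//The launch above uses one block per output element with a single thread per block (dim3(1,1,1)),
//so each block/thread pair computes exactly one element of the output matrix.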
hipDeviceSynchronize();
//
stop = clock();
//
printf("dot product:%lf s\n",(double)(stop-start)/CLOCKS_PER_SEC);
//
start = clock();
//
hipMemcpy(out.element,gpu_out,sizeof(double)*out.col*out.row,hipMemcpyDeviceToHost);
//
stop = clock();
//
printf("hipMemcpy(Device_to_Host):%lf s\n",(double)(stop-start)/CLOCKS_PER_SEC);
//
hipFree(gpu_x);
hipFree(gpu_w);
hipFree(gpu_out);
hipFree(x_row);
hipFree(w_row);
hipFree(w_col);
Matrix_free(&x);
Matrix_free(&w);
Matrix_free(&out);
return 0;
}
///////////////////////////////////////////////////////////////////////////////////////////////
// Implementations of the functions that operate on the Matrix struct (see the prototype declarations above for descriptions)
///////////////////////////////////////////////////////////////////////////////////////////////
void Matrix_constructor(Matrix* self,const int col,const int row){
self->col = col;
self->row = row;
self->element = (double*)malloc(sizeof(double)*col*row);
}
void Matrix_init(Matrix* self){
for(int i=0;i<self->col;i++){
for(int j=0;j<self->row;j++){
self->element[i*self->row+j] = i*self->row+j;
}
}
}
void Matrix_zeros(Matrix* self){
for(int i=0;i<self->col;i++){
for(int j=0;j<self->row;j++){
self->element[i*self->row+j] = 0;
}
}
}
void Matrix_print(Matrix* self){
for(int i=0;i<self->col;i++){
for(int j=0;j<self->row;j++){
printf("[%lf]",self->element[i*self->row+j]);
}
printf("\n");
}
}
void Matrix_free(Matrix* self){
free(self->element);
self->element = NULL;
}
|
76b581917a16959ce8b781e35bd83cd00224d184.cu
|
/////////////////////////////////////////////////////////////////////////
//
// Note: this code multiplies two 2-D matrices (dot product) and measures the execution time.
//
/////////////////////////////////////////////////////////////////////////
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
////////////////////////
// Matrix struct definition
////////////////////////
typedef struct matrix{
int col;
int row;
double* element;
}Matrix;
//////////////////////////////////
// Kernel that computes the matrix product
//////////////////////////////////
__global__
void gpu_dot(const double* x,const double* w,double* out,const int* x_row,const int* w_row, const int* w_col){
int i,j,k;
double sum = 0;
//i holds this thread's unique index along y (there are as many threads as rows of the result matrix)
//j holds this thread's unique index along x (there are as many threads as columns of the result matrix)
i = blockIdx.y * blockDim.y + threadIdx.y;
j = blockIdx.x * blockDim.x + threadIdx.x;
for(k=0; k<(*w_col); k++){
sum += x[i*(*x_row)+k] * w[k*(*w_row)+j];
}
out[i*(*w_row)+j] = sum;
}
////////////////////////////////////////////////////
// Prototypes of the functions that operate on the Matrix struct
////////////////////////////////////////////////////
//constructor
void Matrix_constructor(Matrix* self,const int col,const int row);
//fill each matrix element with its element index
void Matrix_init(Matrix* self);
//zero-clear the matrix
void Matrix_zeros(Matrix* self);
//print the whole contents of the matrix
void Matrix_print(Matrix* self);
//free the elements held by the Matrix struct
void Matrix_free(Matrix* self);
///////////////////////
// main function
///////////////////////
int main(){
//timer variables
time_t start,stop;
//matrix structs
Matrix x;
Matrix w;
Matrix out;//holds the computation result
//construct the matrix structs
Matrix_constructor(&x,2000,3000);
Matrix_constructor(&w,3000,5000);
Matrix_constructor(&out,2000,5000);
//fill the input matrices with values
Matrix_init(&x);
Matrix_init(&w);
//zero-clear the output matrix
Matrix_zeros(&out);
//variables used as kernel arguments (used as arrays)
double* gpu_x;
double* gpu_w;
double* gpu_out;
//variables used as kernel arguments (used as constants)
int* x_row;
int* w_row;
int* w_col;
//start measuring the time taken by cudaMalloc
start = clock();
//dynamically allocate the arrays used as kernel arguments
cudaMalloc(&gpu_x,sizeof(double)*x.col*x.row);
cudaMalloc(&gpu_w,sizeof(double)*w.col*w.row);
cudaMalloc(&gpu_out,sizeof(double)*out.col*out.row);
//dynamically allocate the constants used as kernel arguments
cudaMalloc(&x_row,sizeof(int));
cudaMalloc(&w_row,sizeof(int));
cudaMalloc(&w_col,sizeof(int));
//stop measuring the time taken by cudaMalloc
stop = clock();
//print the time taken by cudaMalloc
printf("cudaMalloc:%lf s\n",(double)(stop-start)/CLOCKS_PER_SEC);
//start measuring the time taken by cudaMemcpy
start = clock();
//copy the contents of the input matrices into the kernel-argument variables
cudaMemcpy(gpu_x,x.element,sizeof(double)*x.col*x.row,cudaMemcpyHostToDevice);
cudaMemcpy(gpu_w,w.element,sizeof(double)*w.col*w.row,cudaMemcpyHostToDevice);
//copy the constants used in the computation into the kernel-argument variables
cudaMemcpy(x_row,&(x.row),sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(w_row,&(w.row),sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(w_col,&(w.col),sizeof(int),cudaMemcpyHostToDevice);
//stop measuring the time taken by cudaMemcpy
stop = clock();
//print the time taken by cudaMemcpy
printf("cudaMemcpy(Host_to_Device):%lf s\n",(double)(stop-start)/CLOCKS_PER_SEC);
//start measuring the time taken by the dot product
start = clock();
//run the dot product kernel
gpu_dot<<<dim3(out.row,out.col,1),dim3(1,1,1)>>>(gpu_x,gpu_w,gpu_out,x_row,w_row,w_col);
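//The launch above uses one block per output element with a single thread per block (dim3(1,1,1)),
//so each block/thread pair computes exactly one element of the output matrix.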
cudaDeviceSynchronize();
//stop measuring the time taken by the dot product
stop = clock();
//print the time taken by the dot product
printf("dot product:%lf s\n",(double)(stop-start)/CLOCKS_PER_SEC);
//start measuring the time taken to copy the result from device to host
start = clock();
//copy the result from the device variable back to the host variable
cudaMemcpy(out.element,gpu_out,sizeof(double)*out.col*out.row,cudaMemcpyDeviceToHost);
//stop measuring the time taken to copy the result from device to host
stop = clock();
//print the time taken to copy the result from device to host
printf("cudaMemcpy(Device_to_Host):%lf s\n",(double)(stop-start)/CLOCKS_PER_SEC);
//free the memory dynamically allocated on both the host and the device
cudaFree(gpu_x);
cudaFree(gpu_w);
cudaFree(gpu_out);
cudaFree(x_row);
cudaFree(w_row);
cudaFree(w_col);
Matrix_free(&x);
Matrix_free(&w);
Matrix_free(&out);
return 0;
}
///////////////////////////////////////////////////////////////////////////////////////////////
// Implementations of the functions that operate on the Matrix struct (see the prototype declarations above for descriptions)
///////////////////////////////////////////////////////////////////////////////////////////////
void Matrix_constructor(Matrix* self,const int col,const int row){
self->col = col;
self->row = row;
self->element = (double*)malloc(sizeof(double)*col*row);
}
void Matrix_init(Matrix* self){
for(int i=0;i<self->col;i++){
for(int j=0;j<self->row;j++){
self->element[i*self->row+j] = i*self->row+j;
}
}
}
void Matrix_zeros(Matrix* self){
for(int i=0;i<self->col;i++){
for(int j=0;j<self->row;j++){
self->element[i*self->row+j] = 0;
}
}
}
void Matrix_print(Matrix* self){
for(int i=0;i<self->col;i++){
for(int j=0;j<self->row;j++){
printf("[%lf]",self->element[i*self->row+j]);
}
printf("\n");
}
}
void Matrix_free(Matrix* self){
free(self->element);
self->element = NULL;
}
|
454055c709b0a6a8431fe9671c3c77a314c6d453.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void
kernel( void ) {
}
int main( void ) {
hipLaunchKernelGGL(( kernel), dim3(1),dim3(1), 0, 0, );
printf( "Hello, World!\n" );
return 0;
}
|
454055c709b0a6a8431fe9671c3c77a314c6d453.cu
|
#include <stdio.h>
__global__ void
kernel( void ) {
}
int main( void ) {
kernel<<<1,1>>>();
printf( "Hello, World!\n" );
return 0;
}
|
8a32f82adcdcae89acfa705ba3716aa2135dd8d5.hip
|
// !!! This is a file automatically generated by hipify!!!
/*******************************************
* vect_add.cu
* By: Thomas Kinch
* 4/11/18
* A basic add vector program using CUDA.
*******************************************/
#include <hip/hip_runtime.h>
#include <stdio.h>
#define threads 512
__global__ void vecAdd(int *d_a, int *d_b, int *d_c){
int i = threadIdx.x;
if(i < threads){
d_c[i] = d_a[i] + d_b[i];
}
}
int main(){
int *h_a, *h_b, *h_c; //Host variables
int *d_a, *d_b, *d_c; //Device
int size = threads * sizeof(int);
//Malloc memory for host variables
h_a = (int*)malloc(size);
h_b = (int*)malloc(size);
h_c = (int*)malloc(size);
//Define Host variables
for(int i = 0; i < threads; i++){
h_a[i] = i;
h_b[i] = i;
}
//Malloc memory for device variables
hipMalloc((void**)&d_a, size);
hipMalloc((void**)&d_b, size);
hipMalloc((void**)&d_c, size);
//Memcpy - copy host values to device
hipMemcpy(d_a, h_a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, size, hipMemcpyHostToDevice);
//Add the Vectors
hipLaunchKernelGGL(( vecAdd), dim3(1), dim3(threads), 0, 0, d_a, d_b, d_c);
hipDeviceSynchronize();
//Copy device result to the host
hipMemcpy(h_c, d_c, size, hipMemcpyDeviceToHost);
//Print host_c variables
for(int i = 0; i < 20; i++){
printf("h_c[%d] = %d\n", i, h_c[i]);
}
//Free memory
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
free(h_a);
free(h_b);
free(h_c);
return 0;
}
|
8a32f82adcdcae89acfa705ba3716aa2135dd8d5.cu
|
/*******************************************
* vect_add.cu
* By: Thomas Kinch
* 4/11/18
* A basic add vector program using CUDA.
*******************************************/
#include <cuda.h>
#include <stdio.h>
#define threads 512
__global__ void vecAdd(int *d_a, int *d_b, int *d_c){
int i = threadIdx.x;
if(i < threads){
d_c[i] = d_a[i] + d_b[i];
}
}
int main(){
int *h_a, *h_b, *h_c; //Host variables
int *d_a, *d_b, *d_c; //Device
int size = threads * sizeof(int);
//Malloc memory for host variables
h_a = (int*)malloc(size);
h_b = (int*)malloc(size);
h_c = (int*)malloc(size);
//Define Host variables
for(int i = 0; i < threads; i++){
h_a[i] = i;
h_b[i] = i;
}
//Malloc memory for device variables
cudaMalloc((void**)&d_a, size);
cudaMalloc((void**)&d_b, size);
cudaMalloc((void**)&d_c, size);
//Memcpy - copy host values to device
cudaMemcpy(d_a, h_a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, size, cudaMemcpyHostToDevice);
//Add the Vectors
vecAdd<<<1, threads>>>(d_a, d_b, d_c);
cudaThreadSynchronize();
//Copy device result to the host
cudaMemcpy(h_c, d_c, size, cudaMemcpyDeviceToHost);
//Print host_c variables
for(int i = 0; i < 20; i++){
printf("h_c[%d] = %d\n", i, h_c[i]);
}
//Free memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
free(h_a);
free(h_b);
free(h_c);
return 0;
}
|
1045f1eb45ee7c1b58ee14aed3ef346cdf4c6da6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
// Kernel function to add the elements of two arrays
__global__
void add(int n, float *x, float *y)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
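//grid-stride loop: each thread starts at its global index and advances by the total
//number of threads in the grid, so any n is covered regardless of the launch size.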
for (int i = index; i < n; i += stride)
y[i] = x[i] + y[i];
}
int main(void)
{
int N = 1<<20;
float *x, *y;
// Allocate Unified Memory accessible from CPU or GPU
hipMallocManaged(&x, N*sizeof(float));
hipMallocManaged(&y, N*sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run kernel on 1M elements on the GPU
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( add), dim3(numBlocks), dim3(blockSize), 0, 0, N, x, y);
// Wait for GPU to finish before accessing on host
hipDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
hipFree(x);
hipFree(y);
return 0;
}
/*
C:\tools
nvprof add_cuda2.exe
==19500== NVPROF is profiling process 19500, command: add_cuda2.exe
Max error: 0
==19500== Profiling application: add_cuda2.exe
==19500== Profiling result:
Type Time(%) Time Calls Avg Min Max Name
GPU activities: 100.00% 131.62us 1 131.62us 131.62us 131.62us add(int, float*, float*)
API calls: 75.64% 222.60ms 2 111.30ms 977.70us 221.62ms hipMallocManaged
18.73% 55.133ms 1 55.133ms 55.133ms 55.133ms hipDevicePrimaryCtxRelease
4.51% 13.269ms 1 13.269ms 13.269ms 13.269ms cudaLaunchKernel
0.85% 2.5044ms 2 1.2522ms 756.50us 1.7479ms hipFree
0.10% 286.50us 97 2.9530us 100ns 124.30us hipDeviceGetAttribute
0.09% 256.50us 1 256.50us 256.50us 256.50us hipDeviceSynchronize
0.07% 199.50us 1 199.50us 199.50us 199.50us hipModuleUnload
0.01% 23.600us 1 23.600us 23.600us 23.600us cuDeviceTotalMem
0.00% 10.400us 1 10.400us 10.400us 10.400us hipDeviceGetPCIBusId
0.00% 3.0000us 3 1.0000us 200ns 2.1000us hipGetDeviceCount
0.00% 1.2000us 2 600ns 100ns 1.1000us hipDeviceGet
0.00% 700ns 1 700ns 700ns 700ns hipDeviceGetName
0.00% 200ns 1 200ns 200ns 200ns hipDeviceGetUuid
0.00% 200ns 1 200ns 200ns 200ns cuDeviceGetLuid
==19500== Unified Memory profiling result:
Device "Quadro T1000 (0)"
Count Avg Size Min Size Max Size Total Size Total Time Name
258 31.751KB 4.0000KB 32.000KB 8.000000MB 9.149400ms Host To Device
384 32.000KB 32.000KB 32.000KB 12.00000MB 78.10820ms Device To Host
*/
|
1045f1eb45ee7c1b58ee14aed3ef346cdf4c6da6.cu
|
#include <iostream>
#include <math.h>
// Kernel function to add the elements of two arrays
__global__
void add(int n, float *x, float *y)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
y[i] = x[i] + y[i];
}
int main(void)
{
int N = 1<<20;
float *x, *y;
// Allocate Unified Memory – accessible from CPU or GPU
cudaMallocManaged(&x, N*sizeof(float));
cudaMallocManaged(&y, N*sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run kernel on 1M elements on the GPU
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
add<<<numBlocks, blockSize>>>(N, x, y);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
cudaFree(x);
cudaFree(y);
return 0;
}
/*
C:\tools
λ nvprof add_cuda2.exe
==19500== NVPROF is profiling process 19500, command: add_cuda2.exe
Max error: 0
==19500== Profiling application: add_cuda2.exe
==19500== Profiling result:
Type Time(%) Time Calls Avg Min Max Name
GPU activities: 100.00% 131.62us 1 131.62us 131.62us 131.62us add(int, float*, float*)
API calls: 75.64% 222.60ms 2 111.30ms 977.70us 221.62ms cudaMallocManaged
18.73% 55.133ms 1 55.133ms 55.133ms 55.133ms cuDevicePrimaryCtxRelease
4.51% 13.269ms 1 13.269ms 13.269ms 13.269ms cudaLaunchKernel
0.85% 2.5044ms 2 1.2522ms 756.50us 1.7479ms cudaFree
0.10% 286.50us 97 2.9530us 100ns 124.30us cuDeviceGetAttribute
0.09% 256.50us 1 256.50us 256.50us 256.50us cudaDeviceSynchronize
0.07% 199.50us 1 199.50us 199.50us 199.50us cuModuleUnload
0.01% 23.600us 1 23.600us 23.600us 23.600us cuDeviceTotalMem
0.00% 10.400us 1 10.400us 10.400us 10.400us cuDeviceGetPCIBusId
0.00% 3.0000us 3 1.0000us 200ns 2.1000us cuDeviceGetCount
0.00% 1.2000us 2 600ns 100ns 1.1000us cuDeviceGet
0.00% 700ns 1 700ns 700ns 700ns cuDeviceGetName
0.00% 200ns 1 200ns 200ns 200ns cuDeviceGetUuid
0.00% 200ns 1 200ns 200ns 200ns cuDeviceGetLuid
==19500== Unified Memory profiling result:
Device "Quadro T1000 (0)"
Count Avg Size Min Size Max Size Total Size Total Time Name
258 31.751KB 4.0000KB 32.000KB 8.000000MB 9.149400ms Host To Device
384 32.000KB 32.000KB 32.000KB 12.00000MB 78.10820ms Device To Host
*/
|
04783473ae9d8cffd5e1f64138aa98bec21f7a7f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/threshold_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void ThresholdForward(const int n, const Dtype threshold,
const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] > threshold ? 1 : 0;
}
}
template <typename Dtype>
void ThresholdLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ThresholdForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS),
0, Caffe::cuda_stream(),
count, threshold_, bottom_data, top_data);
CUDA_CHECK(hipStreamSynchronize(Caffe::cuda_stream()));
}
INSTANTIATE_LAYER_GPU_FORWARD(ThresholdLayer);
} // namespace caffe
|
04783473ae9d8cffd5e1f64138aa98bec21f7a7f.cu
|
#include <vector>
#include "caffe/layers/threshold_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void ThresholdForward(const int n, const Dtype threshold,
const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] > threshold ? 1 : 0;
}
}
template <typename Dtype>
void ThresholdLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
ThresholdForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS,
0, Caffe::cuda_stream()>>>(
count, threshold_, bottom_data, top_data);
CUDA_CHECK(cudaStreamSynchronize(Caffe::cuda_stream()));
}
INSTANTIATE_LAYER_GPU_FORWARD(ThresholdLayer);
} // namespace caffe
|
f3b501e13096bb9e0404b9499dbdc26e7b48f6c5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <fstream>
#include <vector>
#include <algorithm>
#include <omp.h>
#include <math.h> /* fabsf */
#include <string.h>
#include <stdlib.h>
#include <time.h>
#define DEBUG 0
//Error check-----
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
//Error check-----
//This is a very good idea to wrap your calls with that function.. Otherwise you will not be able to see what is the error.
//Moreover, you may also want to look at how to use cuda-memcheck and cuda-gdb for debugging.
__global__ void scalesk(){
//TO DO: GPU SCALE
}
void wrapper(int* adj, int* xadj, int* tadj, int* txadj, double* rv, double* cv, int* nov, int* nnz, int siter){
printf("Wrapper here! \n");
//TO DO: DRIVER CODE
//NOTE: NO_BLOCKS and NO_THREADS still need to be defined as part of the driver code.
hipEvent_t start, stop;
float elapsedTime = 0.0f;
hipEventCreate(&start);
hipEventRecord(start, 0);
hipLaunchKernelGGL(( scalesk), dim3(NO_BLOCKS),dim3(NO_THREADS), 0, 0, );
gpuErrchk( hipDeviceSynchronize() );
hipEventCreate(&stop);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
printf("GPU scale took: %f s\n", elapsedTime/1000);
}
|
f3b501e13096bb9e0404b9499dbdc26e7b48f6c5.cu
|
#include <iostream>
#include <fstream>
#include <vector>
#include <algorithm>
#include <omp.h>
#include <math.h> /* fabsf */
#include <string.h>
#include <stdlib.h>
#include <time.h>
#define DEBUG 0
//Error check-----
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
//Error check-----
//This is a very good idea to wrap your calls with that function.. Otherwise you will not be able to see what is the error.
//Moreover, you may also want to look at how to use cuda-memcheck and cuda-gdb for debugging.
__global__ void scalesk(){
//TO DO: GPU SCALE
}
void wrapper(int* adj, int* xadj, int* tadj, int* txadj, double* rv, double* cv, int* nov, int* nnz, int siter){
printf("Wrapper here! \n");
//TO DO: DRIVER CODE
//NOTE: NO_BLOCKS and NO_THREADS still need to be defined as part of the driver code.
cudaEvent_t start, stop;
float elapsedTime = 0.0f;
cudaEventCreate(&start);
cudaEventRecord(start, 0);
scalesk<<<NO_BLOCKS,NO_THREADS>>>();
gpuErrchk( cudaDeviceSynchronize() );
cudaEventCreate(&stop);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("GPU scale took: %f s\n", elapsedTime/1000);
}
|
99ce09acc308ad5addc03106e85371ae606272b8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <time.h>
#include <stdio.h>
#include <opencv2\opencv.hpp>
#include <opencv\highgui.h>
#include <opencv\cxcore.h>
#include <opencv\cv.h>
#include <opencv2\core\mat.hpp>
using namespace cv;
using namespace std;
#define clipping(x) x>255?255:x<0?0:x
#define WIDTH 1920 // 1920 or 416
#define HEIGHT 1080 // 1080 or 240
#define NUM_FRAME 240 //240 or 300
#define VIDEO_NUM 1
#define USEGPU 1
hipError_t addWithCuda(unsigned char *outArr, unsigned char *inputArr, unsigned int size);
__global__ void addKernel(unsigned char *dev_out, unsigned char *dev_in, int VideoSize)
{
//https://en.wikipedia.org/wiki/YUV#Y.E2.80.B2UV420p_.28and_Y.E2.80.B2V12_or_YV12.29_to_RGB888_conversion !
int pixel_ROW = blockIdx.x * blockDim.x + threadIdx.y; //x pixel coordinate.
int pixel_COLUMN = blockIdx.y * blockDim.y + threadIdx.x; //y pixel coordinate.
if (pixel_COLUMN >= HEIGHT || pixel_ROW >= WIDTH) return; //without this guard, extra threads keep running and overwrite memory outside the frame with 0!
// The area outside the image resolution must always be clipped off!!!
unsigned char y = dev_in[pixel_COLUMN*WIDTH + pixel_ROW];
unsigned char u = dev_in[(int)(pixel_COLUMN / 2)*(WIDTH / 2) + (int)(pixel_ROW / 2) + (WIDTH*HEIGHT)];
unsigned char v = dev_in[(int)(pixel_COLUMN / 2)*(WIDTH / 2) + (int)(pixel_ROW / 2) + (WIDTH*HEIGHT) + ((WIDTH*HEIGHT) / 4)];
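// The indexing above assumes a planar YUV 4:2:0 buffer: WIDTH*HEIGHT bytes of Y first,
// followed by two quarter-resolution (WIDTH/2 x HEIGHT/2) chroma planes stored back to back
// (whether the first chroma plane is U or V depends on the source file, e.g. I420 vs. YV12).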
unsigned char b = y + (1.370705 * (v - 128));
unsigned char g = y - (0.698001 * (v - 128)) - (0.337633 * (u - 128));
unsigned char r = y + (1.732446 * (u - 128));
dev_out[pixel_COLUMN*WIDTH + pixel_ROW] = b;
dev_out[pixel_COLUMN*WIDTH + pixel_ROW + (WIDTH*HEIGHT)] = g;
dev_out[pixel_COLUMN*WIDTH + pixel_ROW + (WIDTH*HEIGHT) + (WIDTH*HEIGHT)] = r;
}
int main()
{
hipError_t cudaStatus;
//const int arraySize = 5;
//const int a[arraySize] = { 1, 2, 3, 4, 5 };
//const int b[arraySize] = { 10, 20, 30, 40, 50 };
//int c[arraySize] = { 0 };
//////////////////////////////////////////////////////////////////
unsigned char **frame_no_loss_yuv;
frame_no_loss_yuv = new unsigned char *[NUM_FRAME]; //buffers for reading as many frames as the video contains.
for (int i = 0; i < NUM_FRAME; i++) {
frame_no_loss_yuv[i] = new unsigned char[WIDTH*HEIGHT * 3 / 2];
}
// Kimono1_1920x1080_24.yuv or rec.yuv
FILE* infile = fopen("Kimono1_1920x1080_24.yuv", "rb"); //open the YUV file in binary mode
if (!infile) { //if the file does not exist
printf("There isn't file!\n");
}
for (int i = 0; i < NUM_FRAME; i++) {
fread(frame_no_loss_yuv[i], 1, WIDTH*HEIGHT * 3 / 2, infile);
#if !USEGPU //CPU
Mat mYUV(HEIGHT + HEIGHT / 2, WIDTH, CV_8UC1, (void*)frame_no_loss_yuv[i]);
Mat mRGB(HEIGHT, WIDTH, CV_8UC3);
cvtColor(mYUV, mRGB, CV_YUV2RGB_YV12, 3);
#else //GPU
unsigned char *ArrRGB = new unsigned char[WIDTH * HEIGHT * 3 * 1];
cudaStatus = addWithCuda(ArrRGB, frame_no_loss_yuv[i], WIDTH*HEIGHT); //run the conversion; the returned status tells whether it succeeded.
if (cudaStatus != hipSuccess) { //it failed
fprintf(stderr, "addWithCuda failed!");
return 1;
}
// At this point ArrRGB holds the B, G and R planes, one full frame each, back to back.
//unsignedchar* to MATRIX
//ANSWER -> cv::Mat my_mat(rows, cols, CV_8UC1, &buf[0]); //in case of BGR image use CV_8UC3
//cvtColor(frame, frame, CV_RGB2BGR);
//http://stackoverflow.com/questions/15821253/merging-three-grayscale-r-g-b-images-into-a-single-color-image-in-opencv
Mat matB(HEIGHT, WIDTH, CV_8UC1, &ArrRGB[0]); //
Mat matG(HEIGHT, WIDTH, CV_8UC1, &ArrRGB[WIDTH*HEIGHT]); //
Mat matR(HEIGHT, WIDTH, CV_8UC1, &ArrRGB[WIDTH*HEIGHT*2]); //
vector<Mat> array_to_merge;
array_to_merge.push_back(matR);
array_to_merge.push_back(matG);
array_to_merge.push_back(matB);
Mat mRGB;
merge(array_to_merge, mRGB);
free(ArrRGB);
matB.release();
matG.release();
matR.release();
#endif
imshow("DISPLAY_YUV", mRGB); //temp
cvWaitKey(1); //the code does not run without this.
#if !USEGPU
mYUV.release();
#endif
mRGB.release(); //temp
}
//if you ever need fseek, see http://forum.falinux.com/zbxe/index.php?document_srl=408250&mid=C_LIB
///////////////////////////////////////////////////////////////////
// Add vectors in parallel.
//printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
// c[0], c[1], c[2], c[3], c[4]);
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
#if USEGPU
#endif
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
hipError_t addWithCuda(unsigned char *outArr, unsigned char *inputArr, unsigned int VideoSize)
{
int maxThreadsX, maxThreadsY;
hipDeviceGetAttribute(&maxThreadsX, hipDeviceAttributeMaxGridDimX, 0);
hipDeviceGetAttribute(&maxThreadsY, hipDeviceAttributeMaxGridDimY, 0);
//Set block size and grid size to size of the image.
int maxnThreads = 32; // 32x32
float XSize = ceil(WIDTH / 32.0f);
float YSize = ceil(HEIGHT / 32.0f);
if (XSize > maxThreadsX)
XSize = maxThreadsX;
if (YSize > maxThreadsY)
YSize = maxThreadsY;
dim3 nBlocks(XSize, YSize);
// check nBlocks.x and nBlocks.y
dim3 nThreads(maxnThreads, maxnThreads); //threads: 32x32 by default.
unsigned char *dev_in = 0; // input YUV
unsigned char *dev_out = 0; // output RGB
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = hipMalloc((void**)&dev_in, VideoSize * sizeof(unsigned char) * 3 / 2); //YUV input
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_out, VideoSize * sizeof(unsigned char) * 3 * 1); //RGB output Malloc
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
//cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int));
//if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMalloc failed!");
// goto Error;
//}
// USELESS CODE!!
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_in, inputArr, VideoSize * sizeof(unsigned char) * 3 / 2, hipMemcpyHostToDevice); //yuv gpu memory copy
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
//cudaStatus = hipMemcpy(dev_out, outArr, VideoSize * sizeof(unsigned char) * 3 * 1, hipMemcpyHostToDevice); //rgb gpu memory copy
//if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMemcpy failed!");
// goto Error;
//}
// USELESS CODE!!
// Launch a kernel on the GPU with one thread for each element.
hipLaunchKernelGGL(( addKernel), dim3(nBlocks), dim3(nThreads) , 0, 0, dev_out, dev_in, VideoSize); //block count & thread count; interpretations differ from person to person, so this follows my own!
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(outArr, dev_out, VideoSize * sizeof(unsigned char) * 3 * 1, hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_in);
hipFree(dev_out);
return cudaStatus;
}
|
99ce09acc308ad5addc03106e85371ae606272b8.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <time.h>
#include <stdio.h>
#include <opencv2\opencv.hpp>
#include <opencv\highgui.h>
#include <opencv\cxcore.h>
#include <opencv\cv.h>
#include <opencv2\core\mat.hpp>
using namespace cv;
using namespace std;
#define clipping(x) x>255?255:x<0?0:x
#define WIDTH 1920 // 1920 or 416
#define HEIGHT 1080 // 1080 or 240
#define NUM_FRAME 240 //240 or 300
#define VIDEO_NUM 1
#define USEGPU 1
cudaError_t addWithCuda(unsigned char *outArr, unsigned char *inputArr, unsigned int size);
__global__ void addKernel(unsigned char *dev_out, unsigned char *dev_in, int VideoSize)
{
//see https://en.wikipedia.org/wiki/YUV#Y.E2.80.B2UV420p_.28and_Y.E2.80.B2V12_or_YV12.29_to_RGB888_conversion for reference!
int pixel_ROW = blockIdx.x * blockDim.x + threadIdx.y; //x pixel coordinate.
int pixel_COLUMN = blockIdx.y * blockDim.y + threadIdx.x; //y pixel coordinate.
if (pixel_COLUMN >= HEIGHT || pixel_ROW >= WIDTH) return; //without this guard, extra threads keep running and overwrite memory outside the frame with 0!
// The area outside the image resolution must always be clipped off!!!
unsigned char y = dev_in[pixel_COLUMN*WIDTH + pixel_ROW];
unsigned char u = dev_in[(int)(pixel_COLUMN / 2)*(WIDTH / 2) + (int)(pixel_ROW / 2) + (WIDTH*HEIGHT)];
unsigned char v = dev_in[(int)(pixel_COLUMN / 2)*(WIDTH / 2) + (int)(pixel_ROW / 2) + (WIDTH*HEIGHT) + ((WIDTH*HEIGHT) / 4)];
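// The three loads above assume a planar YUV 4:2:0 buffer: WIDTH*HEIGHT bytes of Y first,
// followed by two quarter-resolution (WIDTH/2 x HEIGHT/2) chroma planes stored back to back
// (whether the first chroma plane is U or V depends on the source file, e.g. I420 vs. YV12).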
unsigned char b = y + (1.370705 * (v - 128));
unsigned char g = y - (0.698001 * (v - 128)) - (0.337633 * (u - 128));
unsigned char r = y + (1.732446 * (u - 128));
dev_out[pixel_COLUMN*WIDTH + pixel_ROW] = b;
dev_out[pixel_COLUMN*WIDTH + pixel_ROW + (WIDTH*HEIGHT)] = g;
dev_out[pixel_COLUMN*WIDTH + pixel_ROW + (WIDTH*HEIGHT) + (WIDTH*HEIGHT)] = r;
}
int main()
{
cudaError_t cudaStatus;
//const int arraySize = 5;
//const int a[arraySize] = { 1, 2, 3, 4, 5 };
//const int b[arraySize] = { 10, 20, 30, 40, 50 };
//int c[arraySize] = { 0 };
//////////////////////////////////////////////////////////////////
unsigned char **frame_no_loss_yuv;
frame_no_loss_yuv = new unsigned char *[NUM_FRAME]; //buffers for reading as many frames as the video contains.
for (int i = 0; i < NUM_FRAME; i++) {
frame_no_loss_yuv[i] = new unsigned char[WIDTH*HEIGHT * 3 / 2];
}
// Kimono1_1920x1080_24.yuv or rec.yuv
FILE* infile = fopen("Kimono1_1920x1080_24.yuv", "rb"); //open the YUV file in binary mode
if (!infile) { //if the file does not exist
printf("There isn't file!\n");
}
for (int i = 0; i < NUM_FRAME; i++) {
fread(frame_no_loss_yuv[i], 1, WIDTH*HEIGHT * 3 / 2, infile);
#if !USEGPU //when running on the CPU
Mat mYUV(HEIGHT + HEIGHT / 2, WIDTH, CV_8UC1, (void*)frame_no_loss_yuv[i]);
Mat mRGB(HEIGHT, WIDTH, CV_8UC3);
cvtColor(mYUV, mRGB, CV_YUV2RGB_YV12, 3);
#else //when running on the GPU
unsigned char *ArrRGB = new unsigned char[WIDTH * HEIGHT * 3 * 1];
cudaStatus = addWithCuda(ArrRGB, frame_no_loss_yuv[i], WIDTH*HEIGHT); //run the conversion; the returned status tells whether the CUDA call succeeded.
if (cudaStatus != cudaSuccess) { //it failed
fprintf(stderr, "addWithCuda failed!");
return 1;
}
// At this point ArrRGB holds the B, G and R planes, one full frame each, back to back.
//unsignedchar* to MATRIX
//ANSWER -> cv::Mat my_mat(rows, cols, CV_8UC1, &buf[0]); //in case of BGR image use CV_8UC3
//cvtColor(frame, frame, CV_RGB2BGR);
//see http://stackoverflow.com/questions/15821253/merging-three-grayscale-r-g-b-images-into-a-single-color-image-in-opencv for reference
Mat matB(HEIGHT, WIDTH, CV_8UC1, &ArrRGB[0]); //
Mat matG(HEIGHT, WIDTH, CV_8UC1, &ArrRGB[WIDTH*HEIGHT]); //
Mat matR(HEIGHT, WIDTH, CV_8UC1, &ArrRGB[WIDTH*HEIGHT*2]); //
vector<Mat> array_to_merge;
array_to_merge.push_back(matR);
array_to_merge.push_back(matG);
array_to_merge.push_back(matB);
Mat mRGB;
merge(array_to_merge, mRGB);
free(ArrRGB);
matB.release();
matG.release();
matR.release();
#endif
imshow("DISPLAY_YUV", mRGB); //temp
cvWaitKey(1); //the code does not run without this.
#if !USEGPU
mYUV.release();
#endif
mRGB.release(); //temp
}
//if you ever need fseek, see http://forum.falinux.com/zbxe/index.php?document_srl=408250&mid=C_LIB
///////////////////////////////////////////////////////////////////
// Add vectors in parallel.
//printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
// c[0], c[1], c[2], c[3], c[4]);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
#if USEGPU
#endif
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
cudaError_t addWithCuda(unsigned char *outArr, unsigned char *inputArr, unsigned int VideoSize)
{
int maxThreadsX, maxThreadsY;
cudaDeviceGetAttribute(&maxThreadsX, cudaDevAttrMaxGridDimX, 0);
cudaDeviceGetAttribute(&maxThreadsY, cudaDevAttrMaxGridDimY, 0);
//Set block size and grid size to size of the image.
int maxnThreads = 32; //default 32x32
float XSize = ceil(WIDTH / 32.0f);
float YSize = ceil(HEIGHT / 32.0f);
if (XSize > maxThreadsX)
XSize = maxThreadsX;
if (YSize > maxThreadsY)
YSize = maxThreadsY;
dim3 nBlocks(XSize, YSize);
// check nBlocks.x and nBlocks.y
dim3 nThreads(maxnThreads, maxnThreads); //threads: 32x32 by default.
unsigned char *dev_in = 0; // input YUV
unsigned char *dev_out = 0; // output RGB
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_in, VideoSize * sizeof(unsigned char) * 3 / 2); //YUV input
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_out, VideoSize * sizeof(unsigned char) * 3 * 1); //RGB output Malloc
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
//cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
//if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMalloc failed!");
// goto Error;
//}
// USELESS CODE!!
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_in, inputArr, VideoSize * sizeof(unsigned char) * 3 / 2, cudaMemcpyHostToDevice); //yuv gpu memory copy
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
//cudaStatus = cudaMemcpy(dev_out, outArr, VideoSize * sizeof(unsigned char) * 3 * 1, cudaMemcpyHostToDevice); //rgb gpu memory copy
//if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMemcpy failed!");
// goto Error;
//}
// USELESS CODE!!
// Launch a kernel on the GPU with one thread for each element.
addKernel<<< nBlocks, nThreads >>>(dev_out, dev_in, VideoSize); //block count & thread count; interpretations differ from person to person, so this follows my own!
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(outArr, dev_out, VideoSize * sizeof(unsigned char) * 3 * 1, cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_in);
cudaFree(dev_out);
return cudaStatus;
}
|
a04071212a341f29e698497edece34770acb8c39.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@generated from zhemv_mgpu.cu normal z -> d, Fri Sep 11 18:29:22 2015
@author Mark Gates
*/
#include "common_magma.h"
#include "commonblas_d.h"
#define PRECISION_d
#define NB_X 64
#define NB_Y 4
#define bank_shift 33
#define quarter_NB_X 16
#define half_NB_X 32
/*******************************************************************************
Lower case, compute block multiply, work = A*x, for any size n:
[ (A11*x1) (A21^H*x2) (A31^H*x3) ] [ A11 A21^H A31^H ] [ x1 ]
work = [ --- (A21*x1 + A22*x2) (A32^H*x3) ] = [ A21 A22 A32^H ] * [ x2 ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ] [ A31 A32 A33 ] [ x3 ]
Uses a 64x4 thread block.
For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed).
For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles.
In both cases, each thread multiplies 4 elements.
For rows past the bottom of the matrix, the A pointer is adjusted to be the
last valid row of A, which multiple threads will read.
Extra rows are ignored when saving results to work.
Columns past the right edge are explicitly ignored when loading.
x values past the bottom are set to zero, thus, extra columns are zeroed
when multiplying.
Previously:
[ (A11*x1) --- ]
work = [ (A21^H*x2) (A21*x1 + A22*x2) --- ]
[ (A31^H*x3) (A32^H*x3) (A31*x1 + A32*x2 + A33*x3) ]
which doesn't work as well because that has dimension blocks*NB by blocks,
where blocks*NB >= n, and it can be that blocks*NB > lda, so it won't fit in
lda*blocks space. This is why it used to need lwork = lda*(blocks + 1).
********************************************************************/
__global__ void
dsymv_kernel_L_mgpu(
int n,
double const * __restrict__ A, int lda,
double const * __restrict__ x, int incx,
double * __restrict__ work,
int my_gpu_id,
int ngpu,
int block_offset )
{
#if (__CUDA_ARCH__ >= 200)
// treats sA as 16x64 block
#define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ]
// treats sA as 32x32 block
#define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)])
// 64x4 thread block
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int blk = blockIdx.x;
const int blk_ind = NB_X * blk;
const int td = NB_X * ty + tx;
// GPUs are renumbered so that GPU 0 starts with block 0, GPU 1 starts with block 1, etc.
if ( blk < my_gpu_id ) {
return;
}
// 32x8 thread block
const int tx2 = td % half_NB_X;
const int ty2 = td / half_NB_X;
// If this blk has fewer than NB_X rows, partial is the number of valid rows,
// so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid.
// Else, partial == 0.
const int partial = (blk == gridDim.x - 1 ? ((n + block_offset) % NB_X) : 0);
double psum, psum_t;
double total = MAGMA_D_ZERO;
// sA is used as a 32x32 block, sA32(i,j),
// and as a 16x64 block, sA16(i,j), in different parts of the code.
// sA must be at least half_NB_X*bank_shift = 32x33 = 1056;
// quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056
__shared__ double sA [quarter_NB_X][NB_X + 2]; // TODO +3 used in dsymv (single GPU); why?
__shared__ double sx_blk[NB_X]; // for x[ blk ]
__shared__ double sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks left of diag
double rA[4];
double psums_t[4];
// --------------------
// load 64x1 block x(blk_ind + 0:63) into sx_blk
x += (blk_ind + tx)*incx; // x is x(blk_ind + tx)
if ( ty == 0 ) {
// GPUs are renumbered so that GPU 0 has block 0, which is partial of offset.
if ( (partial && tx >= partial) ||
(blk == 0 /*&& my_gpu_id == 0*/ && tx < block_offset) ) {
sx_blk[tx] = MAGMA_D_ZERO;
}
else {
sx_blk[tx] = x[0];
}
}
// --------------------
// move to block row
work += blk*lda; // work is work(0, blk)
A += blk_ind; // A is A(blk_ind, 0)
A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2)
if ( blk % ngpu == my_gpu_id ) {
// this GPU owns this diagonal block, so
// move to 32x32 diag block
A += (blk/ngpu)*NB_X*lda; // A is A(blk_ind + tx2, blk_ind + ty2)
// load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_D_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2)
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle,
// as four 32x8 sections in parallel:
// columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
// each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3)
psum = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 32x32 diag block, then repeat steps from first diag block
A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2)
// load 32x32 diag block A[block + 0:31, block + 0:31] into sA
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_D_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
psum = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to off-diag 32x32 block
A -= half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + ty2)
// load 32x32 block of A into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_D_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// multiply 32x32 block (below diag)
psum = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2 + j*8) * sx_blk[j*8 + ty2];
}
//__syncthreads(); // no sync needed here
// multiply transposed 32x32 block (above diag)
psum_t = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += ( sA32(ty2*4 + j, tx2) ) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial sums for non-transposed 32x32 block
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// store partial sums for transposed 32x32 block
sA32(ty2, tx2) = psum_t;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to leftmost 64x64 block in block row, and
// switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block
A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + ty2)
A -= (blk/ngpu)*NB_X*lda; // A is A(blk_ind + tx2, ty2)
}
// finish switching thread offset
A -= ty2*lda + tx2; // A is A(blk_ind, 0)
A += 4*ty*lda + tx; // A is A(blk_ind + tx, 4*ty)
if ( partial && tx >= partial ) {
A = A - tx + (partial - 1); // A is A(blk_ind + partial-1, 4*ty), the bottom-most valid row
}
x -= blk_ind*incx; // x is x(tx)
// 16x16 thread block
const int tx4 = td % quarter_NB_X;
const int ty4 = td / quarter_NB_X;
// cycle over blocks jj left of diagonal, in block row blk
for (int jj=my_gpu_id; jj < blk; jj += ngpu) {
// load 64x1 block x(jj_ind + 0:63) into sx_jj
// since this block is left of diagonal, x must have all NB rows
// only the first block column (jj=0, on GPU 0) deals with offset
if ( ty == 0 ) {
if ( jj == 0 && tx < block_offset ) {
sx_jj[tx] = MAGMA_D_ZERO;
}
else {
sx_jj[tx] = x[jj*NB_X*incx];
}
}
__syncthreads();
for (int k=0; k < 4; k++) {
// load 64x16 block of A into rA, 4 elements per thread,
// as four 64x4 sections in parallel:
// columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15
// since this block is left of diagonal, it has all NB columns,
// and block of x must have all NB rows.
#pragma unroll
for (int j=0; j < 4; j++) {
rA[j] = A[j*lda];
}
// 1) multiply 64x16 block A_{blk,jj} * x_jj
// each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k)
// 2) multiply transposed 16x64 block A_{blk,jj}^H * x_blk,
// storing each product Aji*xi to sA(j,i)
#pragma unroll
for (int j=0; j < 4; j++) {
total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj
sA16(ty*4 + j, tx) = ( rA[j] ) * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk
}
__syncthreads();
// do partial row sums for transposed 16x64 result
// use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty)
// sum sixteen 16x4 sections in parallel:
// columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63
psum_t = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA16(tx4, ty4*4 + j);
}
__syncthreads();
// store partial row sums of transposed result, y_jj (locally)
psums_t[k] = psum_t;
// move right to next 64x16 block
A += lda * quarter_NB_X; // A is A(blk_ind + tx#, jj*NB_x + (k+1)*NB_X/4 + 4*ty), # tx or partial
}
// already at next 64x64 block
// A is A(blk_ind + tx#, (jj+1)*NB_x + 4*ty), # tx or partial
// store partial row sums of transposed result, y_jj
#pragma unroll
for (int k=0; k < 4; k++) {
sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k];
}
__syncthreads();
// sum up partial row sums of transposed result, y_jj, and store final total to workspace
// thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16
// since this is the transposed block above the diagonal, it must have all NB rows
if ( ty4 < 4 ) {
int ty4_nb4 = ty4*quarter_NB_X;
psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4)
+ sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4)
+ sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4)
+ sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4)
+ sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4)
+ sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4)
+ sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4)
+ sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4);
work[jj*NB_X + tx4 + ty4_nb4] = psum_t; // store at work( jj*NB_X + tx4 + ty4*16, blk )
}
__syncthreads();
}
// store row sums
sA16(ty, tx) = total;
__syncthreads();
// sum up final total, y_blk, for row tx
if ( ty == 0 && (partial == 0 || tx < partial) ) {
total = sA16(0, tx)
+ sA16(1, tx)
+ sA16(2, tx)
+ sA16(3, tx);
work[blk*NB_X + tx] = total; // store at work( blk*NB_X + tx, blk )
}
#endif /* (__CUDA_ARCH__ >= 200) */
}
// end dsymv_kernel_L_mgpu
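// Worked example (illustrative numbers, not from the original source): with
// NB_X = 64, n = 1000, and block_offset = 40, the launcher builds
// ceil((1000 + 40)/64) = 17 blocks; in the last block
// partial = (1000 + 40) % 64 = 16, so rows 0..15 of that block are valid and
// threads with tx >= 16 are redirected to the last valid row of A.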
/**************************************************************
Lower case, sum up partial results per GPU.
Each block sums one block row; each thread sums one row.
On input (for 3 blocks):
[ (A11*x1) (A21^H*x2) (A31^H*x3) ]
work = [ --- (A21*x1 + A22*x2) (A32^H*x3) ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ]
On output:
[ (A11*x1) + (A21^H*x2) + (A31^H*x3) ]
y = alpha*[ (A21*x1 + A22*x2) + (A32^H*x3) ]
[ (A31*x1 + A32*x2 + A33*x3) ]
Note beta*y is not included here; see magmablas_dsymv_mgpu_sync.
The above workspace is distributed over multiple GPUs as diagrammed for 5 blocks:
[ * x x x x ] blk=0 * data for non-transposed row w_blk = A_{blk,1:blk} * x_{1:blk}
work[gpu=0] = [ * ] blk=1 x data for transposed block w_jj = A_{blk,jj}^H * x_{blk}
[ * x x ] blk=2 blanks are not set
[ * ] blk=3
[ * ] blk=4
[ ] blk=0 (blank)
work[gpu=1] = [ * x x x ] blk=1
[ * ] blk=2
[ * x ] blk=3
[ * ] blk=4
On output, rows across are summed up.
Entries left of the diagonal blocks are not accessed.
Blank rows, where a GPU has no data to contribute, are explicitly set to zero in y.
[ * + x + x + x ]
y[gpu=0] = [ * ]
[ * + x ]
[ * ]
[ 0 ] (explicitly set to 0)
y[gpu=1] = [ * + x + x ]
[ * ]
[ * ]
********************************************************************/
__global__ void
dsymv_kernel_L_mgpu_sum(
int n,
double alpha,
int lda,
double * __restrict__ y, int incy,
double const * __restrict__ work,
int my_gpu_id,
int ngpu,
int block_offset)
{
int tx = threadIdx.x;
int blk = blockIdx.x;
int blk_ind = blk * NB_X;
int ind = blk_ind + tx;
int blocks = gridDim.x;
// Don't write outside [block_offset, ..., n+block_offset)
if ( ind >= block_offset && ind < n+block_offset ) {
double Ax = MAGMA_D_ZERO;
// GPUs are renumbered so that GPU 0 starts with block 0,
// GPU 1 starts with block 1, etc.,
// therefore only blk >= my_gpu_id have non-zero data.
if ( blk >= my_gpu_id ) {
work += ind;
// if this GPU owns block-column blk, all blocks j=[blk, ..., blocks) contain data;
// else only block j=blk contains data.
int last = blocks-1;
if ( blk % ngpu != my_gpu_id ) {
last = blk;
}
for (int j = blk; j <= last; ++j) {
Ax += work[j*lda];
}
}
y[ind * incy] = alpha*Ax; // see magmablas_dsymv_mgpu_sync for beta*y
}
}
// end dsymv_kernel_L_mgpu_sum
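// Worked example (illustrative numbers, not from the original source): with
// ngpu = 2, gridDim.x = 5 blocks, and renumbered my_gpu_id = 0: block blk = 2
// owns block-column 2 (2 % 2 == 0), so it sums work columns j = 2..4;
// block blk = 1 does not own column 1, so it sums only the single transposed
// contribution in column j = 1.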
/**
Purpose
-------
magmablas_dsymv_mgpu performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n symmetric matrix.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
- = MagmaUpper: Only the upper triangular part of A is to be referenced. **Not currently supported.**
- = MagmaLower: Only the lower triangular part of A is to be referenced.
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
alpha DOUBLE_PRECISION.
On entry, ALPHA specifies the scalar alpha.
@param[in]
d_lA Array of pointers, dimension (ngpu), to block-column distributed
matrix A, with block size nb.
d_lA[dev] is a DOUBLE_PRECISION array on GPU dev, of
dimension (LDDA, nlocal), where
\n
{ floor(n/nb/ngpu)*nb + nb if dev < floor(n/nb) % ngpu,
nlocal = { floor(n/nb/ngpu)*nb + n%nb if dev == floor(n/nb) % ngpu,
{ floor(n/nb/ngpu)*nb otherwise.
\n
Before entry with UPLO = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the symmetric matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the symmetric matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
@param[in]
offset INTEGER.
Row & column offset to start of matrix A within the distributed d_lA
structure. Note that N is the size of this multiply, excluding the
offset, so the size of the original parent matrix is N+offset.
Also, x and y do not have an offset.
@param[in]
ldda INTEGER.
On entry, LDDA specifies the first dimension of A as declared
in the calling (sub) program. LDDA must be at least
max( 1, n + offset ).
It is recommended that ldda be a multiple of 16; otherwise,
performance would degrade because the memory accesses
would not be fully coalesced.
@param[in]
x DOUBLE_PRECISION array **on the CPU** (not the GPU), of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
@param[in]
incx INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
@param[in]
beta DOUBLE_PRECISION.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[in,out]
y DOUBLE_PRECISION array **on the CPU** (not the GPU), of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
@param[in]
incy INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
@param
hwork (workspace) DOUBLE_PRECISION array on the CPU, of dimension (lhwork).
@param[in]
lhwork INTEGER.
The dimension of the array hwork. lhwork >= ngpu*nb.
@param
dwork (workspaces) Array of pointers, dimension (ngpu), to workspace on each GPU.
dwork[dev] is a DOUBLE_PRECISION array on GPU dev, of dimension (ldwork).
@param[in]
ldwork INTEGER.
The dimension of each array dwork[dev].
ldwork >= ldda*( ceil((n + offset % nb) / nb) + 1 ).
@param[in]
ngpu INTEGER.
The number of GPUs to use.
@param[in]
nb INTEGER.
The block size used for distributing d_lA. Must be 64.
@param[in]
queues magma_queue_t array of dimension (ngpu).
queues[dev] is an execution queue on GPU dev.
@ingroup magma_dblas2
********************************************************************/
extern "C"
magma_int_t
magmablas_dsymv_mgpu(
magma_uplo_t uplo,
magma_int_t n,
double alpha,
magmaDouble_const_ptr const d_lA[], magma_int_t ldda,
magma_int_t offset,
double const *x, magma_int_t incx,
double beta, // unused, see magmablas_dsymv_mgpu_sync
double *y, magma_int_t incy, // unused
double *hwork, magma_int_t lhwork,
magmaDouble_ptr dwork[], magma_int_t ldwork,
magma_int_t ngpu,
magma_int_t nb,
magma_queue_t queues[] )
{
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// no CUDA ARCH 1.x version
fprintf( stderr, "%s not supported on CUDA arch 1.x\n", __func__ );
return MAGMA_ERR_NOT_SUPPORTED;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
int upper = (uplo == MagmaUpper);
magma_int_t offset_block_id = offset / NB_X;
magma_int_t offset_gpu_id = offset_block_id % ngpu;
magma_int_t block_offset = offset % NB_X;
magma_int_t blocks = magma_ceildiv( n + block_offset, NB_X );
magma_int_t ldwmin = ldda*(blocks + 1);
magma_int_t lhwmin = n*ngpu;
/*
* Test the input parameters.
*/
magma_int_t info = 0;
if ( (! upper) && (uplo != MagmaLower) ) {
info = -1;
} else if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1,n+offset) ) {
info = -5;
} else if ( offset < 0 ) {
info = -6;
} else if ( incx == 0 ) {
info = -8;
} else if ( incy == 0 ) {
info = -11;
} else if ( lhwork < lhwmin ) {
info = -13;
} else if ( ldwork < ldwmin ) {
info = -15;
} else if ( ngpu < 1 ) {
info = -16;
} else if ( nb != NB_X ) {
info = -17;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( n == 0 )
return info;
magma_device_t orig_dev;
magma_getdevice( &orig_dev );
magma_int_t dev;
for (dev=0; dev < ngpu; dev++) {
magma_setdevice( dev );
// blocks before the offset block
magma_int_t num_blocks_skipped = offset_block_id / ngpu;
if ( dev < offset_gpu_id ) {
num_blocks_skipped += 1;
}
// shift dA to first block >= offset block that is owned by this GPU
double const *dA_dev = d_lA[dev] + offset_block_id*NB_X + num_blocks_skipped*NB_X*ldda;
// first column of dwork is to broadcast x to all GPUs.
// remaining blocks number of columns is for partial sums from
// each block, as in single GPU version.
double *dx_dev = dwork[dev];
double *dwork_dev = dwork[dev] + ldda;
// renumber GPUs starting from the offset block
magma_int_t new_gpu_id = (dev + ngpu - offset_gpu_id) % ngpu;
dim3 grid( blocks, 1 );
// copy x to each GPU
magma_dsetvector_async( n, x, incx, dx_dev + block_offset, 1, queues[dev] );
// perform work = A*x, partial row sums
dim3 threads( NB_X, NB_Y );
// perform w = sum( work ), larger partial row sums
dim3 threads_sum( NB_X, 1 );
if ( upper ) {
hipLaunchKernelGGL(( dsymv_kernel_U_mgpu), dim3(grid), dim3(threads), 0, queues[dev] ,
n, dA_dev, ldda, dx_dev, 1, dwork_dev,
new_gpu_id, ngpu, block_offset );
hipLaunchKernelGGL(( dsymv_kernel_U_mgpu_sum), dim3(grid), dim3(threads_sum), 0, queues[dev] ,
n, alpha, ldda, dx_dev, 1, dwork_dev,
new_gpu_id, ngpu, block_offset );
}
else {
hipLaunchKernelGGL(( dsymv_kernel_L_mgpu), dim3(grid), dim3(threads), 0, queues[dev] ,
n, dA_dev, ldda, dx_dev, 1, dwork_dev,
new_gpu_id, ngpu, block_offset );
hipLaunchKernelGGL(( dsymv_kernel_L_mgpu_sum), dim3(grid), dim3(threads_sum), 0, queues[dev] ,
n, alpha, ldda, dx_dev, 1, dwork_dev,
new_gpu_id, ngpu, block_offset );
}
}
// 2nd loop in case hwork is not pinned, causing this to be sync instead of async.
for (dev=0; dev < ngpu; dev++) {
// copy w to CPU
magma_setdevice( dev );
double *dx_dev = dwork[dev];
magma_dgetvector_async( n, dx_dev + block_offset, 1, &hwork[dev*n], 1, queues[dev] );
}
// see magmablas_dsymv_mgpu_sync for final row sums
magma_setdevice( orig_dev );
return info;
}
/**
Synchronizes and accumulates the final dsymv result.
For convenience, the parameters are identical to magmablas_dsymv_mgpu
(though some are unused here).
@see magmablas_dsymv_mgpu
@ingroup magma_dblas2
********************************************************************/
extern "C" magma_int_t
magmablas_dsymv_mgpu_sync(
magma_uplo_t uplo, // unused, see magmablas_dsymv_mgpu
magma_int_t n,
double alpha, // unused
magmaDouble_const_ptr const d_lA[], magma_int_t ldda,
magma_int_t offset, // unused
double const *x, magma_int_t incx, // unused
double beta,
double *y, magma_int_t incy, // unused
double *hwork, magma_int_t lhwork,
magmaDouble_ptr dwork[], magma_int_t ldwork, // unused
magma_int_t ngpu,
magma_int_t nb, // unused
magma_queue_t queues[] )
{
const double c_one = MAGMA_D_ONE;
const magma_int_t ione = 1;
magma_device_t dev;
magma_int_t lhwmin = n*ngpu;
/*
* Test the input parameters.
*/
magma_int_t info = 0;
//if ( (! upper) && (uplo != MagmaLower) ) { // unused
// info = -1;
//} else
if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1,n+offset) ) {
info = -5;
} else if ( offset < 0 ) {
info = -6;
} else if ( incx == 0 ) {
info = -8;
} else if ( incy == 0 ) {
info = -11;
} else if ( lhwork < lhwmin ) {
info = -13;
//} else if ( ldwork < ldwmin ) { // unused
// info = -15;
} else if ( ngpu < 1 ) {
info = -16;
} else if ( nb != NB_X ) {
info = -17;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( n == 0 )
return info;
magma_device_t orig_dev;
magma_getdevice( &orig_dev );
// scale y = beta*y
blasf77_dscal( &n, &beta, y, &incy );
// sum reduce, y += sum( hwork )
for (dev=0; dev < ngpu; ++dev) {
magma_setdevice( dev );
magma_queue_sync( queues[dev] );
blasf77_daxpy( &n, &c_one, &hwork[dev*n], &ione, y, &ione );
}
magma_setdevice( orig_dev );
return info;
}
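// --------------------
// Minimal usage sketch (illustrative only): the intended two-phase call
// pattern. magmablas_dsymv_mgpu launches the per-GPU kernels and copies the
// partial sums into hwork; magmablas_dsymv_mgpu_sync then synchronizes the
// queues and finishes y = alpha*A*x + beta*y on the CPU. The helper name
// example_dsymv_mgpu, the MagmaLower uplo, and the unit strides are
// assumptions for this sketch; workspace sizes must satisfy the
// ldwmin/lhwmin checks above.
static magma_int_t example_dsymv_mgpu(
    magma_int_t n, double alpha, double beta,
    magmaDouble_const_ptr const d_lA[], magma_int_t ldda, magma_int_t offset,
    double const *x, double *y,
    double *hwork, magma_int_t lhwork,
    magmaDouble_ptr dwork[], magma_int_t ldwork,
    magma_int_t ngpu, magma_queue_t queues[] )
{
    // phase 1: per-GPU partial products, gathered asynchronously into hwork
    magma_int_t info = magmablas_dsymv_mgpu(
        MagmaLower, n, alpha, d_lA, ldda, offset,
        x, 1, beta, y, 1,
        hwork, lhwork, dwork, ldwork, ngpu, NB_X, queues );
    if ( info != 0 )
        return info;
    // phase 2: sync queues and reduce, y = beta*y + sum over GPUs of hwork
    return magmablas_dsymv_mgpu_sync(
        MagmaLower, n, alpha, d_lA, ldda, offset,
        x, 1, beta, y, 1,
        hwork, lhwork, dwork, ldwork, ngpu, NB_X, queues );
}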
|
a04071212a341f29e698497edece34770acb8c39.cu
|
/*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@generated from zhemv_mgpu.cu normal z -> d, Fri Sep 11 18:29:22 2015
@author Mark Gates
*/
#include "common_magma.h"
#include "commonblas_d.h"
#define PRECISION_d
#define NB_X 64
#define NB_Y 4
#define bank_shift 33
#define quarter_NB_X 16
#define half_NB_X 32
/*******************************************************************************
Lower case, compute block multiply, work = A*x, for any size n:
[ (A11*x1) (A21^H*x2) (A31^H*x3) ] [ A11 A21^H A31^H ] [ x1 ]
work = [ --- (A21*x1 + A22*x2) (A32^H*x3) ] = [ A21 A22 A32^H ] * [ x2 ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ] [ A31 A32 A33 ] [ x3 ]
Uses a 64x4 thread block.
For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed).
For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles.
In both cases, each thread multiplies 4 elements.
For rows past the bottom of the matrix, the A pointer is adjusted to be the
last valid row of A, which multiple threads will read.
Extra rows are ignored when saving results to work.
Columns past the right edge are explicitly ignored when loading.
x values past the bottom are set to zero, thus, extra columns are zeroed
when multiplying.
Previously:
[ (A11*x1) --- ]
work = [ (A21^H*x2) (A21*x1 + A22*x2) --- ]
[ (A31^H*x3) (A32^H*x3) (A31*x1 + A32*x2 + A33*x3) ]
which doesn't work as well because that has dimension blocks*NB by blocks,
where blocks*NB >= n, and it can be that blocks*NB > lda, so it won't fit in
lda*blocks space. This is why it used to need lwork = lda*(blocks + 1).
********************************************************************/
__global__ void
dsymv_kernel_L_mgpu(
int n,
double const * __restrict__ A, int lda,
double const * __restrict__ x, int incx,
double * __restrict__ work,
int my_gpu_id,
int ngpu,
int block_offset )
{
#if (__CUDA_ARCH__ >= 200)
// treats sA as 16x64 block
#define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+2) + (j_) ]
// treats sA as 32x32 block
#define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)])
// 64x4 thread block
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int blk = blockIdx.x;
const int blk_ind = NB_X * blk;
const int td = NB_X * ty + tx;
// GPUs are renumbered so that GPU 0 starts with block 0, GPU 1 starts with block 1, etc.
if ( blk < my_gpu_id ) {
return;
}
// 32x8 thread block
const int tx2 = td % half_NB_X;
const int ty2 = td / half_NB_X;
// If this blk has fewer than NB_X rows, partial is the number of valid rows,
// so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid.
// Else, partial == 0.
const int partial = (blk == gridDim.x - 1 ? ((n + block_offset) % NB_X) : 0);
double psum, psum_t;
double total = MAGMA_D_ZERO;
// sA is used as a 32x32 block, sA32(i,j),
// and as a 16x64 block, sA16(i,j), in different parts of the code.
// sA must be at least half_NB_X*bank_shift = 32x33 = 1056;
// quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056
__shared__ double sA [quarter_NB_X][NB_X + 2]; // TODO +3 used in dsymv (single GPU); why?
__shared__ double sx_blk[NB_X]; // for x[ blk ]
__shared__ double sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks left of diag
double rA[4];
double psums_t[4];
// --------------------
// load 64x1 block x(blk_ind + 0:63) into sx_blk
x += (blk_ind + tx)*incx; // x is x(blk_ind + tx)
if ( ty == 0 ) {
// GPUs are renumbered so that GPU 0 has block 0, which is partial of offset.
if ( (partial && tx >= partial) ||
(blk == 0 /*&& my_gpu_id == 0*/ && tx < block_offset) ) {
sx_blk[tx] = MAGMA_D_ZERO;
}
else {
sx_blk[tx] = x[0];
}
}
// --------------------
// move to block row
work += blk*lda; // work is work(0, blk)
A += blk_ind; // A is A(blk_ind, 0)
A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2)
if ( blk % ngpu == my_gpu_id ) {
// this GPU owns this diagonal block, so
// move to 32x32 diag block
A += (blk/ngpu)*NB_X*lda; // A is A(blk_ind + tx2, blk_ind + ty2)
// load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_D_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2)
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle,
// as four 32x8 sections in parallel:
// columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
// each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3)
psum = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 32x32 diag block, then repeat steps from first diag block
A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2)
// load 32x32 diag block A[block + 0:31, block + 0:31] into sA
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_D_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
psum = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to off-diag 32x32 block
A -= half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + ty2)
// load 32x32 block of A into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_D_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// multiply 32x32 block (below diag)
psum = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2 + j*8) * sx_blk[j*8 + ty2];
}
//__syncthreads(); // no sync needed here
// multiply transposed 32x32 block (above diag)
psum_t = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += ( sA32(ty2*4 + j, tx2) ) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial sums for non-transposed 32x32 block
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// store partial sums for transposed 32x32 block
sA32(ty2, tx2) = psum_t;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to leftmost 64x64 block in block row, and
// switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block
A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + ty2)
A -= (blk/ngpu)*NB_X*lda; // A is A(blk_ind + tx2, ty2)
}
// finish switching thread offset
A -= ty2*lda + tx2; // A is A(blk_ind, 0)
A += 4*ty*lda + tx; // A is A(blk_ind + tx, 4*ty)
if ( partial && tx >= partial ) {
A = A - tx + (partial - 1); // A is A(blk_ind + partial-1, 4*ty), the bottom-most valid row
}
x -= blk_ind*incx; // x is x(tx)
// 16x16 thread block
const int tx4 = td % quarter_NB_X;
const int ty4 = td / quarter_NB_X;
// cycle over blocks jj left of diagonal, in block row blk
for (int jj=my_gpu_id; jj < blk; jj += ngpu) {
// load 64x1 block x(jj_ind + 0:63) into sx_jj
// since this block is left of diagonal, x must have all NB rows
// only the first block column (jj=0, on GPU 0) deals with offset
if ( ty == 0 ) {
if ( jj == 0 && tx < block_offset ) {
sx_jj[tx] = MAGMA_D_ZERO;
}
else {
sx_jj[tx] = x[jj*NB_X*incx];
}
}
__syncthreads();
for (int k=0; k < 4; k++) {
// load 64x16 block of A into rA, 4 elements per thread,
// as four 64x4 sections in parallel:
// columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15
// since this block is left of diagonal, it has all NB columns,
// and block of x must have all NB rows.
#pragma unroll
for (int j=0; j < 4; j++) {
rA[j] = A[j*lda];
}
// 1) multiply 64x16 block A_{blk,jj} * x_jj
// each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k)
// 2) multiply transposed 16x64 block A_{blk,jj}^H * x_blk,
// storing each product Aji*xi to sA(j,i)
#pragma unroll
for (int j=0; j < 4; j++) {
total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj
sA16(ty*4 + j, tx) = ( rA[j] ) * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk
}
__syncthreads();
// do partial row sums for transposed 16x64 result
// use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty)
// sum sixteen 16x4 sections in parallel:
// columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63
psum_t = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA16(tx4, ty4*4 + j);
}
__syncthreads();
// store partial row sums of transposed result, y_jj (locally)
psums_t[k] = psum_t;
// move right to next 64x16 block
A += lda * quarter_NB_X; // A is A(blk_ind + tx#, jj*NB_x + (k+1)*NB_X/4 + 4*ty), # tx or partial
}
// already at next 64x64 block
// A is A(blk_ind + tx#, (jj+1)*NB_x + 4*ty), # tx or partial
// store partial row sums of transposed result, y_jj
#pragma unroll
for (int k=0; k < 4; k++) {
sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k];
}
__syncthreads();
// sum up partial row sums of transposed result, y_jj, and store final total to workspace
// thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16
// since this is the transposed block above the diagonal, it must have all NB rows
if ( ty4 < 4 ) {
int ty4_nb4 = ty4*quarter_NB_X;
psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4)
+ sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4)
+ sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4)
+ sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4)
+ sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4)
+ sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4)
+ sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4)
+ sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4);
work[jj*NB_X + tx4 + ty4_nb4] = psum_t; // store at work( jj*NB_X + tx4 + ty4*16, blk )
}
__syncthreads();
}
// store row sums
sA16(ty, tx) = total;
__syncthreads();
// sum up final total, y_blk, for row tx
if ( ty == 0 && (partial == 0 || tx < partial) ) {
total = sA16(0, tx)
+ sA16(1, tx)
+ sA16(2, tx)
+ sA16(3, tx);
work[blk*NB_X + tx] = total; // store at work( blk*NB_X + tx, blk )
}
#endif /* (__CUDA_ARCH__ >= 200) */
}
// end dsymv_kernel_L_mgpu
/**************************************************************
Lower case, sum up partial results per GPU.
Each block sums one block row; each thread sums one row.
On input (for 3 blocks):
[ (A11*x1) (A21^H*x2) (A31^H*x3) ]
work = [ --- (A21*x1 + A22*x2) (A32^H*x3) ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ]
On output:
[ (A11*x1) + (A21^H*x2) + (A31^H*x3) ]
y = alpha*[ (A21*x1 + A22*x2) + (A32^H*x3) ]
[ (A31*x1 + A32*x2 + A33*x3) ]
Note beta*y is not included here; see magmablas_dsymv_mgpu_sync.
The above workspace is distributed over multiple GPUs as diagrammed for 5 blocks:
[ * x x x x ] blk=0 * data for non-transposed row w_blk = A_{blk,1:blk} * x_{1:blk}
work[gpu=0] = [ * ] blk=1 x data for transposed block w_jj = A_{blk,jj}^H * x_{blk}
[ * x x ] blk=2 blanks are not set
[ * ] blk=3
[ * ] blk=4
[ ] blk=0 (blank)
work[gpu=1] = [ * x x x ] blk=1
[ * ] blk=2
[ * x ] blk=3
[ * ] blk=4
On output, rows across are summed up.
Entries left of the diagonal blocks are not accessed.
Blank rows, where a GPU has no data to contribute, are explicitly set to zero in y.
[ * + x + x + x ]
y[gpu=0] = [ * ]
[ * + x ]
[ * ]
[ 0 ] (explicitly set to 0)
y[gpu=1] = [ * + x + x ]
[ * ]
[ * ]
********************************************************************/
__global__ void
dsymv_kernel_L_mgpu_sum(
int n,
double alpha,
int lda,
double * __restrict__ y, int incy,
double const * __restrict__ work,
int my_gpu_id,
int ngpu,
int block_offset)
{
int tx = threadIdx.x;
int blk = blockIdx.x;
int blk_ind = blk * NB_X;
int ind = blk_ind + tx;
int blocks = gridDim.x;
// Don't write outside [block_offset, ..., n+block_offset)
if ( ind >= block_offset && ind < n+block_offset ) {
double Ax = MAGMA_D_ZERO;
// GPUs are renumbered so that GPU 0 starts with block 0,
// GPU 1 starts with block 1, etc.,
// therefore only blk >= my_gpu_id have non-zero data.
if ( blk >= my_gpu_id ) {
work += ind;
// if this GPU owns block-column blk, all blocks j=[blk, ..., blocks) contain data;
// else only block j=blk contains data.
int last = blocks-1;
if ( blk % ngpu != my_gpu_id ) {
last = blk;
}
for (int j = blk; j <= last; ++j) {
Ax += work[j*lda];
}
}
y[ind * incy] = alpha*Ax; // see magmablas_dsymv_mgpu_sync for beta*y
}
}
// end dsymv_kernel_L_mgpu_sum
/**
Purpose
-------
magmablas_dsymv_mgpu performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n symmetric matrix.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
- = MagmaUpper: Only the upper triangular part of A is to be referenced. **Not currently supported.**
- = MagmaLower: Only the lower triangular part of A is to be referenced.
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
alpha DOUBLE_PRECISION.
On entry, ALPHA specifies the scalar alpha.
@param[in]
d_lA Array of pointers, dimension (ngpu), to block-column distributed
matrix A, with block size nb.
d_lA[dev] is a DOUBLE_PRECISION array on GPU dev, of
dimension (LDDA, nlocal), where
\n
{ floor(n/nb/ngpu)*nb + nb if dev < floor(n/nb) % ngpu,
nlocal = { floor(n/nb/ngpu)*nb + n%nb if dev == floor(n/nb) % ngpu,
{ floor(n/nb/ngpu)*nb otherwise.
\n
Before entry with UPLO = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the symmetric matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the symmetric matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
@param[in]
offset INTEGER.
Row & column offset to start of matrix A within the distributed d_lA
structure. Note that N is the size of this multiply, excluding the
offset, so the size of the original parent matrix is N+offset.
Also, x and y do not have an offset.
@param[in]
ldda INTEGER.
On entry, LDDA specifies the first dimension of A as declared
in the calling (sub) program. LDDA must be at least
max( 1, n + offset ).
It is recommended that ldda be a multiple of 16; otherwise,
performance would degrade because the memory accesses
would not be fully coalesced.
@param[in]
x DOUBLE_PRECISION array **on the CPU** (not the GPU), of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
@param[in]
incx INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
@param[in]
beta DOUBLE_PRECISION.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[in,out]
y DOUBLE_PRECISION array **on the CPU** (not the GPU), of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
@param[in]
incy INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
@param
hwork (workspace) DOUBLE_PRECISION array on the CPU, of dimension (lhwork).
@param[in]
lhwork INTEGER.
The dimension of the array hwork. lhwork >= ngpu*nb.
@param
dwork (workspaces) Array of pointers, dimension (ngpu), to workspace on each GPU.
dwork[dev] is a DOUBLE_PRECISION array on GPU dev, of dimension (ldwork).
@param[in]
ldwork INTEGER.
The dimension of each array dwork[dev].
ldwork >= ldda*( ceil((n + offset % nb) / nb) + 1 ).
@param[in]
ngpu INTEGER.
The number of GPUs to use.
@param[in]
nb INTEGER.
The block size used for distributing d_lA. Must be 64.
@param[in]
queues magma_queue_t array of dimension (ngpu).
queues[dev] is an execution queue on GPU dev.
@ingroup magma_dblas2
********************************************************************/
extern "C"
magma_int_t
magmablas_dsymv_mgpu(
magma_uplo_t uplo,
magma_int_t n,
double alpha,
magmaDouble_const_ptr const d_lA[], magma_int_t ldda,
magma_int_t offset,
double const *x, magma_int_t incx,
double beta, // unused, see magmablas_dsymv_mgpu_sync
double *y, magma_int_t incy, // unused
double *hwork, magma_int_t lhwork,
magmaDouble_ptr dwork[], magma_int_t ldwork,
magma_int_t ngpu,
magma_int_t nb,
magma_queue_t queues[] )
{
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// no CUDA ARCH 1.x version
fprintf( stderr, "%s not supported on CUDA arch 1.x\n", __func__ );
return MAGMA_ERR_NOT_SUPPORTED;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
int upper = (uplo == MagmaUpper);
magma_int_t offset_block_id = offset / NB_X;
magma_int_t offset_gpu_id = offset_block_id % ngpu;
magma_int_t block_offset = offset % NB_X;
magma_int_t blocks = magma_ceildiv( n + block_offset, NB_X );
magma_int_t ldwmin = ldda*(blocks + 1);
magma_int_t lhwmin = n*ngpu;
/*
* Test the input parameters.
*/
magma_int_t info = 0;
if ( (! upper) && (uplo != MagmaLower) ) {
info = -1;
} else if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1,n+offset) ) {
info = -5;
} else if ( offset < 0 ) {
info = -6;
} else if ( incx == 0 ) {
info = -8;
} else if ( incy == 0 ) {
info = -11;
} else if ( lhwork < lhwmin ) {
info = -13;
} else if ( ldwork < ldwmin ) {
info = -15;
} else if ( ngpu < 1 ) {
info = -16;
} else if ( nb != NB_X ) {
info = -17;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( n == 0 )
return info;
magma_device_t orig_dev;
magma_getdevice( &orig_dev );
magma_int_t dev;
for (dev=0; dev < ngpu; dev++) {
magma_setdevice( dev );
// blocks before the offset block
magma_int_t num_blocks_skipped = offset_block_id / ngpu;
if ( dev < offset_gpu_id ) {
num_blocks_skipped += 1;
}
// shift dA to first block >= offset block that is owned by this GPU
double const *dA_dev = d_lA[dev] + offset_block_id*NB_X + num_blocks_skipped*NB_X*ldda;
// first column of dwork is to broadcast x to all GPUs.
// remaining blocks number of columns is for partial sums from
// each block, as in single GPU version.
double *dx_dev = dwork[dev];
double *dwork_dev = dwork[dev] + ldda;
// renumber GPUs starting from the offset block
magma_int_t new_gpu_id = (dev + ngpu - offset_gpu_id) % ngpu;
dim3 grid( blocks, 1 );
// copy x to each GPU
magma_dsetvector_async( n, x, incx, dx_dev + block_offset, 1, queues[dev] );
// perform work = A*x, partial row sums
dim3 threads( NB_X, NB_Y );
// perform w = sum( work ), larger partial row sums
dim3 threads_sum( NB_X, 1 );
if ( upper ) {
dsymv_kernel_U_mgpu<<< grid, threads, 0, queues[dev] >>>(
n, dA_dev, ldda, dx_dev, 1, dwork_dev,
new_gpu_id, ngpu, block_offset );
dsymv_kernel_U_mgpu_sum<<< grid, threads_sum, 0, queues[dev] >>>(
n, alpha, ldda, dx_dev, 1, dwork_dev,
new_gpu_id, ngpu, block_offset );
}
else {
dsymv_kernel_L_mgpu<<< grid, threads, 0, queues[dev] >>>(
n, dA_dev, ldda, dx_dev, 1, dwork_dev,
new_gpu_id, ngpu, block_offset );
dsymv_kernel_L_mgpu_sum<<< grid, threads_sum, 0, queues[dev] >>>(
n, alpha, ldda, dx_dev, 1, dwork_dev,
new_gpu_id, ngpu, block_offset );
}
}
// 2nd loop in case hwork is not pinned, causing this to be sync instead of async.
for (dev=0; dev < ngpu; dev++) {
// copy w to CPU
magma_setdevice( dev );
double *dx_dev = dwork[dev];
magma_dgetvector_async( n, dx_dev + block_offset, 1, &hwork[dev*n], 1, queues[dev] );
}
// see magmablas_dsymv_mgpu_sync for final row sums
magma_setdevice( orig_dev );
return info;
}
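// --------------------
// Minimal sizing sketch (illustrative only; the helper name is an
// assumption): the minimum workspace sizes implied by the ldwmin/lhwmin
// checks above, for callers sizing dwork[dev] and hwork before the call.
static void example_dsymv_mgpu_worksizes(
    magma_int_t n, magma_int_t offset, magma_int_t ldda, magma_int_t ngpu,
    magma_int_t *ldwmin, magma_int_t *lhwmin )
{
    magma_int_t block_offset = offset % NB_X;
    magma_int_t blocks = magma_ceildiv( n + block_offset, NB_X );
    *ldwmin = ldda*(blocks + 1);  // per-GPU dwork: 1 column to broadcast x + blocks columns of partial sums
    *lhwmin = n*ngpu;             // host hwork: one length-n partial result per GPU
}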
/**
Synchronizes and accumulates the final dsymv result.
For convenience, the parameters are identical to magmablas_dsymv_mgpu
(though some are unused here).
@see magmablas_dsymv_mgpu
@ingroup magma_dblas2
********************************************************************/
extern "C" magma_int_t
magmablas_dsymv_mgpu_sync(
magma_uplo_t uplo, // unused, see magmablas_dsymv_mgpu
magma_int_t n,
double alpha, // unused
magmaDouble_const_ptr const d_lA[], magma_int_t ldda,
magma_int_t offset, // unused
double const *x, magma_int_t incx, // unused
double beta,
double *y, magma_int_t incy, // unused
double *hwork, magma_int_t lhwork,
magmaDouble_ptr dwork[], magma_int_t ldwork, // unused
magma_int_t ngpu,
magma_int_t nb, // unused
magma_queue_t queues[] )
{
const double c_one = MAGMA_D_ONE;
const magma_int_t ione = 1;
magma_device_t dev;
magma_int_t lhwmin = n*ngpu;
/*
* Test the input parameters.
*/
magma_int_t info = 0;
//if ( (! upper) && (uplo != MagmaLower) ) { // unused
// info = -1;
//} else
if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1,n+offset) ) {
info = -5;
} else if ( offset < 0 ) {
info = -6;
} else if ( incx == 0 ) {
info = -8;
} else if ( incy == 0 ) {
info = -11;
} else if ( lhwork < lhwmin ) {
info = -13;
//} else if ( ldwork < ldwmin ) { // unused
// info = -15;
} else if ( ngpu < 1 ) {
info = -16;
} else if ( nb != NB_X ) {
info = -17;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( n == 0 )
return info;
magma_device_t orig_dev;
magma_getdevice( &orig_dev );
// scale y = beta*y
blasf77_dscal( &n, &beta, y, &incy );
// sum reduce, y += sum( hwork )
for (dev=0; dev < ngpu; ++dev) {
magma_setdevice( dev );
magma_queue_sync( queues[dev] );
blasf77_daxpy( &n, &c_one, &hwork[dev*n], &ione, y, &ione );
}
magma_setdevice( orig_dev );
return info;
}
|
87f936f4e23755135f4e549e2373aa359bac5b00.hip
|
// !!! This is a file automatically generated by hipify!!!
// nvcc 001 isamax .c -lcublas
#include <iostream>
#include <hip/hip_runtime.h>
#include <cuda_device_runtime_api.h>
#include "rocblas.h"
#include "hiprand/hiprand.h"
#include "hip/hip_fp16.h"
#include <time.h>
#include <hip/hip_runtime.h>
#include "device_launch_parameters.h"
#include <hip/hip_runtime_api.h>
#include <ctime>
#include "common.h"
using namespace std;
__global__ void convertFp32ToFp16 (half *out, float *in, int n) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < n) {
out[idx] = (in[idx]);
}
}
__global__ void convertFp16ToFp32 (float *out, half *in, int n) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < n) {
out[idx] = (in[idx]);
}
}
/*
__global__ void convertFp32ToFp16 (__half *out, float *in, int rows, int cols) {
for(int i = 0; i < rows; i++){
for(int j = 0; j < cols; j++){
out[i * cols + j] = __float2half(in[i * cols + j]);
}
}
}
*/
void print_matrix(float *A, int nr_rows_A, int nr_cols_A) {
for(int i = 0; i < nr_rows_A; i++){
for(int j = 0; j < nr_cols_A; j++){
std::cout << A[i * nr_cols_A + j] << " ";
}
std::cout << std::endl;
}
std::cout << std::endl;
}
// Fill the array with random numbers on GPU
void GPU_fill_rand(float *A, int nr_rows_A, int nr_cols_A) {
// Create a pseudo-random number generator
hiprandGenerator_t prng;
hiprandCreateGenerator(&prng, HIPRAND_RNG_PSEUDO_DEFAULT);
// Set the seed for the random number generator using the system clock
hiprandSetPseudoRandomGeneratorSeed(prng, (unsigned long long) clock());
// Fill the array with random numbers on the device
hiprandGenerateUniform(prng, A, nr_rows_A * nr_cols_A);
}
void gpu_blas_mmul(__half *A, __half *B, __half *C, int m, int k, int n) {
const float alf = 1.0f;
const float bet = 0.0f;
const float *alpha = &alf;
const float *beta = &bet;
// Create a handle for CUBLAS
hipblasHandle_t handle;
hipblasStatus_t cublasStat = hipblasCreate(&handle);
// Set the math mode to allow cuBLAS to use Tensor Cores:
cublasStat = cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH);
//cublasStat = cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH);
//n maps to the output dimension.
//m is the batch size * seq length.
//k maps to the input dimension.
//leading dimension of B will be cols in B(host) and it will be accessed as T.
//leading dimension of A will be cols in A(host) and it will be accessed as N.
//Leading dimension of C will be cols in C(host) and it will be accessed as N.
//A is m * k in host, k * m in device.
//B is n * k in host, k * n in device.
//C is m * n in host, n * m in device.
//m will be rows A, C.
//k will be cols A, B.
//n will be rows B, cols in C.
int lda = k, ldb = k, ldc = n;
int niter = 10000;
for(int i = 0; i < niter; i++){
// Do the actual multiplication
// cublasStat = hipblasGemmEx(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, n, m, k, alpha, B, HIP_R_16F, ldb, A, HIP_R_16F, lda, beta, C, HIP_R_16F, ldc, HIP_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP);
check_cuda_error(hipblasGemmEx(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, n, m, k, alpha, B, HIP_R_16F, ldb, A, HIP_R_16F, lda, beta, C, HIP_R_16F, ldc, HIP_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP));
//cout<<cublasStat<<endl;
//cublasStat = hipblasGemmEx(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, 1024, 1536, 4096, alpha, B, HIP_R_16F, 1024, A, HIP_R_16F, 4096, beta, C,HIP_R_16F,1024 ,HIP_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP);
}
/*
for(int i = 0; i < 20; i++){
hipblasGemmStridedBatchedEx(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, m, n, k, alpha, A, HIP_R_16F, lda, 384 * 384, B, HIP_R_16F, ldb, 384 * 64, beta, C, HIP_R_16F, ldc, 384 * 64, 4, HIP_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP );
}
*/
// Destroy the handle
hipblasDestroy(handle);
}
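// Minimal inspection sketch (illustrative; the helper name inspect_result and
// the 4x4 tile size are assumptions): converts the fp16 result back to fp32
// with convertFp16ToFp32 and prints a leading tile via print_matrix, reusing
// the float scratch buffer already allocated for C.
void inspect_result(__half *d_C, float *df_C, int rows, int cols) {
    // convert the whole fp16 result matrix to fp32 on the device
    hipLaunchKernelGGL(( convertFp16ToFp32) , dim3((rows * cols + 255) / 256), dim3(256) , 0, 0, df_C, d_C, rows * cols);
    const int tile = 4;
    float *h_tile = new float[tile * tile];
    // copy the leading tile row by row (consecutive rows are cols floats apart on the device)
    for (int i = 0; i < tile; i++) {
        check_cuda_error(hipMemcpy(h_tile + i * tile, df_C + i * cols, tile * sizeof(float), hipMemcpyDeviceToHost));
    }
    print_matrix(h_tile, tile, tile);
    delete[] h_tile;
}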
int main() {
int nr_rows_A, nr_cols_A, nr_rows_B, nr_cols_B, nr_rows_C, nr_cols_C;
//A is for the activations. It has dimensions m * k, where m is (seq length * batch size) and k is the number of inputs to the layer.
//B is for the weights, stored as B' on the host. It has dimensions n * k, where n is the number of outputs and k is the number of inputs to the layer.
//C is the output matrix. It has dimensions m * n.
//Matmul will be A B'.
//set dims according to operation c = a * b'
nr_rows_A = 4096;
nr_cols_A = 4096;
nr_rows_B = 4096;
nr_cols_B = 4096;
nr_rows_C = 4096;
nr_cols_C = 4096;
// Allocate 6 arrays on GPU.
// array on device of type half.
// float because hiprand generates only fp32 numbers.
// __half arrays for fp16 numbers.
float *df_A, *df_B, *df_C;
__half *d_A, *d_B, *d_C;
check_cuda_error(hipMalloc(&d_A,nr_rows_A * nr_cols_A * sizeof(__half)));
check_cuda_error(hipMalloc(&df_A,nr_rows_A * nr_cols_A * sizeof(float)));
GPU_fill_rand(df_A, nr_rows_A, nr_cols_A);
hipLaunchKernelGGL(( convertFp32ToFp16) , dim3((nr_rows_A * nr_cols_A+ 255) / 256), dim3(256) , 0, 0, d_A, df_A, nr_rows_A * nr_cols_A);
check_cuda_error(hipMalloc(&d_B,nr_rows_B * nr_cols_B * sizeof(__half)));
check_cuda_error(hipMalloc(&df_B,nr_rows_B * nr_cols_B * sizeof(float)));
GPU_fill_rand(df_B, nr_rows_B, nr_cols_B);
hipLaunchKernelGGL(( convertFp32ToFp16) , dim3((nr_rows_B * nr_cols_B + 255) / 256), dim3(256) , 0, 0, d_B, df_B, nr_rows_B * nr_cols_B);
check_cuda_error(hipMalloc(&d_C,nr_rows_C * nr_cols_C * sizeof(__half)));
check_cuda_error(hipMalloc(&df_C,nr_rows_C * nr_cols_C * sizeof(float)));
//m will be rows a.
//k will be cols a.
//n will be rows b.
//call the matmul function.
gpu_blas_mmul(d_A, d_B, d_C, nr_rows_A, nr_cols_A, nr_rows_B);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
hipFree(df_A);
hipFree(df_B);
hipFree(df_C);
return 0;
}
|
87f936f4e23755135f4e549e2373aa359bac5b00.cu
|
// nvcc 001 isamax .c -lcublas
#include <iostream>
#include <cuda_runtime.h>
#include <cuda_device_runtime_api.h>
#include "cublas_v2.h"
#include "curand.h"
#include "cuda_fp16.h"
#include <time.h>
#include <cuda.h>
#include "device_launch_parameters.h"
#include <cuda_profiler_api.h>
#include <ctime>
#include "common.h"
using namespace std;
__global__ void convertFp32ToFp16 (half *out, float *in, int n) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < n) {
out[idx] = (in[idx]);
}
}
__global__ void convertFp16ToFp32 (float *out, half *in, int n) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < n) {
out[idx] = (in[idx]);
}
}
/*
__global__ void convertFp32ToFp16 (__half *out, float *in, int rows, int cols) {
for(int i = 0; i < rows; i++){
for(int j = 0; j < cols; j++){
out[i * cols + j] = __float2half(in[i * cols + j]);
}
}
}
*/
void print_matrix(float *A, int nr_rows_A, int nr_cols_A) {
for(int i = 0; i < nr_rows_A; i++){
for(int j = 0; j < nr_cols_A; j++){
std::cout << A[i * nr_cols_A + j] << " ";
}
std::cout << std::endl;
}
std::cout << std::endl;
}
// Fill the array with random numbers on GPU
void GPU_fill_rand(float *A, int nr_rows_A, int nr_cols_A) {
// Create a pseudo-random number generator
curandGenerator_t prng;
curandCreateGenerator(&prng, CURAND_RNG_PSEUDO_DEFAULT);
// Set the seed for the random number generator using the system clock
curandSetPseudoRandomGeneratorSeed(prng, (unsigned long long) clock());
// Fill the array with random numbers on the device
curandGenerateUniform(prng, A, nr_rows_A * nr_cols_A);
}
void gpu_blas_mmul(__half *A, __half *B, __half *C, int m, int k, int n) {
const float alf = 1.0f;
const float bet = 0.0f;
const float *alpha = &alf;
const float *beta = &bet;
// Create a handle for CUBLAS
cublasHandle_t handle;
cublasStatus_t cublasStat = cublasCreate(&handle);
// Set the math mode to allow cuBLAS to use Tensor Cores:
cublasStat = cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH);
//cublasStat = cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH);
//n maps to the output dimension.
//m is the batch size * seq length.
//k maps to the input dimension.
//leading dimension of B will be cols in B(host) and it will be accessed as T.
//leading dimension of A will be cols in A(host) and it will be accessed as N.
//Leading dimension of C will be cols in C(host) and it will be accessed as N.
//A is m * k in host, k * m in device.
//B is n * k in host, k * n in device.
//C is m * n in host, n * m in device.
//m will be rows A, C.
//k will be cols A, B.
//n will be rows B, cols in C.
int lda = k, ldb = k, ldc = n;
int niter = 10000;
for(int i = 0; i < niter; i++){
// Do the actual multiplication
// cublasStat = cublasGemmEx(handle, CUBLAS_OP_T, CUBLAS_OP_N, n, m, k, alpha, B, CUDA_R_16F, ldb, A, CUDA_R_16F, lda, beta, C, CUDA_R_16F, ldc, CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP);
check_cuda_error(cublasGemmEx(handle, CUBLAS_OP_T, CUBLAS_OP_N, n, m, k, alpha, B, CUDA_R_16F, ldb, A, CUDA_R_16F, lda, beta, C, CUDA_R_16F, ldc, CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP));
//cout<<cublasStat<<endl;
//cublasStat = cublasGemmEx(handle, CUBLAS_OP_T, CUBLAS_OP_N, 1024, 1536, 4096, alpha, B, CUDA_R_16F, 1024, A, CUDA_R_16F, 4096, beta, C,CUDA_R_16F,1024 ,CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP);
}
/*
for(int i = 0; i < 20; i++){
cublasGemmStridedBatchedEx(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, alpha, A, CUDA_R_16F, lda, 384 * 384, B, CUDA_R_16F, ldb, 384 * 64, beta, C, CUDA_R_16F, ldc, 384 * 64, 4, CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP );
}
*/
// Destroy the handle
cublasDestroy(handle);
}
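// Minimal timing sketch (illustrative; the helper name time_mmul is an
// assumption): wraps the niter GEMM calls made inside gpu_blas_mmul with
// CUDA events and returns the total elapsed time in milliseconds.
float time_mmul(__half *A, __half *B, __half *C, int m, int k, int n) {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    gpu_blas_mmul(A, B, C, m, k, n);   // runs niter GEMMs internally
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms;   // total milliseconds for all niter iterations
}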
int main() {
int nr_rows_A, nr_cols_A, nr_rows_B, nr_cols_B, nr_rows_C, nr_cols_C;
//A is for the activations. It has dimensions m * k, where m is (seq length * batch size) and k is the number of inputs to the layer.
//B is for the weights, stored as B' on the host. It has dimensions n * k, where n is the number of outputs and k is the number of inputs to the layer.
//C is the output matrix. It has dimensions m * n.
//Matmul will be A B'.
//set dims according to operation c = a * b'
nr_rows_A = 4096;
nr_cols_A = 4096;
nr_rows_B = 4096;
nr_cols_B = 4096;
nr_rows_C = 4096;
nr_cols_C = 4096;
// Allocate 6 arrays on GPU.
// array on device of type half.
// float because curand generates only fp32 numbers.
// __half arrays for fp16 numbers.
float *df_A, *df_B, *df_C;
__half *d_A, *d_B, *d_C;
check_cuda_error(cudaMalloc(&d_A,nr_rows_A * nr_cols_A * sizeof(__half)));
check_cuda_error(cudaMalloc(&df_A,nr_rows_A * nr_cols_A * sizeof(float)));
GPU_fill_rand(df_A, nr_rows_A, nr_cols_A);
convertFp32ToFp16 <<< (nr_rows_A * nr_cols_A+ 255) / 256, 256 >>> (d_A, df_A, nr_rows_A * nr_cols_A);
check_cuda_error(cudaMalloc(&d_B,nr_rows_B * nr_cols_B * sizeof(__half)));
check_cuda_error(cudaMalloc(&df_B,nr_rows_B * nr_cols_B * sizeof(float)));
GPU_fill_rand(df_B, nr_rows_B, nr_cols_B);
convertFp32ToFp16 <<< (nr_rows_B * nr_cols_B + 255) / 256, 256 >>> (d_B, df_B, nr_rows_B * nr_cols_B);
check_cuda_error(cudaMalloc(&d_C,nr_rows_C * nr_cols_C * sizeof(__half)));
check_cuda_error(cudaMalloc(&df_C,nr_rows_C * nr_cols_C * sizeof(float)));
//m will be rows a.
//k will be cols a.
//n will be rows b.
//call the matmul function.
gpu_blas_mmul(d_A, d_B, d_C, nr_rows_A, nr_cols_A, nr_rows_B);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
cudaFree(df_A);
cudaFree(df_B);
cudaFree(df_C);
return 0;
}
|
4b15af94ba7dc8ca8b574fad5a890283df6e4bef.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* a simple test
*/
__shared__ float data1[32];
__shared__ float data2[32];
__shared__ float data3[32];
__device__ void mult(__shared__ float d1[32],
__shared__ float d2[32],
__shared__ float d3[32],
int idx)
{
int i;
int j, k, l;
j = -1;
k = 0;
l = 1;
for (i = 0; i < 1024; i++) {
j++;
k++;
l++;
d1[i+j+k] = 1.0;
}
}
__global__ void doit(int start, int end) {
int i = 99;
mult(data1, data2, data3, i);
}
|
4b15af94ba7dc8ca8b574fad5a890283df6e4bef.cu
|
/*
* a simple test
*/
__shared__ float data1[32];
__shared__ float data2[32];
__shared__ float data3[32];
__device__ void mult(__shared__ float d1[32],
__shared__ float d2[32],
__shared__ float d3[32],
int idx)
{
int i;
int j, k, l;
j = -1;
k = 0;
l = 1;
for (i = 0; i < 1024; i++) {
j++;
k++;
l++;
d1[i+j+k] = 1.0;
}
}
__global__ void doit(int start, int end) {
int i = 99;
mult(data1, data2, data3, i);
}
|
b443190581e25f43c97a4431ca375bd093f37462.hip
|
// !!! This is a file automatically generated by hipify!!!
// CUDA Device Query
#include <stdio.h>
// Print device properties
void printDevProp(hipDeviceProp_t devProp)
{
printf("Major revision number: %d\n", devProp.major);
printf("Minor revision number: %d\n", devProp.minor);
printf("Name: %s\n", devProp.name);
printf("Total global memory: %u\n", devProp.totalGlobalMem);
printf("Total shared memory per block: %u\n", devProp.sharedMemPerBlock);
printf("Total registers per block: %d\n", devProp.regsPerBlock);
printf("Warp size: %d\n", devProp.warpSize);
printf("Maximum memory pitch: %u\n", devProp.memPitch);
printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock);
for (int i = 0; i < 3; ++i)
printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]);
for (int i = 0; i < 3; ++i)
printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]);
printf("Clock rate: %d\n", devProp.clockRate);
printf("Total constant memory: %u\n", devProp.totalConstMem);
printf("Texture alignment: %u\n", devProp.textureAlignment);
printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No"));
printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount);
printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No"));
return;
}
int main()
{
// Number of CUDA devices
int devCount;
hipGetDeviceCount(&devCount);
printf("CUDA Device Query...\n");
printf("There are %d CUDA devices.\n", devCount);
// Iterate through devices
for (int i = 0; i < devCount; ++i)
{
// Get device properties
printf("\nCUDA Device #%d\n", i);
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp, i);
printDevProp(devProp);
}
printf("\nPress any key to exit...");
char c;
scanf("%c", &c);
return 0;
}
|
b443190581e25f43c97a4431ca375bd093f37462.cu
|
// CUDA Device Query
#include <stdio.h>
// Print device properties
void printDevProp(cudaDeviceProp devProp)
{
printf("Major revision number: %d\n", devProp.major);
printf("Minor revision number: %d\n", devProp.minor);
printf("Name: %s\n", devProp.name);
printf("Total global memory: %u\n", devProp.totalGlobalMem);
printf("Total shared memory per block: %u\n", devProp.sharedMemPerBlock);
printf("Total registers per block: %d\n", devProp.regsPerBlock);
printf("Warp size: %d\n", devProp.warpSize);
printf("Maximum memory pitch: %u\n", devProp.memPitch);
printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock);
for (int i = 0; i < 3; ++i)
printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]);
for (int i = 0; i < 3; ++i)
printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]);
printf("Clock rate: %d\n", devProp.clockRate);
printf("Total constant memory: %u\n", devProp.totalConstMem);
printf("Texture alignment: %u\n", devProp.textureAlignment);
printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No"));
printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount);
printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No"));
return;
}
int main()
{
// Number of CUDA devices
int devCount;
cudaGetDeviceCount(&devCount);
printf("CUDA Device Query...\n");
printf("There are %d CUDA devices.\n", devCount);
// Iterate through devices
for (int i = 0; i < devCount; ++i)
{
// Get device properties
printf("\nCUDA Device #%d\n", i);
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp, i);
printDevProp(devProp);
}
printf("\nPress any key to exit...");
char c;
scanf("%c", &c);
return 0;
}
|
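Several cudaDeviceProp members printed by both copies above (totalGlobalMem, sharedMemPerBlock, memPitch, totalConstMem, textureAlignment) are size_t, so %u can truncate them on 64-bit platforms; a minimal sketch of size_t-safe printing for those fields:
#include <stdio.h>
#include <cuda_runtime.h>
// Sketch: print the size_t-valued properties with %zu so 64-bit values are not truncated.
void printMemorySizes(cudaDeviceProp devProp) {
    printf("Total global memory: %zu\n", devProp.totalGlobalMem);
    printf("Total shared memory per block: %zu\n", devProp.sharedMemPerBlock);
    printf("Maximum memory pitch: %zu\n", devProp.memPitch);
    printf("Total constant memory: %zu\n", devProp.totalConstMem);
    printf("Texture alignment: %zu\n", devProp.textureAlignment);
}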
0634e9012c1956e612a8c00487361442eee73e0f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <THH/THHAtomics.cuh>
#include <THH/THHNumerics.cuh>
namespace at {
namespace cuda {
#define THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM 100
#define THRESH_NUMBER_BINS_FOR_GLOBAL_MEM 1000
#define FOR_KERNEL_LOOP(i, lim) \
for (IndexType i = blockIdx.x * blockDim.x + threadIdx.x; i < lim; \
i += gridDim.x * blockDim.x)
/*
Memory types used for the 3 histogram implementations.
See `CUDA_tensor_histogram` below.
*/
enum class CUDAHistogramMemoryType { SHARED, MULTI_BLOCK, GLOBAL };
namespace {
template<typename input_t, typename IndexType>
__device__ static IndexType getBin(input_t bVal, input_t minvalue, input_t maxvalue, int64_t nbins) {
IndexType bin = (int)((bVal - minvalue) * nbins / (maxvalue - minvalue));
// (only applicable for histc)
// while each bin is inclusive at the lower end and exclusive at the higher, i.e. [start, end),
// the last bin is inclusive at both ends, i.e. [start, end], so that maxvalue (if it exists) is counted;
// therefore when bin == nbins, adjust bin to the last bin
if (bin == nbins) bin -= 1;
return bin;
}
}
/*
Kernel for computing the histogram of the input.
*/
template <
typename output_t,
typename input_t,
typename IndexType,
int ADims,
int PDims,
int BDims,
CUDAHistogramMemoryType MemoryType = CUDAHistogramMemoryType::MULTI_BLOCK,
typename Op>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(512)
#endif
__global__ void kernelHistogram1D(
detail::TensorInfo<output_t, IndexType> a, /* output */
detail::TensorInfo<output_t, IndexType> p, /* partial output */
detail::TensorInfo<input_t, IndexType> b, /* input */
int64_t nbins,
input_t minvalue,
input_t maxvalue,
IndexType totalElements,
Op getOp) {
extern __shared__ unsigned char my_smem[];
output_t* smem = nullptr;
if (MemoryType == CUDAHistogramMemoryType::SHARED) {
////////////////////////// Shared memory //////////////////////////
// atomically add to block specific shared memory
// then atomically add to the global output tensor
smem = reinterpret_cast<output_t*>(my_smem);
for (IndexType i = threadIdx.x; i < a.sizes[0]; i += blockDim.x) {
smem[i] = 0;
}
__syncthreads();
FOR_KERNEL_LOOP(linearIndex, totalElements) {
// Convert `linearIndex` into an offset of `b`
const IndexType bOffset =
detail::IndexToOffset<input_t, IndexType, BDims>::get(linearIndex, b);
const auto bVal = b.data[bOffset];
if (bVal >= minvalue && bVal <= maxvalue) {
// Use value at `b` as an offset of `smem`
const IndexType bin = getBin<input_t, IndexType>(bVal, minvalue, maxvalue, nbins);
gpuAtomicAdd(&smem[bin], getOp(linearIndex));
}
}
__syncthreads();
// NOTE: atomically update output bin count.
// Atomic update is important since __syncthreads() will only synchronize threads
// in a given block, not across blocks.
for (IndexType i = threadIdx.x; i < a.sizes[0]; i += blockDim.x) {
const IndexType aOffset =
detail::IndexToOffset<output_t, IndexType, ADims>::get(i, a);
gpuAtomicAdd(&a.data[aOffset], smem[i]);
}
} else if (MemoryType == CUDAHistogramMemoryType::MULTI_BLOCK) {
////////////////////////// Multi Block memory //////////////////////////
// atomically add to block specific global tensor
// then atomically add to the global output tensor
// compute histogram for the block
FOR_KERNEL_LOOP(linearIndex, totalElements) {
// Convert `linearIndex` into an offset of `b`
const IndexType bOffset =
detail::IndexToOffset<input_t, IndexType, BDims>::get(linearIndex, b);
const auto bVal = b.data[bOffset];
if (bVal >= minvalue && bVal <= maxvalue) {
// Use value at `b` as an offset of `p`
const IndexType bin = getBin<input_t, IndexType>(bVal, minvalue, maxvalue, nbins);
const IndexType pIdx = p.strides[0] * blockIdx.x + bin;
const IndexType pOffset =
detail::IndexToOffset<output_t, IndexType, PDims>::get(pIdx, p);
gpuAtomicAdd(&p.data[pOffset], getOp(linearIndex));
}
}
__syncthreads();
// NOTE: atomically update output bin count.
// Atomic update is important since __syncthreads() will only synchronize threads
// in a given block, not across blocks.
const IndexType pIdx = p.strides[0] * blockIdx.x;
const IndexType pOffset =
detail::IndexToOffset<output_t, IndexType, PDims>::get(pIdx, p);
for (IndexType i = threadIdx.x; i < a.sizes[0]; i += blockDim.x) {
const IndexType aOffset =
detail::IndexToOffset<output_t, IndexType, ADims>::get(i, a);
gpuAtomicAdd(&a.data[aOffset], p.data[pOffset + i]);
}
} else {
////////////////////////// Global memory //////////////////////////
// atomically add to the output tensor
// compute histogram for the block
FOR_KERNEL_LOOP(linearIndex, totalElements) {
// Convert `linearIndex` into an offset of `b`
const IndexType bOffset =
detail::IndexToOffset<input_t, IndexType, BDims>::get(linearIndex, b);
const auto bVal = b.data[bOffset];
if (bVal >= minvalue && bVal <= maxvalue) {
// Use value at `b` as an offset of `a`
const IndexType bin = getBin<input_t, IndexType>(bVal, minvalue, maxvalue, nbins);
const IndexType aOffset =
detail::IndexToOffset<output_t, IndexType, ADims>::get(bin, a);
gpuAtomicAdd(&a.data[aOffset], getOp(linearIndex));
}
}
}
}
#define HANDLE_CASE(MEMORY_TYPE, WEIGHTS_OP, SHARED_MEM) \
hipLaunchKernelGGL(( kernelHistogram1D<output_t, input_t, IndexType, 1, 2, -1, MEMORY_TYPE>) \
, dim3(grid), \
block, \
SHARED_MEM, \
getCurrentHIPStreamMasqueradingAsCUDA(), \
aInfo, pInfo, bInfo, nbins, minvalue, maxvalue, totalElements, WEIGHTS_OP); \
C10_HIP_KERNEL_LAUNCH_CHECK();
#define HANDLE_SWITCH_CASE(mType, getOp) \
switch (mType) { \
case CUDAHistogramMemoryType::SHARED: \
HANDLE_CASE(CUDAHistogramMemoryType::SHARED, getOp, sharedMem); \
break; \
case CUDAHistogramMemoryType::MULTI_BLOCK: \
HANDLE_CASE(CUDAHistogramMemoryType::MULTI_BLOCK, getOp, 0); \
break; \
default: \
HANDLE_CASE(CUDAHistogramMemoryType::GLOBAL, getOp, 0); \
}
inline int64_t getFreeGlobalMemory() {
// no need to use `hipSetDevice`
size_t free_mem, total_mem;
hipMemGetInfo(&free_mem, &total_mem);
TORCH_INTERNAL_ASSERT(
hipGetLastError() == hipSuccess,
"CUDA_tensor_histogram failed to get free global memory");
return static_cast<int64_t>(free_mem);
}
/*
Calculate the frequency of the input values.
`a` contains the final output or the histogram.
Input `b` is assumed to be 1-D non-negative int array.
`c` optionally contains the weight vector.
See `help torch.bincount` for details on the math.
3 implementations based on input size and memory usage:
case: #bins < THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM and enough shared mem
SHARED: Each block atomically adds to its own **shared** hist copy,
then atomically updates the global tensor.
case: #bins < THRESH_NUMBER_BINS_FOR_GLOBAL_MEM and enough global mem
MULTI_BLOCK: Each block atomically adds to its own **global** hist
copy, then atomically updates the global tensor.
case: THRESH_NUMBER_BINS_FOR_GLOBAL_MEM <= #bins
GLOBAL: all threads atomically update to a single **global** hist copy.
*/
template <typename output_t, typename input_t, bool HasWeights>
bool CUDA_tensor_histogram(
at::Tensor a, /* output */
at::Tensor b, /* input */
at::Tensor c, /* weights(optional) */
int64_t nbins,
input_t minvalue,
input_t maxvalue,
TensorArgType aType = TensorArgType::ReadWrite,
TensorArgType bType = TensorArgType::ReadOnly,
TensorArgType cType = TensorArgType::ReadOnly) {
checkBackend("CUDA_tensor_histogram", {a, b}, Backend::CUDA);
if (HasWeights) {
checkBackend("CUDA_tensor_histogram", {c}, Backend::CUDA);
}
auto totalElements = b.numel();
if (totalElements == 0) {
return false;
}
const dim3 block = getApplyBlock();
dim3 grid;
int64_t curDevice = current_device();
if (curDevice == -1 || !getApplyGrid(totalElements, grid, curDevice)) {
return false;
}
CUDAHistogramMemoryType memType = CUDAHistogramMemoryType::GLOBAL;
auto maxSharedMem = getCurrentDeviceProperties()->sharedMemPerBlock;
auto sharedMem = nbins * sizeof(output_t) + 8; // 8 guard bytes
auto maxGlobalMem = getFreeGlobalMemory();
auto multiBlockMem = nbins * grid.x * sizeof(output_t) + 8; // 8 guard bytes
// determine memory type to use in the kernel
if (nbins < THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM &&
sharedMem < maxSharedMem) {
memType = CUDAHistogramMemoryType::SHARED;
} else if (
nbins < THRESH_NUMBER_BINS_FOR_GLOBAL_MEM &&
multiBlockMem < (maxGlobalMem / 2)) {
// check against half of free mem to be extra safe
// due to cached allocator, we may anyway have slightly more free mem
memType = CUDAHistogramMemoryType::MULTI_BLOCK;
}
// alloc memory for MULTI_BLOCK
using IndexType = int64_t;
auto aInfo = detail::getTensorInfo<output_t, IndexType>(a);
auto bInfo = detail::getTensorInfo<input_t, IndexType>(b);
detail::TensorInfo<output_t, IndexType> pInfo(nullptr, 0, {}, {});
Tensor partial_output;
if (memType == CUDAHistogramMemoryType::MULTI_BLOCK) {
partial_output = native::zeros(
{grid.x, nbins},
optTypeMetaToScalarType(a.options().dtype_opt()),
a.options().layout_opt(),
a.options().device_opt(),
a.options().pinned_memory_opt());
pInfo = detail::getTensorInfo<output_t, IndexType>(partial_output);
}
if (HasWeights) {
auto cInfo = detail::getTensorInfo<output_t, IndexType>(c);
const auto getWeightsOp = [cInfo] __device__(IndexType cIndex) {
const IndexType cOffset =
detail::IndexToOffset<output_t, IndexType, 1>::get(cIndex, cInfo);
return cInfo.data[cOffset];
};
HANDLE_SWITCH_CASE(memType, getWeightsOp)
} else {
static const auto getDummyOp = [] __device__(IndexType) { return 1L; };
HANDLE_SWITCH_CASE(memType, getDummyOp)
}
return true;
}
#undef HANDLE_CASE
#undef HANDLE_SWITCH_CASE
#undef FOR_KERNEL_LOOP
#undef THRESH_NUMBER_BINS_FOR_GLOBAL_MEM
#undef THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM
} // namespace cuda
namespace {
///////////////// bincount /////////////////
template <typename input_t, typename weights_t>
Tensor _bincount_cuda_template(
const Tensor& self,
const Tensor& weights,
int64_t minlength) {
if (minlength < 0) {
AT_ERROR("minlength should be >= 0");
}
if (self.dim() == 1 && self.numel() == 0) {
return native::zeros(
{minlength},
kLong,
c10::nullopt /* layout */,
kCUDA,
c10::nullopt /* pin_memory */);
}
if (self.dim() != 1 ||
(!std::is_same<input_t, uint8_t>::value &&
*self.min().cpu().data_ptr<input_t>() < 0)) {
AT_ERROR("bincount only supports 1-d non-negative integral inputs.");
}
bool has_weights = weights.defined();
if (has_weights && weights.size(0) != self.size(0)) {
AT_ERROR("input and weights should have the same length");
}
const int64_t nbins = ::max(*self.max().cpu().data_ptr<input_t>() + (int64_t)1, minlength);
const input_t minvalue = 0;
const input_t maxvalue = nbins;
// alloc output counter on GPU
Tensor output;
if (has_weights) {
output = native::zeros(
{nbins},
optTypeMetaToScalarType(weights.options().dtype_opt()),
weights.options().layout_opt(),
weights.options().device_opt(),
weights.options().pinned_memory_opt());
auto ret = cuda::CUDA_tensor_histogram<weights_t, input_t, true>(
output, self, weights, nbins, minvalue, maxvalue);
} else {
output = native::zeros(
{nbins},
kLong,
c10::nullopt /* layout */,
DeviceType::CUDA,
c10::nullopt /* pin_memory */);
auto ret = cuda::CUDA_tensor_histogram<int64_t, input_t, false>(
output, self, weights, nbins, minvalue, maxvalue);
}
return output;
}
///////////////// histc /////////////////
template <typename input_t>
Tensor _histc_cuda_template(
const Tensor& self,
int64_t nbins,
input_t min,
input_t max) {
if (nbins <= 0) {
AT_ERROR("bins must be > 0");
}
Tensor output = native::zeros(
{nbins},
self.scalar_type(),
c10::nullopt /* layout */,
DeviceType::CUDA,
c10::nullopt /* pin_memory */);
input_t minvalue = min;
input_t maxvalue = max;
if (min == max) {
minvalue = *self.min().cpu().data_ptr<input_t>();
maxvalue = *self.max().cpu().data_ptr<input_t>();
}
if (minvalue == maxvalue) {
minvalue = minvalue - 1;
maxvalue = maxvalue + 1;
}
#ifndef __HIP_PLATFORM_HCC__
TORCH_CHECK(
!(THCNumerics<input_t>::isinf(minvalue) ||
THCNumerics<input_t>::isinf(maxvalue) ||
THCNumerics<input_t>::isnan(minvalue) ||
THCNumerics<input_t>::isnan(maxvalue)),
"range of [",
minvalue,
", ",
maxvalue,
"] is not finite");
#else
TORCH_CHECK(
!(std::isinf(minvalue) || std::isinf(maxvalue) || std::isnan(minvalue) ||
std::isnan(maxvalue)),
"range of [",
minvalue,
", ",
maxvalue,
"] is not finite");
#endif
TORCH_CHECK(minvalue < maxvalue, "max must be larger than min");
auto ret = cuda::CUDA_tensor_histogram<input_t, input_t, false>(
output, self, Tensor(), nbins, minvalue, maxvalue);
return output;
}
} // namespace
namespace native {
Tensor _bincount_cuda(
const Tensor& self, const c10::optional<Tensor>& weights_opt,
int64_t minlength) {
// See [Note: hacky wrapper removal for optional tensor]
const Tensor& weights = c10::value_or_else(weights_opt, [] {return Tensor();});
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("_bincount_cuda");
return AT_DISPATCH_INTEGRAL_TYPES(self.scalar_type(), "bincount_cuda", [&] {
const auto scalar = weights.scalar_type();
if (scalar == ScalarType::Undefined || scalar == ScalarType::Float)
return _bincount_cuda_template<scalar_t, float>(self, weights, minlength);
return _bincount_cuda_template<scalar_t, double>(
self, weights.to(kDouble), minlength);
});
}
Tensor _histc_cuda(
const Tensor& self,
int64_t nbins,
const Scalar& min,
const Scalar& max) {
if (self.scalar_type() == ScalarType::Half) {
AT_ERROR("HalfTensor is not supported");
}
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("_histc_cuda");
return AT_DISPATCH_ALL_TYPES(self.scalar_type(), "histc", [&] {
return _histc_cuda_template<scalar_t>(self, nbins, min.to<scalar_t>(), max.to<scalar_t>());
});
}
Tensor& _histc_out_cuda(const Tensor& self, int64_t bins, const Scalar& min, const Scalar& max, Tensor& result) {
auto ret = _histc_cuda(self, bins, min, max);
result.resize_as_(ret);
result.copy_(ret);
return result;
}
} // namespace native
} // namespace at
|
0634e9012c1956e612a8c00487361442eee73e0f.cu
|
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <THC/THCAtomics.cuh>
#include <THC/THCNumerics.cuh>
namespace at {
namespace cuda {
#define THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM 100
#define THRESH_NUMBER_BINS_FOR_GLOBAL_MEM 1000
#define FOR_KERNEL_LOOP(i, lim) \
for (IndexType i = blockIdx.x * blockDim.x + threadIdx.x; i < lim; \
i += gridDim.x * blockDim.x)
/*
Memory types used for the 3 histogram implementations.
See `CUDA_tensor_histogram` below.
*/
enum class CUDAHistogramMemoryType { SHARED, MULTI_BLOCK, GLOBAL };
namespace {
template<typename input_t, typename IndexType>
__device__ static IndexType getBin(input_t bVal, input_t minvalue, input_t maxvalue, int64_t nbins) {
IndexType bin = (int)((bVal - minvalue) * nbins / (maxvalue - minvalue));
// (only applicable for histc)
// while each bin is inclusive at the lower end and exclusive at the higher, i.e. [start, end),
// the last bin is inclusive at both ends, i.e. [start, end], so that maxvalue (if it exists) is counted;
// therefore when bin == nbins, adjust bin to the last bin
if (bin == nbins) bin -= 1;
return bin;
}
}
/*
Kernel for computing the histogram of the input.
*/
template <
typename output_t,
typename input_t,
typename IndexType,
int ADims,
int PDims,
int BDims,
CUDAHistogramMemoryType MemoryType = CUDAHistogramMemoryType::MULTI_BLOCK,
typename Op>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(512)
#endif
__global__ void kernelHistogram1D(
detail::TensorInfo<output_t, IndexType> a, /* output */
detail::TensorInfo<output_t, IndexType> p, /* partial output */
detail::TensorInfo<input_t, IndexType> b, /* input */
int64_t nbins,
input_t minvalue,
input_t maxvalue,
IndexType totalElements,
Op getOp) {
extern __shared__ unsigned char my_smem[];
output_t* smem = nullptr;
if (MemoryType == CUDAHistogramMemoryType::SHARED) {
////////////////////////// Shared memory //////////////////////////
// atomically add to block specific shared memory
// then atomically add to the global output tensor
smem = reinterpret_cast<output_t*>(my_smem);
for (IndexType i = threadIdx.x; i < a.sizes[0]; i += blockDim.x) {
smem[i] = 0;
}
__syncthreads();
FOR_KERNEL_LOOP(linearIndex, totalElements) {
// Convert `linearIndex` into an offset of `b`
const IndexType bOffset =
detail::IndexToOffset<input_t, IndexType, BDims>::get(linearIndex, b);
const auto bVal = b.data[bOffset];
if (bVal >= minvalue && bVal <= maxvalue) {
// Use value at `b` as an offset of `smem`
const IndexType bin = getBin<input_t, IndexType>(bVal, minvalue, maxvalue, nbins);
gpuAtomicAdd(&smem[bin], getOp(linearIndex));
}
}
__syncthreads();
// NOTE: atomically update output bin count.
// Atomic update is important since __syncthreads() will only synchronize threads
// in a given block, not across blocks.
for (IndexType i = threadIdx.x; i < a.sizes[0]; i += blockDim.x) {
const IndexType aOffset =
detail::IndexToOffset<output_t, IndexType, ADims>::get(i, a);
gpuAtomicAdd(&a.data[aOffset], smem[i]);
}
} else if (MemoryType == CUDAHistogramMemoryType::MULTI_BLOCK) {
////////////////////////// Multi Block memory //////////////////////////
// atomically add to block specific global tensor
// then atomically add to the global output tensor
// compute histogram for the block
FOR_KERNEL_LOOP(linearIndex, totalElements) {
// Convert `linearIndex` into an offset of `b`
const IndexType bOffset =
detail::IndexToOffset<input_t, IndexType, BDims>::get(linearIndex, b);
const auto bVal = b.data[bOffset];
if (bVal >= minvalue && bVal <= maxvalue) {
// Use value at `b` as an offset of `p`
const IndexType bin = getBin<input_t, IndexType>(bVal, minvalue, maxvalue, nbins);
const IndexType pIdx = p.strides[0] * blockIdx.x + bin;
const IndexType pOffset =
detail::IndexToOffset<output_t, IndexType, PDims>::get(pIdx, p);
gpuAtomicAdd(&p.data[pOffset], getOp(linearIndex));
}
}
__syncthreads();
// NOTE: atomically update output bin count.
// Atomic update is important since __syncthreads() will only synchronize threads
// in a given block, not across blocks.
const IndexType pIdx = p.strides[0] * blockIdx.x;
const IndexType pOffset =
detail::IndexToOffset<output_t, IndexType, PDims>::get(pIdx, p);
for (IndexType i = threadIdx.x; i < a.sizes[0]; i += blockDim.x) {
const IndexType aOffset =
detail::IndexToOffset<output_t, IndexType, ADims>::get(i, a);
gpuAtomicAdd(&a.data[aOffset], p.data[pOffset + i]);
}
} else {
////////////////////////// Global memory //////////////////////////
// atomically add to the output tensor
// compute histogram for the block
FOR_KERNEL_LOOP(linearIndex, totalElements) {
// Convert `linearIndex` into an offset of `b`
const IndexType bOffset =
detail::IndexToOffset<input_t, IndexType, BDims>::get(linearIndex, b);
const auto bVal = b.data[bOffset];
if (bVal >= minvalue && bVal <= maxvalue) {
// Use value at `b` as an offset of `a`
const IndexType bin = getBin<input_t, IndexType>(bVal, minvalue, maxvalue, nbins);
const IndexType aOffset =
detail::IndexToOffset<output_t, IndexType, ADims>::get(bin, a);
gpuAtomicAdd(&a.data[aOffset], getOp(linearIndex));
}
}
}
}
#define HANDLE_CASE(MEMORY_TYPE, WEIGHTS_OP, SHARED_MEM) \
kernelHistogram1D<output_t, input_t, IndexType, 1, 2, -1, MEMORY_TYPE> \
<<<grid, \
block, \
SHARED_MEM, \
getCurrentCUDAStream()>>>( \
aInfo, pInfo, bInfo, nbins, minvalue, maxvalue, totalElements, WEIGHTS_OP); \
C10_CUDA_KERNEL_LAUNCH_CHECK();
#define HANDLE_SWITCH_CASE(mType, getOp) \
switch (mType) { \
case CUDAHistogramMemoryType::SHARED: \
HANDLE_CASE(CUDAHistogramMemoryType::SHARED, getOp, sharedMem); \
break; \
case CUDAHistogramMemoryType::MULTI_BLOCK: \
HANDLE_CASE(CUDAHistogramMemoryType::MULTI_BLOCK, getOp, 0); \
break; \
default: \
HANDLE_CASE(CUDAHistogramMemoryType::GLOBAL, getOp, 0); \
}
inline int64_t getFreeGlobalMemory() {
// no need to use `cudaSetDevice`
size_t free_mem, total_mem;
cudaMemGetInfo(&free_mem, &total_mem);
TORCH_INTERNAL_ASSERT(
cudaGetLastError() == cudaSuccess,
"CUDA_tensor_histogram failed to get free global memory");
return static_cast<int64_t>(free_mem);
}
/*
Calculate the frequency of the input values.
`a` contains the final output or the histogram.
Input `b` is assumed to be 1-D non-negative int array.
`c` optionally contains the weight vector.
See `help torch.bincount` for details on the math.
3 implementations based on input size and memory usage:
case: #bins < THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM and enough shared mem
SHARED: Each block atomically adds to its own **shared** hist copy,
then atomically updates the global tensor.
case: #bins < THRESH_NUMBER_BINS_FOR_GLOBAL_MEM and enough global mem
MULTI_BLOCK: Each block atomically adds to its own **global** hist
copy, then atomically updates the global tensor.
case: THRESH_NUMBER_BINS_FOR_GLOBAL_MEM <= #bins
GLOBAL: all threads atomically update to a single **global** hist copy.
*/
template <typename output_t, typename input_t, bool HasWeights>
bool CUDA_tensor_histogram(
at::Tensor a, /* output */
at::Tensor b, /* input */
at::Tensor c, /* weights(optional) */
int64_t nbins,
input_t minvalue,
input_t maxvalue,
TensorArgType aType = TensorArgType::ReadWrite,
TensorArgType bType = TensorArgType::ReadOnly,
TensorArgType cType = TensorArgType::ReadOnly) {
checkBackend("CUDA_tensor_histogram", {a, b}, Backend::CUDA);
if (HasWeights) {
checkBackend("CUDA_tensor_histogram", {c}, Backend::CUDA);
}
auto totalElements = b.numel();
if (totalElements == 0) {
return false;
}
const dim3 block = getApplyBlock();
dim3 grid;
int64_t curDevice = current_device();
if (curDevice == -1 || !getApplyGrid(totalElements, grid, curDevice)) {
return false;
}
CUDAHistogramMemoryType memType = CUDAHistogramMemoryType::GLOBAL;
auto maxSharedMem = getCurrentDeviceProperties()->sharedMemPerBlock;
auto sharedMem = nbins * sizeof(output_t) + 8; // 8 guard bytes
auto maxGlobalMem = getFreeGlobalMemory();
auto multiBlockMem = nbins * grid.x * sizeof(output_t) + 8; // 8 guard bytes
// determine memory type to use in the kernel
if (nbins < THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM &&
sharedMem < maxSharedMem) {
memType = CUDAHistogramMemoryType::SHARED;
} else if (
nbins < THRESH_NUMBER_BINS_FOR_GLOBAL_MEM &&
multiBlockMem < (maxGlobalMem / 2)) {
// check against half of free mem to be extra safe
// due to cached allocator, we may anyway have slightly more free mem
memType = CUDAHistogramMemoryType::MULTI_BLOCK;
}
// alloc memory for MULTI_BLOCK
using IndexType = int64_t;
auto aInfo = detail::getTensorInfo<output_t, IndexType>(a);
auto bInfo = detail::getTensorInfo<input_t, IndexType>(b);
detail::TensorInfo<output_t, IndexType> pInfo(nullptr, 0, {}, {});
Tensor partial_output;
if (memType == CUDAHistogramMemoryType::MULTI_BLOCK) {
partial_output = native::zeros(
{grid.x, nbins},
optTypeMetaToScalarType(a.options().dtype_opt()),
a.options().layout_opt(),
a.options().device_opt(),
a.options().pinned_memory_opt());
pInfo = detail::getTensorInfo<output_t, IndexType>(partial_output);
}
if (HasWeights) {
auto cInfo = detail::getTensorInfo<output_t, IndexType>(c);
const auto getWeightsOp = [cInfo] __device__(IndexType cIndex) {
const IndexType cOffset =
detail::IndexToOffset<output_t, IndexType, 1>::get(cIndex, cInfo);
return cInfo.data[cOffset];
};
HANDLE_SWITCH_CASE(memType, getWeightsOp)
} else {
static const auto getDummyOp = [] __device__(IndexType) { return 1L; };
HANDLE_SWITCH_CASE(memType, getDummyOp)
}
return true;
}
#undef HANDLE_CASE
#undef HANDLE_SWITCH_CASE
#undef FOR_KERNEL_LOOP
#undef THRESH_NUMBER_BINS_FOR_GLOBAL_MEM
#undef THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM
} // namespace cuda
namespace {
///////////////// bincount /////////////////
template <typename input_t, typename weights_t>
Tensor _bincount_cuda_template(
const Tensor& self,
const Tensor& weights,
int64_t minlength) {
if (minlength < 0) {
AT_ERROR("minlength should be >= 0");
}
if (self.dim() == 1 && self.numel() == 0) {
return native::zeros(
{minlength},
kLong,
c10::nullopt /* layout */,
kCUDA,
c10::nullopt /* pin_memory */);
}
if (self.dim() != 1 ||
(!std::is_same<input_t, uint8_t>::value &&
*self.min().cpu().data_ptr<input_t>() < 0)) {
AT_ERROR("bincount only supports 1-d non-negative integral inputs.");
}
bool has_weights = weights.defined();
if (has_weights && weights.size(0) != self.size(0)) {
AT_ERROR("input and weights should have the same length");
}
const int64_t nbins = std::max(*self.max().cpu().data_ptr<input_t>() + (int64_t)1, minlength);
const input_t minvalue = 0;
const input_t maxvalue = nbins;
// alloc output counter on GPU
Tensor output;
if (has_weights) {
output = native::zeros(
{nbins},
optTypeMetaToScalarType(weights.options().dtype_opt()),
weights.options().layout_opt(),
weights.options().device_opt(),
weights.options().pinned_memory_opt());
auto ret = cuda::CUDA_tensor_histogram<weights_t, input_t, true>(
output, self, weights, nbins, minvalue, maxvalue);
} else {
output = native::zeros(
{nbins},
kLong,
c10::nullopt /* layout */,
DeviceType::CUDA,
c10::nullopt /* pin_memory */);
auto ret = cuda::CUDA_tensor_histogram<int64_t, input_t, false>(
output, self, weights, nbins, minvalue, maxvalue);
}
return output;
}
///////////////// histc /////////////////
template <typename input_t>
Tensor _histc_cuda_template(
const Tensor& self,
int64_t nbins,
input_t min,
input_t max) {
if (nbins <= 0) {
AT_ERROR("bins must be > 0");
}
Tensor output = native::zeros(
{nbins},
self.scalar_type(),
c10::nullopt /* layout */,
DeviceType::CUDA,
c10::nullopt /* pin_memory */);
input_t minvalue = min;
input_t maxvalue = max;
if (min == max) {
minvalue = *self.min().cpu().data_ptr<input_t>();
maxvalue = *self.max().cpu().data_ptr<input_t>();
}
if (minvalue == maxvalue) {
minvalue = minvalue - 1;
maxvalue = maxvalue + 1;
}
#ifndef __HIP_PLATFORM_HCC__
TORCH_CHECK(
!(THCNumerics<input_t>::isinf(minvalue) ||
THCNumerics<input_t>::isinf(maxvalue) ||
THCNumerics<input_t>::isnan(minvalue) ||
THCNumerics<input_t>::isnan(maxvalue)),
"range of [",
minvalue,
", ",
maxvalue,
"] is not finite");
#else
TORCH_CHECK(
!(std::isinf(minvalue) || std::isinf(maxvalue) || std::isnan(minvalue) ||
std::isnan(maxvalue)),
"range of [",
minvalue,
", ",
maxvalue,
"] is not finite");
#endif
TORCH_CHECK(minvalue < maxvalue, "max must be larger than min");
auto ret = cuda::CUDA_tensor_histogram<input_t, input_t, false>(
output, self, Tensor(), nbins, minvalue, maxvalue);
return output;
}
} // namespace
namespace native {
Tensor _bincount_cuda(
const Tensor& self, const c10::optional<Tensor>& weights_opt,
int64_t minlength) {
// See [Note: hacky wrapper removal for optional tensor]
const Tensor& weights = c10::value_or_else(weights_opt, [] {return Tensor();});
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("_bincount_cuda");
return AT_DISPATCH_INTEGRAL_TYPES(self.scalar_type(), "bincount_cuda", [&] {
const auto scalar = weights.scalar_type();
if (scalar == ScalarType::Undefined || scalar == ScalarType::Float)
return _bincount_cuda_template<scalar_t, float>(self, weights, minlength);
return _bincount_cuda_template<scalar_t, double>(
self, weights.to(kDouble), minlength);
});
}
Tensor _histc_cuda(
const Tensor& self,
int64_t nbins,
const Scalar& min,
const Scalar& max) {
if (self.scalar_type() == ScalarType::Half) {
AT_ERROR("HalfTensor is not supported");
}
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("_histc_cuda");
return AT_DISPATCH_ALL_TYPES(self.scalar_type(), "histc", [&] {
return _histc_cuda_template<scalar_t>(self, nbins, min.to<scalar_t>(), max.to<scalar_t>());
});
}
Tensor& _histc_out_cuda(const Tensor& self, int64_t bins, const Scalar& min, const Scalar& max, Tensor& result) {
auto ret = _histc_cuda(self, bins, min, max);
result.resize_as_(ret);
result.copy_(ret);
return result;
}
} // namespace native
} // namespace at
|
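Both copies above map a value into a bin as (bVal - minvalue) * nbins / (maxvalue - minvalue) and fold bin == nbins back into the last bin so that maxvalue is counted; a small host-side sketch of the same arithmetic, handy for sanity-checking edge values:
#include <cassert>
#include <cstdint>
// Host-side sketch of the getBin arithmetic used by the histogram kernels above.
static int64_t getBinRef(float v, float minvalue, float maxvalue, int64_t nbins) {
    int64_t bin = static_cast<int64_t>((v - minvalue) * nbins / (maxvalue - minvalue));
    if (bin == nbins) bin -= 1; // values equal to maxvalue land in the last bin
    return bin;
}
int main() {
    // 4 bins over [0, 8): each bin spans 2 value units, and the right edge maps to bin 3.
    assert(getBinRef(0.0f, 0.0f, 8.0f, 4) == 0);
    assert(getBinRef(3.9f, 0.0f, 8.0f, 4) == 1);
    assert(getBinRef(8.0f, 0.0f, 8.0f, 4) == 3);
    return 0;
}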
6c2a1e0d8cae124af2f41d8690c8a4b858c693cc.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "InverseDistance.h"
#include <stdlib.h>
#include <cutil.h>
#include <cutil_inline.h>
static int h_datanum_id[3], h_dst_total_id;
__constant__ int d_datanum_id[3], d_total_id[1], d_dst_total_id[1];
__constant__ float d_min_id[3], d_max_id[3], d_interval_id[3], d_power_id[1];
float *d_xary_id, *d_yary_id, *d_zary_id, *d_data_ary_id, *d_out_ary_id;
static int sh_total_id;
// used internally
__device__ inline void InverseDistance_GetDistance(float x1, float x2, float y1, float y2, float z1, float z2, float *res)
{
*res = sqrt((x1-x2)*(x1-x2) + (y1-y2)*(y1-y2) + (z1-z2)*(z1-z2));
}
__device__ inline void InverseDistance_GetNearestPoint(float x, float y, float z, float *res, float *data_ary
, float *xary, float *yary, float *zary)
{
float now, sum = 0, sum2 = 0, tmp;
for (int i = 0;i < *d_total_id;i++)
{
InverseDistance_GetDistance(x, xary[i], y, yary[i], z, zary[i], &now);
if (now<0.001)
{
*res = data_ary[i];
return ;
}
tmp = pow(now, -d_power_id[0]);
sum += tmp;
sum2 += tmp*data_ary[i];
}
sum2 /= sum;
*res = sum2;
}
__device__ inline void InverseDistance_GetNearestPointN(float x, float y, float z, float *res, float *data_ary
, float *xary, float *yary, float *zary, int tid)
{
float now, sum = 0, sum2 = 0, tmp;
int idx, i, j;
int shid, dataid;
__shared__ float shx[512];
__shared__ float shy[512];
__shared__ float shz[512];
__shared__ float shd[512];
for ( i=0; i < *d_total_id;i+=512)
{
__syncthreads();
if (tid<256)
{
for (int j=0; j<2; j++)
{
idx = 256*j + tid + i;
if (idx >= *d_total_id)
break;
shid = 256*j + tid;
shx[shid] = xary[idx];
shy[shid] = yary[idx];
shz[shid] = zary[idx];
shd[shid] = data_ary[idx];
}
}
__syncthreads();
for ( j=0; j<512 && i+j<*d_total_id; j++)
{
InverseDistance_GetDistance(x, shx[j], y, shy[j], z, shz[j], &now);
if (now<0.001)
{
*res = shd[j];
return;
}
tmp = pow(now, -d_power_id[0]);
sum += tmp;
sum2 += tmp*shd[j];
}
}
sum2 /= sum;
*res = sum2;
}
// version that does not use shared memory
__global__ void InverseDistance_GetNearest(float *out_ary, float *data_ary, float *xary, float *yary, float *zary)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int inc = blockDim.x * gridDim.x;
int xindex, yindex, zindex;
float x ,y, z, res;
for (int i = idx ; i < *d_dst_total_id ; i += inc)
{
xindex = i % d_datanum_id[0];
yindex = (i / d_datanum_id[0]) % d_datanum_id[1];
zindex = i / (d_datanum_id[0] * d_datanum_id[1]);
x = d_min_id[0] + xindex * d_interval_id[0];
y = d_min_id[1] + yindex * d_interval_id[1];
z = d_min_id[2] + zindex * d_interval_id[2];
InverseDistance_GetNearestPoint(x, y, z, &res, data_ary, xary, yary, zary);
out_ary[i] = res;
}
}
// version that uses shared memory
__global__ void InverseDistance_GetNearestSH(float *out_ary, float *data_ary, float *xary, float *yary, float *zary)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int inc = blockDim.x * gridDim.x;
int xindex, yindex, zindex;
float x ,y, z, res;
for (int i = idx ; i < *d_dst_total_id ; i += inc)
{
xindex = i % d_datanum_id[0];
yindex = (i / d_datanum_id[0]) % d_datanum_id[1];
zindex = i / (d_datanum_id[0] * d_datanum_id[1]);
x = d_min_id[0] + xindex * d_interval_id[0];
y = d_min_id[1] + yindex * d_interval_id[1];
z = d_min_id[2] + zindex * d_interval_id[2];
InverseDistance_GetNearestPointN(x, y, z, &res, data_ary, xary, yary, zary,threadIdx.x);
out_ary[i] = res;
}
}
// used externally; returns the required float array size
__host__ int InverseDistance_SetData( const InterpolationInfo *h_info, float power )
{
int sum_of_use_memory = 0;
// set in data
sh_total_id = h_info->m_total;
CUDA_SAFE_CALL(hipMemcpyToSymbol(d_total_id, &sh_total_id, sizeof(int)));
int size = sizeof(float)*sh_total_id;
CUDA_SAFE_CALL(hipMalloc((void**)&d_xary_id, size));
CUDA_SAFE_CALL(hipMalloc((void**)&d_yary_id, size));
CUDA_SAFE_CALL(hipMalloc((void**)&d_zary_id, size));
CUDA_SAFE_CALL(hipMalloc((void**)&d_data_ary_id, size));
sum_of_use_memory += size*4;
printf("size of use input data on gpu: %f MB\n", size*4/1024.0/1024.0);
// get source memory on gpu
CUDA_SAFE_CALL(hipMemcpy(d_xary_id, h_info->m_posAry[0], size, hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(d_yary_id, h_info->m_posAry[1], size, hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(d_zary_id, h_info->m_posAry[2], size, hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(d_data_ary_id, h_info->m_data_ary, size, hipMemcpyHostToDevice));
// set out data
const float *h_min = h_info->min, *h_max = h_info->max, *h_interval = h_info->interval;
for (int i=0;i<3;i++)
h_datanum_id[i] = (int)floor((h_max[i]-h_min[i])/h_interval[i])+1;
h_dst_total_id = h_datanum_id[0] * h_datanum_id[1] * h_datanum_id[2];
CUDA_SAFE_CALL(hipMemcpyToSymbol(d_dst_total_id, &h_dst_total_id, sizeof(int))); //set out total
CUDA_SAFE_CALL(hipMalloc((void**)&d_out_ary_id, sizeof(float)*h_dst_total_id)); // get dst memory on gpu.
sum_of_use_memory += sizeof(float)*h_dst_total_id;
CUDA_SAFE_CALL(hipMemcpyToSymbol(d_power_id, &power, sizeof(float))); //set out power
int size_float3 = sizeof(float)*3;
CUDA_SAFE_CALL(hipMemcpyToSymbol(d_datanum_id, h_datanum_id, size_float3));
CUDA_SAFE_CALL(hipMemcpyToSymbol(d_min_id, h_min, size_float3));
CUDA_SAFE_CALL(hipMemcpyToSymbol(d_max_id, h_max, size_float3));
CUDA_SAFE_CALL(hipMemcpyToSymbol(d_interval_id, h_interval, size_float3));
printf("size of out data on gpu: %f MB\n", sizeof(float)*h_dst_total_id/1024.0/1024.0);
printf("size of use memory on gpu: %f MB\n", sum_of_use_memory/1024.0/1024.0);
return h_dst_total_id;
}
__host__ void InverseDistance_ComputeData(_out float *dstdata, int th, bool useShMem)
{
int threadsPerBlock = 256;
if (!useShMem)
threadsPerBlock = th;
int blocksPerGrid = (sh_total_id + threadsPerBlock - 1) / threadsPerBlock;
if (useShMem)
hipLaunchKernelGGL(( InverseDistance_GetNearestSH), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0,
d_out_ary_id, d_data_ary_id, d_xary_id, d_yary_id, d_zary_id);
else
hipLaunchKernelGGL(( InverseDistance_GetNearest), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0,
d_out_ary_id, d_data_ary_id, d_xary_id, d_yary_id, d_zary_id);
cutilCheckMsg("kernel launch failure");
#ifdef _DEBUG
CUDA_SAFE_CALL( hipDeviceSynchronize() );
#endif
CUDA_SAFE_CALL( hipMemcpy(dstdata, d_out_ary_id, sizeof(float)*h_dst_total_id, hipMemcpyDeviceToHost) );
if (d_xary_id) hipFree(d_xary_id);
if (d_yary_id) hipFree(d_yary_id);
if (d_zary_id) hipFree(d_zary_id);
if (d_data_ary_id) hipFree(d_data_ary_id);
if (d_out_ary_id) hipFree(d_out_ary_id);
CUDA_SAFE_CALL( hipDeviceReset() );
}
|
6c2a1e0d8cae124af2f41d8690c8a4b858c693cc.cu
|
#include "InverseDistance.h"
#include <stdlib.h>
#include <cutil.h>
#include <cutil_inline.h>
static int h_datanum_id[3], h_dst_total_id;
__constant__ int d_datanum_id[3], d_total_id[1], d_dst_total_id[1];
__constant__ float d_min_id[3], d_max_id[3], d_interval_id[3], d_power_id[1];
float *d_xary_id, *d_yary_id, *d_zary_id, *d_data_ary_id, *d_out_ary_id;
static int sh_total_id;
// used internally
__device__ inline void InverseDistance_GetDistance(float x1, float x2, float y1, float y2, float z1, float z2, float *res)
{
*res = sqrt((x1-x2)*(x1-x2) + (y1-y2)*(y1-y2) + (z1-z2)*(z1-z2));
}
__device__ inline void InverseDistance_GetNearestPoint(float x, float y, float z, float *res, float *data_ary
, float *xary, float *yary, float *zary)
{
float now, sum = 0, sum2 = 0, tmp;
for (int i = 0;i < *d_total_id;i++)
{
InverseDistance_GetDistance(x, xary[i], y, yary[i], z, zary[i], &now);
if (now<0.001)
{
*res = data_ary[i];
return ;
}
tmp = pow(now, -d_power_id[0]);
sum += tmp;
sum2 += tmp*data_ary[i];
}
sum2 /= sum;
*res = sum2;
}
__device__ inline void InverseDistance_GetNearestPointN(float x, float y, float z, float *res, float *data_ary
, float *xary, float *yary, float *zary, int tid)
{
float now, sum = 0, sum2 = 0, tmp;
int idx, i, j;
int shid, dataid;
__shared__ float shx[512];
__shared__ float shy[512];
__shared__ float shz[512];
__shared__ float shd[512];
for ( i=0; i < *d_total_id;i+=512)
{
__syncthreads();
if (tid<256)
{
for (int j=0; j<2; j++)
{
idx = 256*j + tid + i;
if (idx >= *d_total_id)
break;
shid = 256*j + tid;
shx[shid] = xary[idx];
shy[shid] = yary[idx];
shz[shid] = zary[idx];
shd[shid] = data_ary[idx];
}
}
__syncthreads();
for ( j=0; j<512 && i+j<*d_total_id; j++)
{
InverseDistance_GetDistance(x, shx[j], y, shy[j], z, shz[j], &now);
if (now<0.001)
{
*res = shd[j];
return;
}
tmp = pow(now, -d_power_id[0]);
sum += tmp;
sum2 += tmp*shd[j];
}
}
sum2 /= sum;
*res = sum2;
}
// version that does not use shared memory
__global__ void InverseDistance_GetNearest(float *out_ary, float *data_ary, float *xary, float *yary, float *zary)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int inc = blockDim.x * gridDim.x;
int xindex, yindex, zindex;
float x ,y, z, res;
for (int i = idx ; i < *d_dst_total_id ; i += inc)
{
xindex = i % d_datanum_id[0];
yindex = (i / d_datanum_id[0]) % d_datanum_id[1];
zindex = i / (d_datanum_id[0] * d_datanum_id[1]);
x = d_min_id[0] + xindex * d_interval_id[0];
y = d_min_id[1] + yindex * d_interval_id[1];
z = d_min_id[2] + zindex * d_interval_id[2];
InverseDistance_GetNearestPoint(x, y, z, &res, data_ary, xary, yary, zary);
out_ary[i] = res;
}
}
// version that uses shared memory
__global__ void InverseDistance_GetNearestSH(float *out_ary, float *data_ary, float *xary, float *yary, float *zary)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int inc = blockDim.x * gridDim.x;
int xindex, yindex, zindex;
float x ,y, z, res;
for (int i = idx ; i < *d_dst_total_id ; i += inc)
{
xindex = i % d_datanum_id[0];
yindex = (i / d_datanum_id[0]) % d_datanum_id[1];
zindex = i / (d_datanum_id[0] * d_datanum_id[1]);
x = d_min_id[0] + xindex * d_interval_id[0];
y = d_min_id[1] + yindex * d_interval_id[1];
z = d_min_id[2] + zindex * d_interval_id[2];
InverseDistance_GetNearestPointN(x, y, z, &res, data_ary, xary, yary, zary,threadIdx.x);
out_ary[i] = res;
}
}
// used externally; returns the required float array size
__host__ int InverseDistance_SetData( const InterpolationInfo *h_info, float power )
{
int sum_of_use_memory = 0;
// set in data
sh_total_id = h_info->m_total;
CUDA_SAFE_CALL(cudaMemcpyToSymbol(d_total_id, &sh_total_id, sizeof(int)));
int size = sizeof(float)*sh_total_id;
CUDA_SAFE_CALL(cudaMalloc((void**)&d_xary_id, size));
CUDA_SAFE_CALL(cudaMalloc((void**)&d_yary_id, size));
CUDA_SAFE_CALL(cudaMalloc((void**)&d_zary_id, size));
CUDA_SAFE_CALL(cudaMalloc((void**)&d_data_ary_id, size));
sum_of_use_memory += size*4;
printf("size of use input data on gpu: %f MB\n", size*4/1024.0/1024.0);
// get source memory on gpu
CUDA_SAFE_CALL(cudaMemcpy(d_xary_id, h_info->m_posAry[0], size, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(d_yary_id, h_info->m_posAry[1], size, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(d_zary_id, h_info->m_posAry[2], size, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(d_data_ary_id, h_info->m_data_ary, size, cudaMemcpyHostToDevice));
// set out data
const float *h_min = h_info->min, *h_max = h_info->max, *h_interval = h_info->interval;
for (int i=0;i<3;i++)
h_datanum_id[i] = (int)floor((h_max[i]-h_min[i])/h_interval[i])+1;
h_dst_total_id = h_datanum_id[0] * h_datanum_id[1] * h_datanum_id[2];
CUDA_SAFE_CALL(cudaMemcpyToSymbol(d_dst_total_id, &h_dst_total_id, sizeof(int))); //set out total
CUDA_SAFE_CALL(cudaMalloc((void**)&d_out_ary_id, sizeof(float)*h_dst_total_id)); // get dst memory on gpu.
sum_of_use_memory += sizeof(float)*h_dst_total_id;
CUDA_SAFE_CALL(cudaMemcpyToSymbol(d_power_id, &power, sizeof(float))); //set out power
int size_float3 = sizeof(float)*3;
CUDA_SAFE_CALL(cudaMemcpyToSymbol(d_datanum_id, h_datanum_id, size_float3));
CUDA_SAFE_CALL(cudaMemcpyToSymbol(d_min_id, h_min, size_float3));
CUDA_SAFE_CALL(cudaMemcpyToSymbol(d_max_id, h_max, size_float3));
CUDA_SAFE_CALL(cudaMemcpyToSymbol(d_interval_id, h_interval, size_float3));
printf("size of out data on gpu: %f MB\n", sizeof(float)*h_dst_total_id/1024.0/1024.0);
printf("size of use memory on gpu: %f MB\n", sum_of_use_memory/1024.0/1024.0);
return h_dst_total_id;
}
__host__ void InverseDistance_ComputeData(_out float *dstdata, int th, bool useShMem)
{
int threadsPerBlock = 256;
if (!useShMem)
threadsPerBlock = th;
int blocksPerGrid = (sh_total_id + threadsPerBlock - 1) / threadsPerBlock;
if (useShMem)
InverseDistance_GetNearestSH<<<blocksPerGrid, threadsPerBlock>>>
(d_out_ary_id, d_data_ary_id, d_xary_id, d_yary_id, d_zary_id);
else
InverseDistance_GetNearest<<<blocksPerGrid, threadsPerBlock>>>
(d_out_ary_id, d_data_ary_id, d_xary_id, d_yary_id, d_zary_id);
cutilCheckMsg("kernel launch failure");
#ifdef _DEBUG
CUDA_SAFE_CALL( cudaThreadSynchronize() );
#endif
CUDA_SAFE_CALL( cudaMemcpy(dstdata, d_out_ary_id, sizeof(float)*h_dst_total_id, cudaMemcpyDeviceToHost) );
if (d_xary_id) cudaFree(d_xary_id);
if (d_yary_id) cudaFree(d_yary_id);
if (d_zary_id) cudaFree(d_zary_id);
if (d_data_ary_id) cudaFree(d_data_ary_id);
if (d_out_ary_id) cudaFree(d_out_ary_id);
CUDA_SAFE_CALL( cudaThreadExit() );
}
|
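The kernels above implement plain inverse-distance weighting: each grid point takes sum(w_i * v_i) / sum(w_i) with w_i = dist_i^-power, short-circuiting to a sample's value whenever the distance to it drops below 0.001. A host-side reference sketch of that per-point formula:
#include <cmath>
// Host-side sketch of the weighting each grid point performs in the kernels above.
float idwReference(float x, float y, float z,
                   const float *xs, const float *ys, const float *zs,
                   const float *vals, int n, float power) {
    float wsum = 0.0f, vsum = 0.0f;
    for (int i = 0; i < n; ++i) {
        float dx = x - xs[i], dy = y - ys[i], dz = z - zs[i];
        float dist = std::sqrt(dx * dx + dy * dy + dz * dz);
        if (dist < 0.001f) return vals[i]; // sitting on a sample: take its value directly
        float w = std::pow(dist, -power);  // inverse-distance weight
        wsum += w;
        vsum += w * vals[i];
    }
    return vsum / wsum;
}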
71fc61241665fabf9dea6f01b9fa49bd21109566.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
__global__
void brute_merge_results(int* ar0, int* ar1, int* ar2, int* ar3, int array_len) {
int threadNum = blockIdx.x * blockDim.x + threadIdx.x;
if(threadNum < array_len){
int val = 0;
if(ar0[threadNum] !=0 && ar1[threadNum] !=0 && ar2[threadNum] !=0 && ar3[threadNum] !=0){
val = 1;
}
ar0[threadNum] = val;
}
}
|
71fc61241665fabf9dea6f01b9fa49bd21109566.cu
|
#include <cuda.h>
#include <cuda_runtime_api.h>
__global__
void brute_merge_results(int* ar0, int* ar1, int* ar2, int* ar3, int array_len) {
int threadNum = blockIdx.x * blockDim.x + threadIdx.x;
if(threadNum < array_len){
int val = 0;
if(ar0[threadNum] !=0 && ar1[threadNum] !=0 && ar2[threadNum] !=0 && ar3[threadNum] !=0){
val = 1;
}
ar0[threadNum] = val;
}
}
|
178b85d4e46eb2648b63f7b2c8ccde92ef20d220.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
///
/// @file rdwt53.cu
/// @brief CUDA implementation of reverse 5/3 2D DWT.
/// @author Martin Jirman ([email protected])
/// @date 2011-02-04 14:19
///
///
/// Copyright (c) 2011 Martin Jirman
/// All rights reserved.
///
/// Redistribution and use in source and binary forms, with or without
/// modification, are permitted provided that the following conditions are met:
///
/// * Redistributions of source code must retain the above copyright
/// notice, this list of conditions and the following disclaimer.
/// * Redistributions in binary form must reproduce the above copyright
/// notice, this list of conditions and the following disclaimer in the
/// documentation and/or other materials provided with the distribution.
///
/// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
/// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
/// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
/// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
/// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
/// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
/// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
/// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
/// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
/// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
/// POSSIBILITY OF SUCH DAMAGE.
///
#include "cudacommon.h"
#include "common.h"
#include "transform_buffer.h"
#include "io.h"
namespace dwt_cuda {
/// Wraps the shared memory buffer and algorithms needed for computing 5/3 RDWT
/// using a sliding window and lifting scheme.
/// @tparam WIN_SIZE_X width of sliding window
/// @tparam WIN_SIZE_Y height of sliding window
template <int WIN_SIZE_X, int WIN_SIZE_Y>
class RDWT53 {
private:
/// Shared memory buffer used for 5/3 DWT transforms.
typedef TransformBuffer<int, WIN_SIZE_X, WIN_SIZE_Y + 3, 2> RDWT53Buffer;
/// Shared buffer used for reverse 5/3 DWT.
RDWT53Buffer buffer;
/// Difference between indices of two vertically neighboring items in buffer.
enum { STRIDE = RDWT53Buffer::VERTICAL_STRIDE };
/// Info needed for loading of one input column from input image.
/// @tparam CHECKED true if loader should check boundaries
template <bool CHECKED>
struct RDWT53Column {
/// loader of pixels from column in input image
VerticalDWTBandLoader<int, CHECKED> loader;
/// Offset of corresponding column in shared buffer.
int offset;
/// Sets all fields to some values to avoid 'uninitialized' warnings.
__device__ void clear() {
offset = 0;
loader.clear();
}
};
/// 5/3 DWT reverse update operation.
struct Reverse53Update {
__device__ void operator() (const int p, int & c, const int n) const {
c -= (p + n + 2) / 4; // F.3, page 118, ITU-T Rec. T.800 final draft
}
};
/// 5/3 DWT reverse predict operation.
struct Reverse53Predict {
__device__ void operator() (const int p, int & c, const int n) const {
c += (p + n) / 2; // F.4, page 118, ITU-T Rec. T.800 final draft
}
};
/// Horizontal 5/3 RDWT on specified lines of transform buffer.
/// @param lines number of lines to be transformed
/// @param firstLine index of the first line to be transformed
__device__ void horizontalTransform(const int lines, const int firstLine) {
__syncthreads();
buffer.forEachHorizontalEven(firstLine, lines, Reverse53Update());
__syncthreads();
buffer.forEachHorizontalOdd(firstLine, lines, Reverse53Predict());
__syncthreads();
}
/// Using given loader, it loads another WIN_SIZE_Y coefficients
/// into specified column.
/// @tparam CHECKED true if loader should check image boundaries
/// @param input input coefficients to load from
/// @param col info about loaded column
template <bool CHECKED>
inline __device__ void loadWindowIntoColumn(const int * const input,
RDWT53Column<CHECKED> & col) {
for(int i = 3; i < (3 + WIN_SIZE_Y); i += 2) {
buffer[col.offset + i * STRIDE] = col.loader.loadLowFrom(input);
buffer[col.offset + (i + 1) * STRIDE] = col.loader.loadHighFrom(input);
}
}
/// Initializes one column of shared transform buffer with 7 input pixels.
/// Those 7 pixels will not be transformed. Also initializes given loader.
/// @tparam CHECKED true if loader should check image boundaries
/// @param columnX x coordinate of column in shared transform buffer
/// @param input input image
/// @param sizeX width of the input image
/// @param sizeY height of the input image
/// @param loader (uninitialized) info about loaded column
template <bool CHECKED>
__device__ void initColumn(const int columnX, const int * const input,
const int sizeX, const int sizeY,
RDWT53Column<CHECKED> & column,
const int firstY) {
// coordinates of the first coefficient to be loaded
const int firstX = blockIdx.x * WIN_SIZE_X + columnX;
// offset of the column with index 'colIndex' in the transform buffer
column.offset = buffer.getColumnOffset(columnX);
if(blockIdx.y == 0) {
// topmost block - apply mirroring rules when loading first 3 rows
column.loader.init(sizeX, sizeY, firstX, firstY);
// load pixels in mirrored way
buffer[column.offset + 1 * STRIDE] = column.loader.loadLowFrom(input);
buffer[column.offset + 0 * STRIDE] =
buffer[column.offset + 2 * STRIDE] = column.loader.loadHighFrom(input);
} else {
// non-topmost row - regular loading:
column.loader.init(sizeX, sizeY, firstX, firstY - 1);
buffer[column.offset + 0 * STRIDE] = column.loader.loadHighFrom(input);
buffer[column.offset + 1 * STRIDE] = column.loader.loadLowFrom(input);
buffer[column.offset + 2 * STRIDE] = column.loader.loadHighFrom(input);
}
// Now, the next coefficient, which will be loaded by loader, is #2.
}
/// Actual GPU 5/3 RDWT implementation.
/// @tparam CHECKED_LOADS true if boundaries must be checked when reading
/// @tparam CHECKED_WRITES true if boundaries must be checked when writing
/// @param in input image (5/3 transformed coefficients)
/// @param out output buffer (for reverse transformed image)
/// @param sizeX width of the output image
/// @param sizeY height of the output image
/// @param winSteps number of sliding window steps
template<bool CHECKED_LOADS, bool CHECKED_WRITES>
__device__ void transform(const int * const in, int * const out,
const int sizeX, const int sizeY,
const int winSteps) {
// info about one main and one boundary column
RDWT53Column<CHECKED_LOADS> column, boundaryColumn;
// index of first row to be transformed
const int firstY = blockIdx.y * WIN_SIZE_Y * winSteps;
// some threads initialize boundary columns
boundaryColumn.clear();
if(threadIdx.x < 3) {
// First 3 threads also handle boundary columns. Thread #0 gets right
// column #0, thread #1 get right column #1 and thread #2 left column.
const int colId = threadIdx.x + ((threadIdx.x != 2) ? WIN_SIZE_X : -3);
// Thread initializes offset of the boundary column (in shared
// buffer), first 3 pixels of the column and a loader for this column.
initColumn(colId, in, sizeX, sizeY, boundaryColumn, firstY);
}
// All threads initialize central columns.
initColumn(parityIdx<WIN_SIZE_X>(), in, sizeX, sizeY, column, firstY);
// horizontally transform first 3 rows
horizontalTransform(3, 0);
// writer of output pixels - initialize it
const int outX = blockIdx.x * WIN_SIZE_X + threadIdx.x;
VerticalDWTPixelWriter<int, CHECKED_WRITES> writer;
writer.init(sizeX, sizeY, outX, firstY);
// offset of column (in transform buffer) saved by this thread
const int outputColumnOffset = buffer.getColumnOffset(threadIdx.x);
// (Each iteration assumes that first 3 rows of transform buffer are
// already loaded with horizontally transformed pixels.)
for(int w = 0; w < winSteps; w++) {
// Load another WIN_SIZE_Y lines of this thread's column
// into the transform buffer.
loadWindowIntoColumn(in, column);
// possibly load boundary columns
if(threadIdx.x < 3) {
loadWindowIntoColumn(in, boundaryColumn);
}
// horizontally transform all newly loaded lines
horizontalTransform(WIN_SIZE_Y, 3);
// Using 3 registers, remember current values of last 3 rows
// of transform buffer. These rows are transformed horizontally
// only and will be used in next iteration.
int last3Lines[3];
last3Lines[0] = buffer[outputColumnOffset + (WIN_SIZE_Y + 0) * STRIDE];
last3Lines[1] = buffer[outputColumnOffset + (WIN_SIZE_Y + 1) * STRIDE];
last3Lines[2] = buffer[outputColumnOffset + (WIN_SIZE_Y + 2) * STRIDE];
// vertically transform all central columns
buffer.forEachVerticalOdd(outputColumnOffset, Reverse53Update());
buffer.forEachVerticalEven(outputColumnOffset, Reverse53Predict());
// Save all results of current window. Results are in transform buffer
// at rows from #1 to #(1 + WIN_SIZE_Y). Other rows are invalid now.
// (They only served as a boundary for vertical RDWT.)
for(int i = 1; i < (1 + WIN_SIZE_Y); i++) {
writer.writeInto(out, buffer[outputColumnOffset + i * STRIDE]);
}
// Use last 3 remembered lines as first 3 lines for next iteration.
// As expected, these lines are already horizontally transformed.
buffer[outputColumnOffset + 0 * STRIDE] = last3Lines[0];
buffer[outputColumnOffset + 1 * STRIDE] = last3Lines[1];
buffer[outputColumnOffset + 2 * STRIDE] = last3Lines[2];
// Wait for all writing threads before proceeding to loading new
// coefficients in the next iteration. (Not to overwrite those which
// are not written yet.)
__syncthreads();
}
}
public:
/// Main GPU 5/3 RDWT entry point.
/// @param in input image (5/3 transformed coefficients)
/// @param out output buffer (for reverse transformed image)
/// @param sizeX width of the output image
/// @param sizeY height of the output image
/// @param winSteps number of sliding window steps
__device__ static void run(const int * const input, int * const output,
const int sx, const int sy, const int steps) {
// prepare instance with buffer in shared memory
__shared__ RDWT53<WIN_SIZE_X, WIN_SIZE_Y> rdwt53;
// Compute limits of this threadblock's block of pixels and use them to
// determine, whether this threadblock will have to deal with boundary.
// (1 in next expressions is for radius of impulse response of 5/3 RDWT.)
const int maxX = (blockIdx.x + 1) * WIN_SIZE_X + 1;
const int maxY = (blockIdx.y + 1) * WIN_SIZE_Y * steps + 1;
const bool atRightBoudary = maxX >= sx;
const bool atBottomBoudary = maxY >= sy;
// Select specialized version of code according to distance of this
// threadblock's pixels from image boundary.
if(atBottomBoudary) {
// near bottom boundary => check both writing and reading
rdwt53.transform<true, true>(input, output, sx, sy, steps);
} else if(atRightBoudary) {
// near right boundary only => check writing only
rdwt53.transform<false, true>(input, output, sx, sy, steps);
} else {
// no nearby boundary => check nothing
rdwt53.transform<false, false>(input, output, sx, sy, steps);
}
}
}; // end of class RDWT53
/// Main GPU 5/3 RDWT entry point.
/// @param in input image (5/3 transformed coefficients)
/// @param out output buffer (for reverse transformed image)
/// @param sizeX width of the output image
/// @param sizeY height of the output image
/// @param winSteps number of sliding window steps
template <int WIN_SX, int WIN_SY>
__launch_bounds__(WIN_SX, CTMIN(SHM_SIZE/sizeof(RDWT53<WIN_SX, WIN_SY>), 8))
__global__ void rdwt53Kernel(const int * const in, int * const out,
const int sx, const int sy, const int steps) {
RDWT53<WIN_SX, WIN_SY>::run(in, out, sx, sy, steps);
}
/// Only computes the optimal number of sliding window steps and the
/// number of threadblocks, and then launches the 5/3 RDWT kernel.
/// @tparam WIN_SX width of sliding window
/// @tparam WIN_SY height of sliding window
/// @param in input image
/// @param out output buffer
/// @param sx width of the input image
/// @param sy height of the input image
template <int WIN_SX, int WIN_SY>
void launchRDWT53Kernel (int * in, int * out, const int sx, const int sy, float& kernelTime) {
// compute optimal number of steps of each sliding window
const int steps = divRndUp(sy, 15 * WIN_SY);
// prepare grid size
dim3 gSize(divRndUp(sx, WIN_SX), divRndUp(sy, WIN_SY * steps));
// timing events
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float elapsedTime;
// finally transform this level
hipEventRecord(start, 0);
hipLaunchKernelGGL(( rdwt53Kernel<WIN_SX, WIN_SY>), dim3(gSize), dim3(WIN_SX), 0, 0, in, out, sx, sy, steps);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
kernelTime += elapsedTime * 1.e-3;
CHECK_CUDA_ERROR();
}
/// Reverse 5/3 2D DWT. See common rules (above) for more details.
/// @param in Input DWT coefficients. Format described in common rules.
/// Will not be preserved (will be overwritten).
/// @param out output buffer on GPU - will contain original image
/// in normalized range [-128, 127].
/// @param sizeX width of input image (in pixels)
/// @param sizeY height of input image (in pixels)
/// @param levels number of recursive DWT levels
float rdwt53(int * in, int * out, int sizeX, int sizeY, int levels) {
float kernelTime = 0;
if(levels > 1) {
// let this function recursively reverse transform deeper levels first
const int llSizeX = divRndUp(sizeX, 2);
const int llSizeY = divRndUp(sizeY, 2);
kernelTime += rdwt53(in, out, llSizeX, llSizeY, levels - 1);
// copy reverse transformed LL band from output back into the input
memCopy(in, out, llSizeX, llSizeY);
}
// select right width of kernel for the size of the image
if(sizeX >= 960) {
launchRDWT53Kernel<192, 8>(in, out, sizeX, sizeY, kernelTime);
} else if (sizeX >= 480) {
launchRDWT53Kernel<128, 8>(in, out, sizeX, sizeY, kernelTime);
} else {
launchRDWT53Kernel<64, 8>(in, out, sizeX, sizeY, kernelTime);
}
return kernelTime;
}
} // end of namespace dwt_cuda
|
178b85d4e46eb2648b63f7b2c8ccde92ef20d220.cu
|
///
/// @file rdwt53.cu
/// @brief CUDA implementation of reverse 5/3 2D DWT.
/// @author Martin Jirman ([email protected])
/// @date 2011-02-04 14:19
///
///
/// Copyright (c) 2011 Martin Jirman
/// All rights reserved.
///
/// Redistribution and use in source and binary forms, with or without
/// modification, are permitted provided that the following conditions are met:
///
/// * Redistributions of source code must retain the above copyright
/// notice, this list of conditions and the following disclaimer.
/// * Redistributions in binary form must reproduce the above copyright
/// notice, this list of conditions and the following disclaimer in the
/// documentation and/or other materials provided with the distribution.
///
/// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
/// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
/// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
/// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
/// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
/// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
/// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
/// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
/// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
/// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
/// POSSIBILITY OF SUCH DAMAGE.
///
#include "cudacommon.h"
#include "common.h"
#include "transform_buffer.h"
#include "io.h"
namespace dwt_cuda {
/// Wraps the shared memory buffer and algorithms needed for computing the 5/3 RDWT
/// using a sliding window and the lifting scheme.
/// @tparam WIN_SIZE_X width of sliding window
/// @tparam WIN_SIZE_Y height of sliding window
template <int WIN_SIZE_X, int WIN_SIZE_Y>
class RDWT53 {
private:
/// Shared memory buffer used for 5/3 DWT transforms.
typedef TransformBuffer<int, WIN_SIZE_X, WIN_SIZE_Y + 3, 2> RDWT53Buffer;
/// Shared buffer used for reverse 5/3 DWT.
RDWT53Buffer buffer;
/// Difference between indices of two vertically neighboring items in buffer.
enum { STRIDE = RDWT53Buffer::VERTICAL_STRIDE };
/// Info needed for loading of one input column from input image.
/// @tparam CHECKED true if loader should check boundaries
template <bool CHECKED>
struct RDWT53Column {
/// loader of pixels from column in input image
VerticalDWTBandLoader<int, CHECKED> loader;
/// Offset of corresponding column in shared buffer.
int offset;
/// Sets all fields to some values to avoid 'uninitialized' warnings.
__device__ void clear() {
offset = 0;
loader.clear();
}
};
/// 5/3 DWT reverse update operation.
struct Reverse53Update {
__device__ void operator() (const int p, int & c, const int n) const {
c -= (p + n + 2) / 4; // F.3, page 118, ITU-T Rec. T.800 final draft
}
};
/// 5/3 DWT reverse predict operation.
struct Reverse53Predict {
__device__ void operator() (const int p, int & c, const int n) const {
c += (p + n) / 2; // F.4, page 118, ITU-T Rec. T.800 final draft
}
};
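// [Editor's illustration, not part of the original source] Together these two
// functors realize the inverse lifting steps of the 5/3 filter: Update (F.3)
// is applied to the even (low-pass) samples first, then Predict (F.4) to the
// odd (high-pass) samples using their already-updated even neighbors.
// With made-up values: a low-pass sample 12 whose high-pass neighbors are
// -2 and 4 becomes 12 - (-2 + 4 + 2) / 4 = 11, and a high-pass sample 4 whose
// updated even neighbors are 11 and 9 becomes 4 + (11 + 9) / 2 = 14.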
/// Horizontal 5/3 RDWT on specified lines of transform buffer.
/// @param lines number of lines to be transformed
/// @param firstLine index of the first line to be transformed
__device__ void horizontalTransform(const int lines, const int firstLine) {
__syncthreads();
buffer.forEachHorizontalEven(firstLine, lines, Reverse53Update());
__syncthreads();
buffer.forEachHorizontalOdd(firstLine, lines, Reverse53Predict());
__syncthreads();
}
/// Using given loader, it loads another WIN_SIZE_Y coefficients
/// into specified column.
/// @tparam CHECKED true if loader should check image boundaries
/// @param input input coefficients to load from
/// @param col info about loaded column
template <bool CHECKED>
inline __device__ void loadWindowIntoColumn(const int * const input,
RDWT53Column<CHECKED> & col) {
for(int i = 3; i < (3 + WIN_SIZE_Y); i += 2) {
buffer[col.offset + i * STRIDE] = col.loader.loadLowFrom(input);
buffer[col.offset + (i + 1) * STRIDE] = col.loader.loadHighFrom(input);
}
}
/// Initializes one column of the shared transform buffer with 3 input pixels.
/// Those 3 pixels are not yet transformed. Also initializes the given loader.
/// @tparam CHECKED true if loader should check image boundaries
/// @param columnX x coordinate of column in shared transform buffer
/// @param input input image
/// @param sizeX width of the input image
/// @param sizeY height of the input image
/// @param loader (uninitialized) info about loaded column
template <bool CHECKED>
__device__ void initColumn(const int columnX, const int * const input,
const int sizeX, const int sizeY,
RDWT53Column<CHECKED> & column,
const int firstY) {
// coordinates of the first coefficient to be loaded
const int firstX = blockIdx.x * WIN_SIZE_X + columnX;
// offset of the column with index 'colIndex' in the transform buffer
column.offset = buffer.getColumnOffset(columnX);
if(blockIdx.y == 0) {
// topmost block - apply mirroring rules when loading first 3 rows
column.loader.init(sizeX, sizeY, firstX, firstY);
// load pixels in mirrored way
buffer[column.offset + 1 * STRIDE] = column.loader.loadLowFrom(input);
buffer[column.offset + 0 * STRIDE] =
buffer[column.offset + 2 * STRIDE] = column.loader.loadHighFrom(input);
} else {
// non-topmost row - regular loading:
column.loader.init(sizeX, sizeY, firstX, firstY - 1);
buffer[column.offset + 0 * STRIDE] = column.loader.loadHighFrom(input);
buffer[column.offset + 1 * STRIDE] = column.loader.loadLowFrom(input);
buffer[column.offset + 2 * STRIDE] = column.loader.loadHighFrom(input);
}
// Now, the next coefficient, which will be loaded by loader, is #2.
}
/// Actual GPU 5/3 RDWT implementation.
/// @tparam CHECKED_LOADS true if boundaries must be checked when reading
/// @tparam CHECKED_WRITES true if boundaries must be checked when writing
/// @param in input image (5/3 transformed coefficients)
/// @param out output buffer (for reverse transformed image)
/// @param sizeX width of the output image
/// @param sizeY height of the output image
/// @param winSteps number of sliding window steps
template<bool CHECKED_LOADS, bool CHECKED_WRITES>
__device__ void transform(const int * const in, int * const out,
const int sizeX, const int sizeY,
const int winSteps) {
// info about one main and one boundary column
RDWT53Column<CHECKED_LOADS> column, boundaryColumn;
// index of first row to be transformed
const int firstY = blockIdx.y * WIN_SIZE_Y * winSteps;
// some threads initialize boundary columns
boundaryColumn.clear();
if(threadIdx.x < 3) {
// First 3 threads also handle boundary columns. Thread #0 gets right
// column #0, thread #1 gets right column #1 and thread #2 the left column.
const int colId = threadIdx.x + ((threadIdx.x != 2) ? WIN_SIZE_X : -3);
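// [Editor's illustration] For example, with WIN_SIZE_X == 192 this maps
// thread 0 -> column 192, thread 1 -> column 193 (the two columns to the
// right of the block) and thread 2 -> column -1 (the column to its left).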
// Thread initializes offset of the boundary column (in shared
// buffer), first 3 pixels of the column and a loader for this column.
initColumn(colId, in, sizeX, sizeY, boundaryColumn, firstY);
}
// All threads initialize central columns.
initColumn(parityIdx<WIN_SIZE_X>(), in, sizeX, sizeY, column, firstY);
// horizontally transform first 3 rows
horizontalTransform(3, 0);
// writer of output pixels - initialize it
const int outX = blockIdx.x * WIN_SIZE_X + threadIdx.x;
VerticalDWTPixelWriter<int, CHECKED_WRITES> writer;
writer.init(sizeX, sizeY, outX, firstY);
// offset of column (in transform buffer) saved by this thread
const int outputColumnOffset = buffer.getColumnOffset(threadIdx.x);
// (Each iteration assumes that first 3 rows of transform buffer are
// already loaded with horizontally transformed pixels.)
for(int w = 0; w < winSteps; w++) {
// Load another WIN_SIZE_Y lines of this thread's column
// into the transform buffer.
loadWindowIntoColumn(in, column);
// possibly load boundary columns
if(threadIdx.x < 3) {
loadWindowIntoColumn(in, boundaryColumn);
}
// horizontally transform all newly loaded lines
horizontalTransform(WIN_SIZE_Y, 3);
// Using 3 registers, remember current values of last 3 rows
// of transform buffer. These rows are transformed horizontally
// only and will be used in next iteration.
int last3Lines[3];
last3Lines[0] = buffer[outputColumnOffset + (WIN_SIZE_Y + 0) * STRIDE];
last3Lines[1] = buffer[outputColumnOffset + (WIN_SIZE_Y + 1) * STRIDE];
last3Lines[2] = buffer[outputColumnOffset + (WIN_SIZE_Y + 2) * STRIDE];
// vertically transform all central columns
buffer.forEachVerticalOdd(outputColumnOffset, Reverse53Update());
buffer.forEachVerticalEven(outputColumnOffset, Reverse53Predict());
// Save all results of current window. Results are in transform buffer
// at rows from #1 to #(1 + WIN_SIZE_Y). Other rows are invalid now.
// (They only served as a boundary for vertical RDWT.)
for(int i = 1; i < (1 + WIN_SIZE_Y); i++) {
writer.writeInto(out, buffer[outputColumnOffset + i * STRIDE]);
}
// Use last 3 remembered lines as first 3 lines for next iteration.
// As expected, these lines are already horizontally transformed.
buffer[outputColumnOffset + 0 * STRIDE] = last3Lines[0];
buffer[outputColumnOffset + 1 * STRIDE] = last3Lines[1];
buffer[outputColumnOffset + 2 * STRIDE] = last3Lines[2];
// Wait for all writing threads before proceeding to loading new
// coefficients in next iteration. (Not to overwrite those which
// are not written yet.)
__syncthreads();
}
}
public:
/// Main GPU 5/3 RDWT entry point.
/// @param in input image (5/3 transformed coefficients)
/// @param out output buffer (for reverse transformed image)
/// @param sizeX width of the output image
/// @param sizeY height of the output image
/// @param winSteps number of sliding window steps
__device__ static void run(const int * const input, int * const output,
const int sx, const int sy, const int steps) {
// prepare instance with buffer in shared memory
__shared__ RDWT53<WIN_SIZE_X, WIN_SIZE_Y> rdwt53;
// Compute limits of this threadblock's block of pixels and use them to
// determine, whether this threadblock will have to deal with boundary.
// (1 in next expressions is for radius of impulse response of 5/3 RDWT.)
const int maxX = (blockIdx.x + 1) * WIN_SIZE_X + 1;
const int maxY = (blockIdx.y + 1) * WIN_SIZE_Y * steps + 1;
const bool atRightBoudary = maxX >= sx;
const bool atBottomBoudary = maxY >= sy;
// Select specialized version of code according to distance of this
// threadblock's pixels from image boundary.
if(atBottomBoudary) {
// near bottom boundary => check both writing and reading
rdwt53.transform<true, true>(input, output, sx, sy, steps);
} else if(atRightBoudary) {
// near right boundary only => check writing only
rdwt53.transform<false, true>(input, output, sx, sy, steps);
} else {
// no nearby boundary => check nothing
rdwt53.transform<false, false>(input, output, sx, sy, steps);
}
}
}; // end of class RDWT53
/// Main GPU 5/3 RDWT entry point.
/// @param in input image (5/3 transformed coefficients)
/// @param out output buffer (for reverse transformed image)
/// @param sizeX width of the output image
/// @param sizeY height of the output image
/// @param winSteps number of sliding window steps
template <int WIN_SX, int WIN_SY>
__launch_bounds__(WIN_SX, CTMIN(SHM_SIZE/sizeof(RDWT53<WIN_SX, WIN_SY>), 8))
__global__ void rdwt53Kernel(const int * const in, int * const out,
const int sx, const int sy, const int steps) {
RDWT53<WIN_SX, WIN_SY>::run(in, out, sx, sy, steps);
}
/// Only computes the optimal number of sliding window steps and the
/// number of threadblocks, and then launches the 5/3 RDWT kernel.
/// @tparam WIN_SX width of sliding window
/// @tparam WIN_SY height of sliding window
/// @param in input image
/// @param out output buffer
/// @param sx width of the input image
/// @param sy height of the input image
template <int WIN_SX, int WIN_SY>
void launchRDWT53Kernel (int * in, int * out, const int sx, const int sy, float& kernelTime) {
// compute optimal number of steps of each sliding window
const int steps = divRndUp(sy, 15 * WIN_SY);
// prepare grid size
dim3 gSize(divRndUp(sx, WIN_SX), divRndUp(sy, WIN_SY * steps));
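// [Editor's illustration] E.g. for a 1920x1080 image with WIN_SX = 192 and
// WIN_SY = 8: steps = divRndUp(1080, 120) = 9, so each threadblock covers
// 9 * 8 = 72 rows and the grid is divRndUp(1920, 192) x divRndUp(1080, 72)
// = 10 x 15 threadblocks.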
// timing events
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float elapsedTime;
// finally transform this level
cudaEventRecord(start, 0);
rdwt53Kernel<WIN_SX, WIN_SY><<<gSize, WIN_SX>>>(in, out, sx, sy, steps);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
kernelTime += elapsedTime * 1.e-3;
CHECK_CUDA_ERROR();
}
/// Reverse 5/3 2D DWT. See common rules (above) for more details.
/// @param in Input DWT coefficients. Format described in common rules.
/// Will not be preserved (will be overwritten).
/// @param out output buffer on GPU - will contain original image
/// in normalized range [-128, 127].
/// @param sizeX width of input image (in pixels)
/// @param sizeY height of input image (in pixels)
/// @param levels number of recursive DWT levels
float rdwt53(int * in, int * out, int sizeX, int sizeY, int levels) {
float kernelTime = 0;
if(levels > 1) {
// let this function recursively reverse transform deeper levels first
const int llSizeX = divRndUp(sizeX, 2);
const int llSizeY = divRndUp(sizeY, 2);
kernelTime += rdwt53(in, out, llSizeX, llSizeY, levels - 1);
// copy reverse transformed LL band from output back into the input
memCopy(in, out, llSizeX, llSizeY);
}
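// [Editor's illustration] E.g. for a 1024x1024 input with levels = 3, the
// recursion above first reverses the 256x256 LL band, then the 512x512
// level (each result being copied back into the input), and finally the
// launch below handles the full 1024x1024 level.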
// select right width of kernel for the size of the image
if(sizeX >= 960) {
launchRDWT53Kernel<192, 8>(in, out, sizeX, sizeY, kernelTime);
} else if (sizeX >= 480) {
launchRDWT53Kernel<128, 8>(in, out, sizeX, sizeY, kernelTime);
} else {
launchRDWT53Kernel<64, 8>(in, out, sizeX, sizeY, kernelTime);
}
return kernelTime;
}
} // end of namespace dwt_cuda
|
c7aa184c7aa885895ee4ff3feb1ea0a9f990595b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/TailLenSoftMax.cu"
#else
#include "../common.h"
void THLENN_(TailLenSoftMax_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
THCIndexTensor *len)
{
THCULENN_assertSameGPU(state, 2, input, output);
if ((input->nDimension != 2) || (len->nDimension != 1))
{
THError("2D tensor expected for input, 1D tensor expected for len");
}
input = THCTensor_(newContiguous)(state, input);
THCTensor_(resizeAs)(state, output, input);
long batchSize = input->size[0], dim = input->size[1];
long blocksY = 1, blocksZ = 1;
dim3 blocks(batchSize, blocksY, blocksZ);
dim3 threads(TAILLENSOFTMAX_THREADS);
hipLaunchKernelGGL(( culenn_TailLenSoftMax_updateOutput_kernel<real, accreal, THCIndex_t>), dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, output),
THCTensor_(data)(state, input),
batchSize, dim, THCIndexTensor_(data)(state, len)
);
THCudaCheck(hipGetLastError());
THCTensor_(free)(state, input);
}
void THLENN_(TailLenSoftMax_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
THCTensor *output,
THCIndexTensor *len)
{
THCULENN_check_nElement(state, input, gradOutput);
THCULENN_assertSameGPU(state, 3, output, gradOutput, gradInput);
if ((gradInput->nDimension != 2) || (len->nDimension != 1))
{
THError("2D tensor expected for input, 1D tensor expected for len");
}
output = THCTensor_(newContiguous)(state, output);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
THCTensor_(resizeAs)(state, gradInput, output);
long batchSize = gradInput->size[0], dim = gradInput->size[1];
long blocksY = 1, blocksZ = 1;
dim3 blocks(batchSize, blocksY, blocksZ);
dim3 threads(TAILLENSOFTMAX_THREADS);
hipLaunchKernelGGL(( culenn_TailLenSoftMax_updateGradInput_kernel<real, accreal, THCIndex_t>), dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, gradInput),
THCTensor_(data)(state, output),
THCTensor_(data)(state, gradOutput),
batchSize, dim, THCIndexTensor_(data)(state, len)
);
THCudaCheck(hipGetLastError());
THCTensor_(free)(state, gradOutput);
THCTensor_(free)(state, output);
}
#endif
|
c7aa184c7aa885895ee4ff3feb1ea0a9f990595b.cu
|
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/TailLenSoftMax.cu"
#else
#include "../common.h"
void THLENN_(TailLenSoftMax_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
THCIndexTensor *len)
{
THCULENN_assertSameGPU(state, 2, input, output);
if ((input->nDimension != 2) || (len->nDimension != 1))
{
THError("2D tensor expected for input, 1D tensor expected for len");
}
input = THCTensor_(newContiguous)(state, input);
THCTensor_(resizeAs)(state, output, input);
long batchSize = input->size[0], dim = input->size[1];
long blocksY = 1, blocksZ = 1;
dim3 blocks(batchSize, blocksY, blocksZ);
dim3 threads(TAILLENSOFTMAX_THREADS);
culenn_TailLenSoftMax_updateOutput_kernel<real, accreal, THCIndex_t><<<blocks, threads, 0, THCState_getCurrentStream(state)>>>(
THCTensor_(data)(state, output),
THCTensor_(data)(state, input),
batchSize, dim, THCIndexTensor_(data)(state, len)
);
THCudaCheck(cudaGetLastError());
THCTensor_(free)(state, input);
}
void THLENN_(TailLenSoftMax_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
THCTensor *output,
THCIndexTensor *len)
{
THCULENN_check_nElement(state, input, gradOutput);
THCULENN_assertSameGPU(state, 3, output, gradOutput, gradInput);
if ((gradInput->nDimension != 2) || (len->nDimension != 1))
{
THError("2D tensor expected for input, 1D tensor expected for len");
}
output = THCTensor_(newContiguous)(state, output);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
THCTensor_(resizeAs)(state, gradInput, output);
long batchSize = gradInput->size[0], dim = gradInput->size[1];
long blocksY = 1, blocksZ = 1;
dim3 blocks(batchSize, blocksY, blocksZ);
dim3 threads(TAILLENSOFTMAX_THREADS);
culenn_TailLenSoftMax_updateGradInput_kernel<real, accreal, THCIndex_t><<<blocks, threads, 0, THCState_getCurrentStream(state)>>>(
THCTensor_(data)(state, gradInput),
THCTensor_(data)(state, output),
THCTensor_(data)(state, gradOutput),
batchSize, dim, THCIndexTensor_(data)(state, len)
);
THCudaCheck(cudaGetLastError());
THCTensor_(free)(state, gradOutput);
THCTensor_(free)(state, output);
}
#endif
|
9fa2b5d350b61cce5012e3b2699fc551b17b1372.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernels.h"
/* This kernel is identical to the last one.
*/
__global__ void reduce(float *input, float *output, unsigned int n)
{
// Determine this thread's various ids
unsigned int block_size = blockDim.x;
unsigned int thread_id = threadIdx.x;
unsigned int block_id = blockIdx.x;
// Calculate the index that this block's chunk of values starts at.
// (Same as last time)
unsigned int block_start = block_id * block_size * 2 + thread_id;
for (unsigned int stride = block_size; stride > 0; stride /= 2)
{
if (thread_id < stride && // On first iteration, this will be true for all threads.
// On subsequent iterations, it will ensure that we
// always use the threads in the lower half of the
// block (the ones with the lowest ids). This guarantees
// that the remaining values will always be
// contiguous in memory
block_start + stride < n) // If we're the last block, we may be running more threads
// than we need - this condition makes sure they don't
// interfere.
{
input[block_start] += input[block_start + stride];
}
// Sync threads to prevent anyone from reading on the next iteration before everybody's
// done writing on this one
__syncthreads();
}
// Thread 0 writes this block's partial result to the output buffer.
if (!thread_id)
{
output[block_id] = input[block_start];
}
}
|
9fa2b5d350b61cce5012e3b2699fc551b17b1372.cu
|
#include "kernels.h"
/* This kernel is identical to the last one.
*/
__global__ void reduce(float *input, float *output, unsigned int n)
{
// Determine this thread's various ids
unsigned int block_size = blockDim.x;
unsigned int thread_id = threadIdx.x;
unsigned int block_id = blockIdx.x;
// Calculate the index that this block's chunk of values starts at.
// (Same as last time)
unsigned int block_start = block_id * block_size * 2 + thread_id;
for (unsigned int stride = block_size; stride > 0; stride /= 2)
{
if (thread_id < stride && // On first iteration, this will be true for all threads.
// On subsequent iterations, it will ensure that we
// always use the threads in the lower half of the
// block (the ones with the lowest ids). This guarantees
// that the remaining values will always be
// contiguous in memory
block_start + stride < n) // If we're the last block, we may be running more threads
// than we need - this condition makes sure they don't
// interfere.
{
input[block_start] += input[block_start + stride];
}
// Sync threads to prevent anyone from reading on the next iteration before everybody's
// done writing on this one
__syncthreads();
}
// Thread 0 writes this block's partial result to the output buffer.
if (!thread_id)
{
output[block_id] = input[block_start];
}
}
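/* [Editor's sketch, not part of the original file] One hypothetical way to
 * drive the kernel above from the host: every launch folds up to 2*block_size
 * values per block into one partial sum, so the pass is repeated until a
 * single value remains. The function name, buffer names and the block size
 * of 256 are assumptions made for this illustration only. */
static float reduce_on_device(float *d_data, float *d_partials, unsigned int n)
{
    const unsigned int block_size = 256;
    while (n > 1)
    {
        // Each block consumes up to 2 * block_size inputs and emits one partial sum.
        unsigned int blocks = (n + 2 * block_size - 1) / (2 * block_size);
        reduce<<<blocks, block_size>>>(d_data, d_partials, n);
        // The partial sums become the input of the next pass.
        float *tmp = d_data; d_data = d_partials; d_partials = tmp;
        n = blocks;
    }
    // Copy the single remaining value (the total sum) back to the host.
    float result = 0.0f;
    cudaMemcpy(&result, d_data, sizeof(float), cudaMemcpyDeviceToHost);
    return result;
}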
|
097e92e5e5ab3225242cf4220fc645e7b1e35f3d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/****
* Copyright (c) 2011-2014, NVIDIA Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
****/
#include "common.h"
#include "pack_strong.h"
__constant__ long long int size_d;
__constant__ long long int sizex_d;
__constant__ long long int sizey_d;
__constant__ long long int sizexinterior_d;
__constant__ long long int sizeyinterior_d;
__constant__ long long int sizexy_d;
__constant__ long long int sizexy_log_d;
__constant__ long long int sizex_log_d;
__constant__ long long int sizey_log_d;
__constant__ long long int sizexm1_d;
__constant__ long long int sizeym1_d;
__constant__ long long int boundary_d;
__constant__ long long int boundary_log_d;
__constant__ long long int sizexp2_d;
__constant__ long long int sizeyp2_d;
__constant__ long long int elemsinterior_d;
__constant__ long long int elemsxboundary_d;
__constant__ long long int elemsyboundary_d;
/*Note that the matrix dimensions (Z contiguous) and thread dimensions (X contiguous) are reversed in order*/
__global__ void pack(float *tbuf_x, float *tbuf_y, float *sbuf)
{
int sidx, tidx, thidx;
thidx = blockIdx.x * blockDim.x + threadIdx.x;
if (thidx < sizex_d) {
/*pack y*/
/*pack left boundary*/
sidx = (thidx + 1)*sizeyp2_d + 1;
tidx = thidx;
tbuf_y[tidx] = sbuf[sidx];
/*pack right boundary*/
sidx = (thidx + 1)*sizeyp2_d + sizey_d;
tidx = size_d + thidx;
tbuf_y[tidx] = sbuf[sidx];
}
if (thidx < sizey_d) {
/*pack x*/
/*pack bottom boundary*/
sidx = sizeyp2_d + thidx + 1;
tidx = thidx;
tbuf_x[tidx] = sbuf[sidx];
/*pack top boundary*/
sidx = sizex_d*sizeyp2_d + thidx + 1;
tidx = size_d + thidx;
tbuf_x[tidx] = sbuf[sidx];
}
}
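/* [Editor's note] Layout used by pack() above and unpack() below: sbuf is a
 * (sizex+2) x (sizey+2) array with a one-cell ghost border and with the y
 * index contiguous (stride sizeyp2_d between consecutive x). pack() copies
 * the first and last interior column (y == 1 and y == sizey) into the two
 * halves of tbuf_y and the first and last interior row (x == 1 and x == sizex)
 * into the two halves of tbuf_x; the halves are size_d = max(sizex, sizey)
 * elements apart. */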
__global__ void unpack(float *tbuf, float *sbuf_x, float *sbuf_y) {
int sidx, tidx, thidx;
thidx = blockIdx.x * blockDim.x + threadIdx.x;
if (thidx < sizex_d) {
/*unpack y*/
/*unpack left boundary*/
tidx = (thidx + 1)*sizeyp2_d;
sidx = thidx;
tbuf[tidx] = sbuf_y[sidx];
/*pack right boundary*/
tidx = (thidx + 1)*sizeyp2_d + sizey_d + 1;
sidx = size_d + thidx;
tbuf[tidx] = sbuf_y[sidx];
}
if (thidx < sizey_d) {
/*unpack_x*/
/*unpack bottom boundary*/
tidx = thidx + 1;
sidx = thidx;
tbuf[tidx] = sbuf_x[sidx];
/*pack top boundary*/
tidx = (sizex_d + 1)*sizeyp2_d + thidx + 1;
sidx = size_d + thidx;
tbuf[tidx] = sbuf_x[sidx];
}
}
__global__ void compute_xboundary(float *tbuf, float *sbuf) {
int i, thidx, numthreads, x, y, tmp;
float *addr, *x_paddr, *x_maddr, *y_paddr, *y_maddr;
thidx = blockIdx.x * blockDim.x + threadIdx.x;
numthreads = gridDim.x * blockDim.x;
for (i = thidx; i < elemsxboundary_d; i += numthreads) {
x = i >> sizey_log_d;
tmp = x << sizey_log_d;
y = (i - tmp);
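/* [Editor's note] The shift/subtract above decodes the linear index i into
 * (x, y) with y = i mod sizey; it relies on sizey being a power of two
 * (sizey_log_d = log2(sizey)). E.g. with sizey = 8, i = 21 gives x = 2, y = 5. */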
/*note the added 1 to count for the ghost cells*/
addr = tbuf + (x + 1)*sizeyp2_d + (y + 1);
x_paddr = sbuf + (x + 2)*sizeyp2_d + (y + 1);
x_maddr = sbuf + (x)*sizeyp2_d + (y + 1);
y_paddr = sbuf + (x + 1)*sizeyp2_d + (y + 2);
y_maddr = sbuf + (x + 1)*sizeyp2_d + (y);
/*sommmmeeee computation*/
*addr = *addr + ((*x_paddr) + (*x_maddr) +
(*y_paddr) + (*y_maddr)) / 4.0;
/*recalculate x dimension for the other boundary*/
x = x + (sizex_d - boundary_d);
/*note the added 1 to count for the ghost cells*/
addr = tbuf + (x + 1)*sizeyp2_d + (y + 1);
x_paddr = sbuf + (x + 2)*sizeyp2_d + (y + 1);
x_maddr = sbuf + (x)*sizeyp2_d + (y + 1);
y_paddr = sbuf + (x + 1)*sizeyp2_d + (y + 2);
y_maddr = sbuf + (x + 1)*sizeyp2_d + (y);
/*sommmmeeee computation*/
*addr = *addr + ((*x_paddr) + (*x_maddr) +
(*y_paddr) + (*y_maddr)) / 4.0;
}
}
__global__ void compute_yboundary(float *tbuf, float *sbuf) {
int i, thidx, numthreads, x, y, tmp;
float *addr, *x_paddr, *x_maddr, *y_paddr, *y_maddr;
thidx = blockIdx.x * blockDim.x + threadIdx.x;
numthreads = gridDim.x * blockDim.x;
for (i = thidx; i < elemsyboundary_d; i += numthreads) {
x = i >> boundary_log_d;
tmp = x << boundary_log_d;
y = (i - tmp);
/*note the added value to count for the ghost cells*/
x = x + boundary_d + 1;
y = y + 1;
addr = tbuf + x*sizeyp2_d + y;
x_paddr = sbuf + (x + 1)*sizeyp2_d + y;
x_maddr = sbuf + (x - 1)*sizeyp2_d + y;
y_paddr = sbuf + x*sizeyp2_d + (y + 1);
y_maddr = sbuf + x*sizeyp2_d + (y - 1);
/*sommmmeeee computation*/
*addr = *addr + ((*x_paddr) + (*x_maddr) +
(*y_paddr) + (*y_maddr)) / 4.0;
/*recalculate x dimension for the other boundary*/
y = y + (sizey_d - boundary_d);
addr = tbuf + x*sizeyp2_d + y;
x_paddr = sbuf + (x + 1)*sizeyp2_d + y;
x_maddr = sbuf + (x - 1)*sizeyp2_d + y;
y_paddr = sbuf + x*sizeyp2_d + (y + 1);
y_maddr = sbuf + x*sizeyp2_d + (y - 1);
/*sommmmeeee computation*/
*addr = *addr + ((*x_paddr) + (*x_maddr) +
(*y_paddr) + (*y_maddr)) / 4.0;
}
}
__global__ void compute_interior (float *tbuf, float *sbuf) {
int i, thidx, numthreads, x, y, tmp;
float *addr, *x_paddr, *x_maddr, *y_paddr, *y_maddr;
thidx = blockIdx.x * blockDim.x + threadIdx.x;
numthreads = gridDim.x * blockDim.x;
for (i = thidx; i < elemsinterior_d; i += numthreads) {
x = i / sizeyinterior_d;
tmp = x * sizeyinterior_d;
y = (i - tmp);
x = x + boundary_d + 1;
y = y + boundary_d + 1;
/*note the added value to count for the boundary and ghost cells*/
addr = tbuf + x*sizeyp2_d + y;
x_paddr = sbuf + (x + 1)*sizeyp2_d + y;
x_maddr = sbuf + (x - 1)*sizeyp2_d + y;
y_paddr = sbuf + x*sizeyp2_d + (y + 1);
y_maddr = sbuf + x*sizeyp2_d + (y - 1);
/*sommmmeeee computation*/
*addr = *addr + ((*x_paddr) + (*x_maddr) +
(*y_paddr) + (*y_maddr)) / 4.0;
}
}
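/* [Editor's note] All three compute_* kernels above apply the same 5-point
 * stencil: tbuf(x, y) += (sbuf(x+1, y) + sbuf(x-1, y) + sbuf(x, y+1) +
 * sbuf(x, y-1)) / 4, and differ only in which region of the grid (interior,
 * x-boundary strips or y-boundary strips) each one covers. */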
extern "C" void boundary_pack (float *tbuf, float * sbuf, long long int lenx, long long int leny, int threadsperblock, hipStream_t stream)
{
int gridsize;
int len;
len = lenx > leny ? lenx : leny;
if (threadsperblock > len) {
threadsperblock = len;
gridsize = 1;
} else {
gridsize = len/threadsperblock + ((len & (threadsperblock - 1)) > 0);
}
hipLaunchKernelGGL(( pack), dim3(gridsize), dim3(threadsperblock), 0, stream, tbuf + 2*len, tbuf, sbuf);
CUDA_CHECK(hipGetLastError());
}
extern "C" void boundary_unpack (float *tbuf, float * sbuf, long long int lenx, long long int leny, int threadsperblock, hipStream_t stream)
{
int gridsize;
int len;
len = lenx > leny ? lenx : leny;
if (threadsperblock > len) {
threadsperblock = len;
gridsize = 1;
} else {
gridsize = len/threadsperblock + ((len & (threadsperblock - 1)) > 0);
}
hipLaunchKernelGGL(( unpack), dim3(gridsize), dim3(threadsperblock), 0, stream, tbuf, sbuf + 2*len, sbuf);
CUDA_CHECK(hipGetLastError());
}
extern "C" void boundary_compute (float *tbuf, float * sbuf,
long long int sizex, long long int sizey, long long int boundary, int threadsperblock, int gridsize, hipStream_t stream)
{
int numelems = sizey*boundary;
if (threadsperblock > numelems) {
threadsperblock = numelems;
gridsize = 1;
} else {
if (gridsize > (numelems)/threadsperblock) {
gridsize = (numelems)/threadsperblock + ((numelems & (threadsperblock - 1)) > 0);
}
}
/*top and bottom*/
hipLaunchKernelGGL(( compute_xboundary), dim3(gridsize), dim3(threadsperblock), 0, stream, tbuf, sbuf);
CUDA_CHECK(hipGetLastError());
numelems = (sizex-2*boundary)*boundary;
if (threadsperblock > numelems) {
threadsperblock = numelems;
gridsize = 1;
} else {
if (gridsize > (numelems)/threadsperblock) {
gridsize = (numelems)/threadsperblock + ((numelems & (threadsperblock - 1)) > 0);
}
}
/*left and right*/
hipLaunchKernelGGL(( compute_yboundary), dim3(gridsize), dim3(threadsperblock), 0, stream, tbuf, sbuf);
CUDA_CHECK(hipGetLastError());
}
extern "C" void interior_compute (float *tbuf, float *sbuf, long long int sizex, long long int sizey, long long int boundary, int threadsperblock,
int gridsize, hipStream_t stream) {
int numelems = (sizex - 2*boundary)*(sizey - 2*boundary);
if (threadsperblock > numelems) {
threadsperblock = numelems;
gridsize = 1;
} else {
if (gridsize > (numelems)/threadsperblock) {
gridsize = (numelems)/threadsperblock + ((numelems & (threadsperblock - 1)) > 0);
}
}
hipLaunchKernelGGL(( compute_interior), dim3(gridsize), dim3(threadsperblock), 0, stream, tbuf, sbuf);
CUDA_CHECK(hipGetLastError());
}
extern "C" void copytosymbol (long long int sizex, long long int sizey, long long int boundary, long long int sizex_log,
long long int sizey_log, long long int boundary_log, long long int sizexy_log) {
long long int sizexy = sizex*sizey;
long long int sizexp2 = sizex + 2;
long long int sizeyp2 = sizey + 2;
long long int sizexinterior = sizex - 2*boundary;
long long int sizeyinterior = sizey - 2*boundary;
long long int elemsinterior = sizexinterior * sizeyinterior;
long long int elemsxboundary = sizey*boundary;
long long int elemsyboundary = (sizex-2*boundary)*boundary;
long long int size = (sizex > sizey) ? sizex : sizey;
CUDA_CHECK(hipMemcpyToSymbol(size_d, &size, sizeof(long long int), 0, hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpyToSymbol(sizex_d, &sizex, sizeof(long long int), 0, hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpyToSymbol(sizey_d, &sizey, sizeof(long long int), 0, hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpyToSymbol(sizexinterior_d, &sizexinterior, sizeof(long long int), 0, hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpyToSymbol(sizeyinterior_d, &sizeyinterior, sizeof(long long int), 0, hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpyToSymbol(sizexp2_d, &sizexp2, sizeof(long long int), 0, hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpyToSymbol(sizeyp2_d, &sizeyp2, sizeof(long long int), 0, hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpyToSymbol(sizexy_d, &sizexy, sizeof(long long int), 0, hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpyToSymbol(sizex_log_d, &sizex_log, sizeof(long long int), 0, hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpyToSymbol(sizey_log_d, &sizey_log, sizeof(long long int), 0, hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpyToSymbol(sizexy_log_d, &sizexy_log, sizeof(long long int), 0, hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpyToSymbol(boundary_d, &boundary, sizeof(long long int), 0, hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpyToSymbol(boundary_log_d, &boundary_log, sizeof(long long int), 0, hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpyToSymbol(elemsinterior_d, &elemsinterior, sizeof(long long int), 0, hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpyToSymbol(elemsxboundary_d, &elemsxboundary, sizeof(long long int), 0, hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpyToSymbol(elemsyboundary_d, &elemsyboundary, sizeof(long long int), 0, hipMemcpyHostToDevice));
}
|
097e92e5e5ab3225242cf4220fc645e7b1e35f3d.cu
|
/****
* Copyright (c) 2011-2014, NVIDIA Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
****/
#include "common.h"
#include "pack_strong.h"
__constant__ long long int size_d;
__constant__ long long int sizex_d;
__constant__ long long int sizey_d;
__constant__ long long int sizexinterior_d;
__constant__ long long int sizeyinterior_d;
__constant__ long long int sizexy_d;
__constant__ long long int sizexy_log_d;
__constant__ long long int sizex_log_d;
__constant__ long long int sizey_log_d;
__constant__ long long int sizexm1_d;
__constant__ long long int sizeym1_d;
__constant__ long long int boundary_d;
__constant__ long long int boundary_log_d;
__constant__ long long int sizexp2_d;
__constant__ long long int sizeyp2_d;
__constant__ long long int elemsinterior_d;
__constant__ long long int elemsxboundary_d;
__constant__ long long int elemsyboundary_d;
/*Note that the matrix dimensions (Z contiguous) and thread dimensions (X contiguous) are reversed in order*/
__global__ void pack(float *tbuf_x, float *tbuf_y, float *sbuf)
{
int sidx, tidx, thidx;
thidx = blockIdx.x * blockDim.x + threadIdx.x;
if (thidx < sizex_d) {
/*pack y*/
/*pack left boundary*/
sidx = (thidx + 1)*sizeyp2_d + 1;
tidx = thidx;
tbuf_y[tidx] = sbuf[sidx];
/*pack right boundary*/
sidx = (thidx + 1)*sizeyp2_d + sizey_d;
tidx = size_d + thidx;
tbuf_y[tidx] = sbuf[sidx];
}
if (thidx < sizey_d) {
/*pack x*/
/*pack bottom boundary*/
sidx = sizeyp2_d + thidx + 1;
tidx = thidx;
tbuf_x[tidx] = sbuf[sidx];
/*pack top boundary*/
sidx = sizex_d*sizeyp2_d + thidx + 1;
tidx = size_d + thidx;
tbuf_x[tidx] = sbuf[sidx];
}
}
__global__ void unpack(float *tbuf, float *sbuf_x, float *sbuf_y) {
int sidx, tidx, thidx;
thidx = blockIdx.x * blockDim.x + threadIdx.x;
if (thidx < sizex_d) {
/*unpack y*/
/*unpack left boundary*/
tidx = (thidx + 1)*sizeyp2_d;
sidx = thidx;
tbuf[tidx] = sbuf_y[sidx];
/*pack right boundary*/
tidx = (thidx + 1)*sizeyp2_d + sizey_d + 1;
sidx = size_d + thidx;
tbuf[tidx] = sbuf_y[sidx];
}
if (thidx < sizey_d) {
/*unpack_x*/
/*unpack bottom boundary*/
tidx = thidx + 1;
sidx = thidx;
tbuf[tidx] = sbuf_x[sidx];
/*pack top boundary*/
tidx = (sizex_d + 1)*sizeyp2_d + thidx + 1;
sidx = size_d + thidx;
tbuf[tidx] = sbuf_x[sidx];
}
}
__global__ void compute_xboundary(float *tbuf, float *sbuf) {
int i, thidx, numthreads, x, y, tmp;
float *addr, *x_paddr, *x_maddr, *y_paddr, *y_maddr;
thidx = blockIdx.x * blockDim.x + threadIdx.x;
numthreads = gridDim.x * blockDim.x;
for (i = thidx; i < elemsxboundary_d; i += numthreads) {
x = i >> sizey_log_d;
tmp = x << sizey_log_d;
y = (i - tmp);
/*note the added 1 to count for the ghost cells*/
addr = tbuf + (x + 1)*sizeyp2_d + (y + 1);
x_paddr = sbuf + (x + 2)*sizeyp2_d + (y + 1);
x_maddr = sbuf + (x)*sizeyp2_d + (y + 1);
y_paddr = sbuf + (x + 1)*sizeyp2_d + (y + 2);
y_maddr = sbuf + (x + 1)*sizeyp2_d + (y);
/*sommmmeeee computation*/
*addr = *addr + ((*x_paddr) + (*x_maddr) +
(*y_paddr) + (*y_maddr)) / 4.0;
/*recalculate x dimension for the other boundary*/
x = x + (sizex_d - boundary_d);
/*note the added 1 to count for the ghost cells*/
addr = tbuf + (x + 1)*sizeyp2_d + (y + 1);
x_paddr = sbuf + (x + 2)*sizeyp2_d + (y + 1);
x_maddr = sbuf + (x)*sizeyp2_d + (y + 1);
y_paddr = sbuf + (x + 1)*sizeyp2_d + (y + 2);
y_maddr = sbuf + (x + 1)*sizeyp2_d + (y);
/*sommmmeeee computation*/
*addr = *addr + ((*x_paddr) + (*x_maddr) +
(*y_paddr) + (*y_maddr)) / 4.0;
}
}
__global__ void compute_yboundary(float *tbuf, float *sbuf) {
int i, thidx, numthreads, x, y, tmp;
float *addr, *x_paddr, *x_maddr, *y_paddr, *y_maddr;
thidx = blockIdx.x * blockDim.x + threadIdx.x;
numthreads = gridDim.x * blockDim.x;
for (i = thidx; i < elemsyboundary_d; i += numthreads) {
x = i >> boundary_log_d;
tmp = x << boundary_log_d;
y = (i - tmp);
/*note the added value to count for the ghost cells*/
x = x + boundary_d + 1;
y = y + 1;
addr = tbuf + x*sizeyp2_d + y;
x_paddr = sbuf + (x + 1)*sizeyp2_d + y;
x_maddr = sbuf + (x - 1)*sizeyp2_d + y;
y_paddr = sbuf + x*sizeyp2_d + (y + 1);
y_maddr = sbuf + x*sizeyp2_d + (y - 1);
/*sommmmeeee computation*/
*addr = *addr + ((*x_paddr) + (*x_maddr) +
(*y_paddr) + (*y_maddr)) / 4.0;
/*recalculate x dimension for the other boundary*/
y = y + (sizey_d - boundary_d);
addr = tbuf + x*sizeyp2_d + y;
x_paddr = sbuf + (x + 1)*sizeyp2_d + y;
x_maddr = sbuf + (x - 1)*sizeyp2_d + y;
y_paddr = sbuf + x*sizeyp2_d + (y + 1);
y_maddr = sbuf + x*sizeyp2_d + (y - 1);
/*sommmmeeee computation*/
*addr = *addr + ((*x_paddr) + (*x_maddr) +
(*y_paddr) + (*y_maddr)) / 4.0;
}
}
__global__ void compute_interior (float *tbuf, float *sbuf) {
int i, thidx, numthreads, x, y, tmp;
float *addr, *x_paddr, *x_maddr, *y_paddr, *y_maddr;
thidx = blockIdx.x * blockDim.x + threadIdx.x;
numthreads = gridDim.x * blockDim.x;
for (i = thidx; i < elemsinterior_d; i += numthreads) {
x = i / sizeyinterior_d;
tmp = x * sizeyinterior_d;
y = (i - tmp);
x = x + boundary_d + 1;
y = y + boundary_d + 1;
/*note the added value to count for the boundary and ghost cells*/
addr = tbuf + x*sizeyp2_d + y;
x_paddr = sbuf + (x + 1)*sizeyp2_d + y;
x_maddr = sbuf + (x - 1)*sizeyp2_d + y;
y_paddr = sbuf + x*sizeyp2_d + (y + 1);
y_maddr = sbuf + x*sizeyp2_d + (y - 1);
/*sommmmeeee computation*/
*addr = *addr + ((*x_paddr) + (*x_maddr) +
(*y_paddr) + (*y_maddr)) / 4.0;
}
}
extern "C" void boundary_pack (float *tbuf, float * sbuf, long long int lenx, long long int leny, int threadsperblock, cudaStream_t stream)
{
int gridsize;
int len;
len = lenx > leny ? lenx : leny;
if (threadsperblock > len) {
threadsperblock = len;
gridsize = 1;
} else {
gridsize = len/threadsperblock + ((len & (threadsperblock - 1)) > 0);
}
pack<<<gridsize, threadsperblock, 0, stream>>>(tbuf + 2*len, tbuf, sbuf);
CUDA_CHECK(cudaGetLastError());
}
extern "C" void boundary_unpack (float *tbuf, float * sbuf, long long int lenx, long long int leny, int threadsperblock, cudaStream_t stream)
{
int gridsize;
int len;
len = lenx > leny ? lenx : leny;
if (threadsperblock > len) {
threadsperblock = len;
gridsize = 1;
} else {
gridsize = len/threadsperblock + ((len & (threadsperblock - 1)) > 0);
}
unpack<<<gridsize, threadsperblock, 0, stream>>>(tbuf, sbuf + 2*len, sbuf);
CUDA_CHECK(cudaGetLastError());
}
extern "C" void boundary_compute (float *tbuf, float * sbuf,
long long int sizex, long long int sizey, long long int boundary, int threadsperblock, int gridsize, cudaStream_t stream)
{
int numelems = sizey*boundary;
if (threadsperblock > numelems) {
threadsperblock = numelems;
gridsize = 1;
} else {
if (gridsize > (numelems)/threadsperblock) {
gridsize = (numelems)/threadsperblock + ((numelems & (threadsperblock - 1)) > 0);
}
}
/*top and bottom*/
compute_xboundary<<<gridsize, threadsperblock, 0, stream>>>(tbuf, sbuf);
CUDA_CHECK(cudaGetLastError());
numelems = (sizex-2*boundary)*boundary;
if (threadsperblock > numelems) {
threadsperblock = numelems;
gridsize = 1;
} else {
if (gridsize > (numelems)/threadsperblock) {
gridsize = (numelems)/threadsperblock + ((numelems & (threadsperblock - 1)) > 0);
}
}
/*left and right*/
compute_yboundary<<<gridsize, threadsperblock, 0, stream>>>(tbuf, sbuf);
CUDA_CHECK(cudaGetLastError());
}
extern "C" void interior_compute (float *tbuf, float *sbuf, long long int sizex, long long int sizey, long long int boundary, int threadsperblock,
int gridsize, cudaStream_t stream) {
int numelems = (sizex - 2*boundary)*(sizey - 2*boundary);
if (threadsperblock > numelems) {
threadsperblock = numelems;
gridsize = 1;
} else {
if (gridsize > (numelems)/threadsperblock) {
gridsize = (numelems)/threadsperblock + ((numelems & (threadsperblock - 1)) > 0);
}
}
compute_interior<<<gridsize, threadsperblock, 0, stream>>>(tbuf, sbuf);
CUDA_CHECK(cudaGetLastError());
}
extern "C" void copytosymbol (long long int sizex, long long int sizey, long long int boundary, long long int sizex_log,
long long int sizey_log, long long int boundary_log, long long int sizexy_log) {
long long int sizexy = sizex*sizey;
long long int sizexp2 = sizex + 2;
long long int sizeyp2 = sizey + 2;
long long int sizexinterior = sizex - 2*boundary;
long long int sizeyinterior = sizey - 2*boundary;
long long int elemsinterior = sizexinterior * sizeyinterior;
long long int elemsxboundary = sizey*boundary;
long long int elemsyboundary = (sizex-2*boundary)*boundary;
long long int size = (sizex > sizey) ? sizex : sizey;
CUDA_CHECK(cudaMemcpyToSymbol(size_d, &size, sizeof(long long int), 0, cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpyToSymbol(sizex_d, &sizex, sizeof(long long int), 0, cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpyToSymbol(sizey_d, &sizey, sizeof(long long int), 0, cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpyToSymbol(sizexinterior_d, &sizexinterior, sizeof(long long int), 0, cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpyToSymbol(sizeyinterior_d, &sizeyinterior, sizeof(long long int), 0, cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpyToSymbol(sizexp2_d, &sizexp2, sizeof(long long int), 0, cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpyToSymbol(sizeyp2_d, &sizeyp2, sizeof(long long int), 0, cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpyToSymbol(sizexy_d, &sizexy, sizeof(long long int), 0, cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpyToSymbol(sizex_log_d, &sizex_log, sizeof(long long int), 0, cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpyToSymbol(sizey_log_d, &sizey_log, sizeof(long long int), 0, cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpyToSymbol(sizexy_log_d, &sizexy_log, sizeof(long long int), 0, cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpyToSymbol(boundary_d, &boundary, sizeof(long long int), 0, cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpyToSymbol(boundary_log_d, &boundary_log, sizeof(long long int), 0, cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpyToSymbol(elemsinterior_d, &elemsinterior, sizeof(long long int), 0, cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpyToSymbol(elemsxboundary_d, &elemsxboundary, sizeof(long long int), 0, cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpyToSymbol(elemsyboundary_d, &elemsyboundary, sizeof(long long int), 0, cudaMemcpyHostToDevice));
}
|
6f05ed62417e94bfe84b6cdd48403a34ec8cbea6.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include "../common/book.h"
int main( void ) {
hipDeviceProp_t prop;
int count;
HANDLE_ERROR( hipGetDeviceCount( &count ) );
for (int i=0; i< count; i++) {
HANDLE_ERROR( hipGetDeviceProperties( &prop, i ) );
printf( " --- General Information for device %d ---\n", i );
printf( "Name: %s\n", prop.name );
printf( "Compute capability: %d.%d\n", prop.major, prop.minor );
printf( "Clock rate: %d\n", prop.clockRate );
printf( "Device copy overlap: " );
if (prop.deviceOverlap)
printf( "Enabled\n" );
else
printf( "Disabled\n");
printf( "Kernel execution timeout : " );
if (prop.kernelExecTimeoutEnabled)
printf( "Enabled\n" );
else
printf( "Disabled\n" );
printf( " --- Memory Information for device %d ---\n", i );
printf( "Total global mem: %ld\n", prop.totalGlobalMem );
printf( "Total constant Mem: %ld\n", prop.totalConstMem );
printf( "Max mem pitch: %ld\n", prop.memPitch );
printf( "Texture Alignment: %ld\n", prop.textureAlignment );
printf( " --- MP Information for device %d ---\n", i );
printf( "Multiprocessor count: %d\n",
prop.multiProcessorCount );
printf( "Shared mem per mp: %ld\n", prop.sharedMemPerBlock );
printf( "Registers per mp: %d\n", prop.regsPerBlock );
printf( "Threads in warp: %d\n", prop.warpSize );
printf( "Max threads per block: %d\n",
prop.maxThreadsPerBlock );
printf( "Max thread dimensions: (%d, %d, %d)\n",
prop.maxThreadsDim[0], prop.maxThreadsDim[1],
prop.maxThreadsDim[2] );
printf( "Max grid dimensions: (%d, %d, %d)\n",
prop.maxGridSize[0], prop.maxGridSize[1],
prop.maxGridSize[2] );
printf( "\n" );
}
}
|
6f05ed62417e94bfe84b6cdd48403a34ec8cbea6.cu
|
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include "../common/book.h"
int main( void ) {
cudaDeviceProp prop;
int count;
HANDLE_ERROR( cudaGetDeviceCount( &count ) );
for (int i=0; i< count; i++) {
HANDLE_ERROR( cudaGetDeviceProperties( &prop, i ) );
printf( " --- General Information for device %d ---\n", i );
printf( "Name: %s\n", prop.name );
printf( "Compute capability: %d.%d\n", prop.major, prop.minor );
printf( "Clock rate: %d\n", prop.clockRate );
printf( "Device copy overlap: " );
if (prop.deviceOverlap)
printf( "Enabled\n" );
else
printf( "Disabled\n");
printf( "Kernel execution timeout : " );
if (prop.kernelExecTimeoutEnabled)
printf( "Enabled\n" );
else
printf( "Disabled\n" );
printf( " --- Memory Information for device %d ---\n", i );
printf( "Total global mem: %ld\n", prop.totalGlobalMem );
printf( "Total constant Mem: %ld\n", prop.totalConstMem );
printf( "Max mem pitch: %ld\n", prop.memPitch );
printf( "Texture Alignment: %ld\n", prop.textureAlignment );
printf( " --- MP Information for device %d ---\n", i );
printf( "Multiprocessor count: %d\n",
prop.multiProcessorCount );
printf( "Shared mem per mp: %ld\n", prop.sharedMemPerBlock );
printf( "Registers per mp: %d\n", prop.regsPerBlock );
printf( "Threads in warp: %d\n", prop.warpSize );
printf( "Max threads per block: %d\n",
prop.maxThreadsPerBlock );
printf( "Max thread dimensions: (%d, %d, %d)\n",
prop.maxThreadsDim[0], prop.maxThreadsDim[1],
prop.maxThreadsDim[2] );
printf( "Max grid dimensions: (%d, %d, %d)\n",
prop.maxGridSize[0], prop.maxGridSize[1],
prop.maxGridSize[2] );
printf( "\n" );
}
}
|
ac33efc758476c360191ebf178aec62ea3eed708.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/Exceptions.h>
#include "multi_tensor_apply.cuh"
#include "compat.h"
#include <assert.h>
#include <hip/hip_runtime.h>
#define BLOCK_SIZE 512
#define ILP 4
/**
* Perform fused SGD on multiple buffers
* N: number of tensors
* tl[0] : gradients
* tl[1] : weights
* tl[2] : momentum buffers
* tl[3] : fp16 weights (if appropriate)
* wd : weight_decay (scalar)
* momentum : momentum (scalar)
* dampening : momentum dampening (scalar)
* lr : learning rate (scalar)
* nesterov : enable nesterov (bool)
* first run : necessary for proper momentum handling & init
* wd_after_momentum : apply weight decay _after_ momentum instead of before
**/
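/* [Editor's note] For the common configuration (momentum != 0, nesterov ==
 * false, weight decay applied before momentum, not the first run) the functor
 * below effectively performs, per element:
 *   m = momentum * m + (1 - dampening) * (scale * grad + wd * w)
 *   w = w - lr * m
 * with the new m written back to the momentum buffer. */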
template<int N, typename T_grad, typename T_weight>
struct SGDFunctor
{
__device__ __forceinline__ void operator()(
int chunk_size,
volatile int* noop_gmem,
TensorListMetadata<N>& tl,
float wd,
float momentum,
float dampening,
float lr,
bool nesterov,
bool first_run,
bool wd_after_momentum,
float scale)
{
// Early exit if we don't need to do anything
if (*noop_gmem) return;
int tensor_loc = tl.block_to_tensor[blockIdx.x];
int chunk_idx = tl.block_to_chunk[blockIdx.x];
int n = tl.sizes[tensor_loc];
T_grad* grad_in = (T_grad*)tl.addresses[0][tensor_loc];
grad_in += chunk_idx*chunk_size;
T_weight* weight_in = (T_weight*)tl.addresses[1][tensor_loc];
weight_in += chunk_idx*chunk_size;
T_weight* mom_in = (T_weight*)tl.addresses[2][tensor_loc];
mom_in += chunk_idx*chunk_size;
at::Half *model_weights_out = nullptr;
if(N == 4)
{
model_weights_out = (at::Half*)tl.addresses[3][tensor_loc];
model_weights_out += chunk_idx*chunk_size;
}
n -= chunk_idx*chunk_size;
// Non-divergent exit condition for the __syncthreads
float incoming_grads[ILP];
float incoming_weights[ILP];
float incoming_moms[ILP];
for(int i_start = 0;
i_start < n && i_start < chunk_size;
i_start += blockDim.x*ILP)
{
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
incoming_grads[ii] = 0;
incoming_weights[ii] = 0;
incoming_moms[ii] = 0;
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
incoming_grads[ii] = static_cast<float>(grad_in[i])*scale;
incoming_weights[ii] = static_cast<float>(weight_in[i]);
incoming_moms[ii] = static_cast<float>(mom_in[i]);
}
}
// note for clarification to future michael:
// From a pure memory dependency perspective, there's likely no point unrolling
// the write loop, since writes just fire off once their LDGs arrive.
// Put another way, the STGs are dependent on the LDGs, but not on each other.
// There is still compute ILP benefit from unrolling the loop though.
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
// apply weight decay before momentum if necessary
if(wd != 0.f && !wd_after_momentum)
incoming_grads[ii] += wd * incoming_weights[ii];
if(momentum != 0.f)
{
if(!first_run)
incoming_moms[ii] = incoming_moms[ii] * momentum + (1.f - dampening) * incoming_grads[ii];
else // initialize momentums to current incoming grads
incoming_moms[ii] = incoming_grads[ii];
if(nesterov)
incoming_grads[ii] += momentum * incoming_moms[ii];
else
incoming_grads[ii] = incoming_moms[ii];
}
// Apply WD after momentum if desired
if(wd != 0.f && wd_after_momentum)
incoming_grads[ii] += wd * incoming_weights[ii];
// adjust the weight and write out
weight_in[i] += (-lr * incoming_grads[ii]);
// if necessary, write out an fp16 copy of the weights
if(N == 4)
model_weights_out[i] = static_cast<at::Half>(weight_in[i]);
// also write out the new momentum
if(momentum != 0.f)
mom_in[i] = incoming_moms[ii];
}
}
}
}
};
void multi_tensor_sgd_cuda(
int chunk_size,
at::Tensor noop_flag,
std::vector<std::vector<at::Tensor>> tensor_lists,
float wd,
float momentum,
float dampening,
float lr,
bool nesterov,
bool first_run,
bool wd_after_momentum,
float scale)
{
auto num_tensors = tensor_lists.size();
auto grad_type = tensor_lists[0][0].scalar_type();
auto weight_type = tensor_lists[1][0].scalar_type();
if(num_tensors == 4)
for(int i = 0; i < tensor_lists[3].size(); i++)
TORCH_CHECK(tensor_lists[3][i].scalar_type() == at::ScalarType::Half,
"Additional output tensors should always be fp16.");
// We have 3 possibilities to handle here, in terms of
// grad_type, param_type, momentum_type, requires_fp16_copy
// 1. fp16, fp16, fp16, No
// 2. fp32, fp32, fp32, No
// 3. fp16, fp32, fp32, Yes
// 4. fp32, fp32, fp32, Yes // this is the materialize_master_grads=True case
// It's easier to hardcode these possibilities than to use
// switches etc. to handle the cross-product of cases where
// we don't want the majority of them.
// Case 1. fp16, fp16, fp16, No
if(grad_type == at::ScalarType::Half &&
weight_type == at::ScalarType::Half &&
num_tensors == 3)
{
multi_tensor_apply<3>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
SGDFunctor<3, at::Half, at::Half>(),
wd,
momentum,
dampening,
lr,
nesterov,
first_run,
wd_after_momentum,
scale);
}
// Case 2. fp16, fp32, fp32, No
// else if (grad_type == at::ScalarType::Half &&
// weight_type == at::ScalarType::Float &&
// num_tensors == 3) {
// multi_tensor_apply<3>(
// BLOCK_SIZE,
// chunk_size,
// noop_flag,
// tensor_lists,
// SGDFunctor<3, at::Half, float>(),
// wd,
// momentum,
// dampening,
// lr,
// nesterov,
// first_run,
// wd_after_momentum);
// }
// Case 2. fp32, fp32, fp32, No
else if(grad_type == at::ScalarType::Float &&
weight_type == at::ScalarType::Float &&
num_tensors == 3)
{
multi_tensor_apply<3>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
SGDFunctor<3, float, float>(),
wd,
momentum,
dampening,
lr,
nesterov,
first_run,
wd_after_momentum,
scale);
}
// Case 3. fp16, fp32, fp32, Yes
else if(grad_type == at::ScalarType::Half &&
weight_type == at::ScalarType::Float &&
num_tensors == 4)
{
multi_tensor_apply<4>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
SGDFunctor<4, at::Half, float>(),
wd,
momentum,
dampening,
lr,
nesterov,
first_run,
wd_after_momentum,
scale);
}
// Case 4. fp32, fp32, fp32, Yes
else if(grad_type == at::ScalarType::Float &&
weight_type == at::ScalarType::Float &&
num_tensors == 4)
{
multi_tensor_apply<4>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
SGDFunctor<4, float, float>(),
wd,
momentum,
dampening,
lr,
nesterov,
first_run,
wd_after_momentum,
scale);
}
else
{
AT_ERROR("multi_tensor_sgd only supports some combinations of gradient & weight types. Given: ",
"gradient: ", grad_type, ", weight: ", weight_type, ", num_lists: ", num_tensors);
}
AT_CUDA_CHECK(hipGetLastError());
}
|
ac33efc758476c360191ebf178aec62ea3eed708.cu
|
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/Exceptions.h>
#include "multi_tensor_apply.cuh"
#include "compat.h"
#include <assert.h>
#include <cuda_runtime.h>
#define BLOCK_SIZE 512
#define ILP 4
/**
* Perform fused SGD on multiple buffers
* N: number of tensors
* tl[0] : gradients
* tl[1] : weights
* tl[2] : momentum buffers
* tl[3] : fp16 weights (if appropriate)
* wd : weight_decay (scalar)
* momentum : momentum (scalar)
* dampening : momentum dampening (scalar)
* lr : learning rate (scalar)
* nesterov : enable nesterov (bool)
 * first_run : necessary for proper momentum handling & init
* wd_after_momentum : apply weight decay _after_ momentum instead of before
**/
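// Summary of the per-element update performed by the functor below, with
// g = grad * scale, w = weight and m = momentum buffer:
//   if (wd != 0 && !wd_after_momentum)  g += wd * w;
//   if (momentum != 0)                  m  = first_run ? g : momentum * m + (1 - dampening) * g;
//                                       g  = nesterov ? g + momentum * m : m;
//   if (wd != 0 && wd_after_momentum)   g += wd * w;
//   w -= lr * g;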
template<int N, typename T_grad, typename T_weight>
struct SGDFunctor
{
__device__ __forceinline__ void operator()(
int chunk_size,
volatile int* noop_gmem,
TensorListMetadata<N>& tl,
float wd,
float momentum,
float dampening,
float lr,
bool nesterov,
bool first_run,
bool wd_after_momentum,
float scale)
{
// Early exit if we don't need to do anything
if (*noop_gmem) return;
int tensor_loc = tl.block_to_tensor[blockIdx.x];
int chunk_idx = tl.block_to_chunk[blockIdx.x];
int n = tl.sizes[tensor_loc];
T_grad* grad_in = (T_grad*)tl.addresses[0][tensor_loc];
grad_in += chunk_idx*chunk_size;
T_weight* weight_in = (T_weight*)tl.addresses[1][tensor_loc];
weight_in += chunk_idx*chunk_size;
T_weight* mom_in = (T_weight*)tl.addresses[2][tensor_loc];
mom_in += chunk_idx*chunk_size;
at::Half *model_weights_out = nullptr;
if(N == 4)
{
model_weights_out = (at::Half*)tl.addresses[3][tensor_loc];
model_weights_out += chunk_idx*chunk_size;
}
n -= chunk_idx*chunk_size;
    // Non-divergent loop bounds: every thread executes the same number of outer iterations.
float incoming_grads[ILP];
float incoming_weights[ILP];
float incoming_moms[ILP];
for(int i_start = 0;
i_start < n && i_start < chunk_size;
i_start += blockDim.x*ILP)
{
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
incoming_grads[ii] = 0;
incoming_weights[ii] = 0;
incoming_moms[ii] = 0;
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
incoming_grads[ii] = static_cast<float>(grad_in[i])*scale;
incoming_weights[ii] = static_cast<float>(weight_in[i]);
incoming_moms[ii] = static_cast<float>(mom_in[i]);
}
}
// note for clarification to future michael:
// From a pure memory dependency perspective, there's likely no point unrolling
// the write loop, since writes just fire off once their LDGs arrive.
// Put another way, the STGs are dependent on the LDGs, but not on each other.
// There is still compute ILP benefit from unrolling the loop though.
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
// apply weight decay before momentum if necessary
if(wd != 0.f && !wd_after_momentum)
incoming_grads[ii] += wd * incoming_weights[ii];
if(momentum != 0.f)
{
if(!first_run)
incoming_moms[ii] = incoming_moms[ii] * momentum + (1.f - dampening) * incoming_grads[ii];
else // initialize momentums to current incoming grads
incoming_moms[ii] = incoming_grads[ii];
if(nesterov)
incoming_grads[ii] += momentum * incoming_moms[ii];
else
incoming_grads[ii] = incoming_moms[ii];
}
// Apply WD after momentum if desired
if(wd != 0.f && wd_after_momentum)
incoming_grads[ii] += wd * incoming_weights[ii];
// adjust the weight and write out
weight_in[i] += (-lr * incoming_grads[ii]);
// if necessary, write out an fp16 copy of the weights
if(N == 4)
model_weights_out[i] = static_cast<at::Half>(weight_in[i]);
// also write out the new momentum
if(momentum != 0.f)
mom_in[i] = incoming_moms[ii];
}
}
}
}
};
void multi_tensor_sgd_cuda(
int chunk_size,
at::Tensor noop_flag,
std::vector<std::vector<at::Tensor>> tensor_lists,
float wd,
float momentum,
float dampening,
float lr,
bool nesterov,
bool first_run,
bool wd_after_momentum,
float scale)
{
auto num_tensors = tensor_lists.size();
auto grad_type = tensor_lists[0][0].scalar_type();
auto weight_type = tensor_lists[1][0].scalar_type();
if(num_tensors == 4)
for(int i = 0; i < tensor_lists[3].size(); i++)
TORCH_CHECK(tensor_lists[3][i].scalar_type() == at::ScalarType::Half,
"Additional output tensors should always be fp16.");
// We have 4 possibilities to handle here, in terms of
// grad_type, param_type, momentum_type, requires_fp16_copy
// 1. fp16, fp16, fp16, No
// 2. fp32, fp32, fp32, No
// 3. fp16, fp32, fp32, Yes
// 4. fp32, fp32, fp32, Yes // this is the materialize_master_grads=True case
// It's easier to hardcode these possibilities than to use
// switches etc. to handle the cross-product of cases where
// we don't want the majority of them.
// Case 1. fp16, fp16, fp16, No
if(grad_type == at::ScalarType::Half &&
weight_type == at::ScalarType::Half &&
num_tensors == 3)
{
multi_tensor_apply<3>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
SGDFunctor<3, at::Half, at::Half>(),
wd,
momentum,
dampening,
lr,
nesterov,
first_run,
wd_after_momentum,
scale);
}
// Case 2. fp16, fp32, fp32, No
// else if (grad_type == at::ScalarType::Half &&
// weight_type == at::ScalarType::Float &&
// num_tensors == 3) {
// multi_tensor_apply<3>(
// BLOCK_SIZE,
// chunk_size,
// noop_flag,
// tensor_lists,
// SGDFunctor<3, at::Half, float>(),
// wd,
// momentum,
// dampening,
// lr,
// nesterov,
// first_run,
// wd_after_momentum);
// }
// Case 2. fp32, fp32, fp32, No
else if(grad_type == at::ScalarType::Float &&
weight_type == at::ScalarType::Float &&
num_tensors == 3)
{
multi_tensor_apply<3>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
SGDFunctor<3, float, float>(),
wd,
momentum,
dampening,
lr,
nesterov,
first_run,
wd_after_momentum,
scale);
}
// Case 3. fp16, fp32, fp32, Yes
else if(grad_type == at::ScalarType::Half &&
weight_type == at::ScalarType::Float &&
num_tensors == 4)
{
multi_tensor_apply<4>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
SGDFunctor<4, at::Half, float>(),
wd,
momentum,
dampening,
lr,
nesterov,
first_run,
wd_after_momentum,
scale);
}
// Case 4. fp32, fp32, fp32, Yes
else if(grad_type == at::ScalarType::Float &&
weight_type == at::ScalarType::Float &&
num_tensors == 4)
{
multi_tensor_apply<4>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
SGDFunctor<4, float, float>(),
wd,
momentum,
dampening,
lr,
nesterov,
first_run,
wd_after_momentum,
scale);
}
else
{
AT_ERROR("multi_tensor_sgd only supports some combinations of gradient & weight types. Given: ",
"gradient: ", grad_type, ", weight: ", weight_type, ", num_lists: ", num_tensors);
}
AT_CUDA_CHECK(cudaGetLastError());
}
|
34e54d1fe55601fa902f9fba3f920b30bc8647f5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
enum {VelScale, ForceScale, NoiseScale, MaxParams};
/**
* Perform the first step of Langevin integration.
*/
extern "C" __global__ void integrateLangevinPart1(int numAtoms, int paddedNumAtoms, mixed4* __restrict__ velm, const long long* __restrict__ force, mixed4* __restrict__ posDelta,
const mixed* __restrict__ paramBuffer, const mixed2* __restrict__ dt, const float4* __restrict__ random, unsigned int randomIndex) {
mixed vscale = paramBuffer[VelScale];
mixed fscale = paramBuffer[ForceScale]/(mixed) 0x100000000;
mixed noisescale = paramBuffer[NoiseScale];
mixed stepSize = dt[0].y;
int index = blockIdx.x*blockDim.x+threadIdx.x;
randomIndex += index;
while (index < numAtoms) {
mixed4 velocity = velm[index];
if (velocity.w != 0) {
mixed sqrtInvMass = SQRT(velocity.w);
velocity.x = vscale*velocity.x + fscale*velocity.w*force[index] + noisescale*sqrtInvMass*random[randomIndex].x;
velocity.y = vscale*velocity.y + fscale*velocity.w*force[index+paddedNumAtoms] + noisescale*sqrtInvMass*random[randomIndex].y;
velocity.z = vscale*velocity.z + fscale*velocity.w*force[index+paddedNumAtoms*2] + noisescale*sqrtInvMass*random[randomIndex].z;
velm[index] = velocity;
posDelta[index] = make_mixed4(stepSize*velocity.x, stepSize*velocity.y, stepSize*velocity.z, 0);
}
randomIndex += blockDim.x*gridDim.x;
index += blockDim.x*gridDim.x;
}
}
/**
* Perform the second step of Langevin integration.
*/
extern "C" __global__ void integrateLangevinPart2(int numAtoms, real4* __restrict__ posq, real4* __restrict__ posqCorrection, const mixed4* __restrict__ posDelta, mixed4* __restrict__ velm, const mixed2* __restrict__ dt) {
#if __CUDA_ARCH__ >= 130
double invStepSize = 1.0/dt[0].y;
#else
float invStepSize = 1.0f/dt[0].y;
float correction = (1.0f-invStepSize*dt[0].y)/dt[0].y;
#endif
int index = blockIdx.x*blockDim.x+threadIdx.x;
while (index < numAtoms) {
mixed4 vel = velm[index];
if (vel.w != 0) {
#ifdef USE_MIXED_PRECISION
real4 pos1 = posq[index];
real4 pos2 = posqCorrection[index];
mixed4 pos = make_mixed4(pos1.x+(mixed)pos2.x, pos1.y+(mixed)pos2.y, pos1.z+(mixed)pos2.z, pos1.w);
#else
real4 pos = posq[index];
#endif
mixed4 delta = posDelta[index];
pos.x += delta.x;
pos.y += delta.y;
pos.z += delta.z;
#if __CUDA_ARCH__ >= 130
vel.x = (mixed) (invStepSize*delta.x);
vel.y = (mixed) (invStepSize*delta.y);
vel.z = (mixed) (invStepSize*delta.z);
#else
        vel.x = invStepSize*delta.x + correction*delta.x;
        vel.y = invStepSize*delta.y + correction*delta.y;
        vel.z = invStepSize*delta.z + correction*delta.z;
#endif
#ifdef USE_MIXED_PRECISION
posq[index] = make_real4((real) pos.x, (real) pos.y, (real) pos.z, (real) pos.w);
posqCorrection[index] = make_real4(pos.x-(real) pos.x, pos.y-(real) pos.y, pos.z-(real) pos.z, 0);
#else
posq[index] = pos;
#endif
velm[index] = vel;
}
index += blockDim.x*gridDim.x;
}
}
/**
* Select the step size to use for the next step.
*/
extern "C" __global__ void selectLangevinStepSize(int numAtoms, int paddedNumAtoms, mixed maxStepSize, mixed errorTol, mixed friction, mixed kT, mixed2* __restrict__ dt,
const mixed4* __restrict__ velm, const long long* __restrict__ force, mixed* __restrict__ paramBuffer) {
// Calculate the error.
extern __shared__ mixed params[];
    mixed* error = &params[MaxParams];
mixed err = 0;
unsigned int index = threadIdx.x;
const mixed scale = RECIP((mixed) 0x100000000);
while (index < numAtoms) {
mixed3 f = make_mixed3(scale*force[index], scale*force[index+paddedNumAtoms], scale*force[index+paddedNumAtoms*2]);
mixed invMass = velm[index].w;
err += (f.x*f.x + f.y*f.y + f.z*f.z)*invMass*invMass;
index += blockDim.x*gridDim.x;
}
error[threadIdx.x] = err;
__syncthreads();
// Sum the errors from all threads.
for (unsigned int offset = 1; offset < blockDim.x; offset *= 2) {
if (threadIdx.x+offset < blockDim.x && (threadIdx.x&(2*offset-1)) == 0)
error[threadIdx.x] += error[threadIdx.x+offset];
__syncthreads();
}
if (blockIdx.x*blockDim.x+threadIdx.x == 0) {
// Select the new step size.
mixed totalError = SQRT(error[0]/(numAtoms*3));
mixed newStepSize = SQRT(errorTol/totalError);
mixed oldStepSize = dt[0].y;
if (oldStepSize > 0.0f)
newStepSize = min(newStepSize, oldStepSize*2.0f); // For safety, limit how quickly dt can increase.
if (newStepSize > oldStepSize && newStepSize < 1.1f*oldStepSize)
newStepSize = oldStepSize; // Keeping dt constant between steps improves the behavior of the integrator.
if (newStepSize > maxStepSize)
newStepSize = maxStepSize;
dt[0].y = newStepSize;
// Recalculate the integration parameters.
mixed vscale = exp(-newStepSize*friction);
mixed fscale = (friction == 0 ? newStepSize : (1-vscale)/friction);
mixed noisescale = sqrt(kT*(1-vscale*vscale));
params[VelScale] = vscale;
params[ForceScale] = fscale;
params[NoiseScale] = noisescale;
}
__syncthreads();
if (threadIdx.x < MaxParams)
paramBuffer[threadIdx.x] = params[threadIdx.x];
}
|
34e54d1fe55601fa902f9fba3f920b30bc8647f5.cu
|
enum {VelScale, ForceScale, NoiseScale, MaxParams};
/**
* Perform the first step of Langevin integration.
*/
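// Per-atom update performed below (velm.w holds 1/m; R is a unit-variance
// Gaussian random number per component):
//   v        <- vscale * v + fscale * f / m + noisescale * R / sqrt(m)
//   posDelta <- stepSize * v
// Atoms with velm.w == 0 (infinite mass) are left untouched.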
extern "C" __global__ void integrateLangevinPart1(int numAtoms, int paddedNumAtoms, mixed4* __restrict__ velm, const long long* __restrict__ force, mixed4* __restrict__ posDelta,
const mixed* __restrict__ paramBuffer, const mixed2* __restrict__ dt, const float4* __restrict__ random, unsigned int randomIndex) {
mixed vscale = paramBuffer[VelScale];
mixed fscale = paramBuffer[ForceScale]/(mixed) 0x100000000;
mixed noisescale = paramBuffer[NoiseScale];
mixed stepSize = dt[0].y;
int index = blockIdx.x*blockDim.x+threadIdx.x;
randomIndex += index;
while (index < numAtoms) {
mixed4 velocity = velm[index];
if (velocity.w != 0) {
mixed sqrtInvMass = SQRT(velocity.w);
velocity.x = vscale*velocity.x + fscale*velocity.w*force[index] + noisescale*sqrtInvMass*random[randomIndex].x;
velocity.y = vscale*velocity.y + fscale*velocity.w*force[index+paddedNumAtoms] + noisescale*sqrtInvMass*random[randomIndex].y;
velocity.z = vscale*velocity.z + fscale*velocity.w*force[index+paddedNumAtoms*2] + noisescale*sqrtInvMass*random[randomIndex].z;
velm[index] = velocity;
posDelta[index] = make_mixed4(stepSize*velocity.x, stepSize*velocity.y, stepSize*velocity.z, 0);
}
randomIndex += blockDim.x*gridDim.x;
index += blockDim.x*gridDim.x;
}
}
/**
* Perform the second step of Langevin integration.
*/
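// Applies the position delta computed in part 1 and recovers the velocity as
// v = delta / dt (with an extra single-precision correction term on pre-SM 1.3
// devices). With USE_MIXED_PRECISION, positions are kept as a real4 value plus
// a real4 correction that stores the rounding error.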
extern "C" __global__ void integrateLangevinPart2(int numAtoms, real4* __restrict__ posq, real4* __restrict__ posqCorrection, const mixed4* __restrict__ posDelta, mixed4* __restrict__ velm, const mixed2* __restrict__ dt) {
#if __CUDA_ARCH__ >= 130
double invStepSize = 1.0/dt[0].y;
#else
float invStepSize = 1.0f/dt[0].y;
float correction = (1.0f-invStepSize*dt[0].y)/dt[0].y;
#endif
int index = blockIdx.x*blockDim.x+threadIdx.x;
while (index < numAtoms) {
mixed4 vel = velm[index];
if (vel.w != 0) {
#ifdef USE_MIXED_PRECISION
real4 pos1 = posq[index];
real4 pos2 = posqCorrection[index];
mixed4 pos = make_mixed4(pos1.x+(mixed)pos2.x, pos1.y+(mixed)pos2.y, pos1.z+(mixed)pos2.z, pos1.w);
#else
real4 pos = posq[index];
#endif
mixed4 delta = posDelta[index];
pos.x += delta.x;
pos.y += delta.y;
pos.z += delta.z;
#if __CUDA_ARCH__ >= 130
vel.x = (mixed) (invStepSize*delta.x);
vel.y = (mixed) (invStepSize*delta.y);
vel.z = (mixed) (invStepSize*delta.z);
#else
        vel.x = invStepSize*delta.x + correction*delta.x;
        vel.y = invStepSize*delta.y + correction*delta.y;
        vel.z = invStepSize*delta.z + correction*delta.z;
#endif
#ifdef USE_MIXED_PRECISION
posq[index] = make_real4((real) pos.x, (real) pos.y, (real) pos.z, (real) pos.w);
posqCorrection[index] = make_real4(pos.x-(real) pos.x, pos.y-(real) pos.y, pos.z-(real) pos.z, 0);
#else
posq[index] = pos;
#endif
velm[index] = vel;
}
index += blockDim.x*gridDim.x;
}
}
/**
* Select the step size to use for the next step.
*/
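// Step-size selection performed below:
//   err    = sqrt( sum_i |f_i / m_i|^2 / (3 * numAtoms) )
//   dt_new = sqrt(errorTol / err), limited to at most 2 * dt_old and maxStepSize,
//            and kept equal to dt_old if the increase would be under 10%.
// The Langevin parameters are then recomputed for the new step size:
//   vscale     = exp(-dt_new * friction)
//   fscale     = (1 - vscale) / friction   (or dt_new when friction == 0)
//   noisescale = sqrt(kT * (1 - vscale^2))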
extern "C" __global__ void selectLangevinStepSize(int numAtoms, int paddedNumAtoms, mixed maxStepSize, mixed errorTol, mixed friction, mixed kT, mixed2* __restrict__ dt,
const mixed4* __restrict__ velm, const long long* __restrict__ force, mixed* __restrict__ paramBuffer) {
// Calculate the error.
extern __shared__ mixed params[];
    mixed* error = &params[MaxParams];
mixed err = 0;
unsigned int index = threadIdx.x;
const mixed scale = RECIP((mixed) 0x100000000);
while (index < numAtoms) {
mixed3 f = make_mixed3(scale*force[index], scale*force[index+paddedNumAtoms], scale*force[index+paddedNumAtoms*2]);
mixed invMass = velm[index].w;
err += (f.x*f.x + f.y*f.y + f.z*f.z)*invMass*invMass;
index += blockDim.x*gridDim.x;
}
error[threadIdx.x] = err;
__syncthreads();
// Sum the errors from all threads.
for (unsigned int offset = 1; offset < blockDim.x; offset *= 2) {
if (threadIdx.x+offset < blockDim.x && (threadIdx.x&(2*offset-1)) == 0)
error[threadIdx.x] += error[threadIdx.x+offset];
__syncthreads();
}
if (blockIdx.x*blockDim.x+threadIdx.x == 0) {
// Select the new step size.
mixed totalError = SQRT(error[0]/(numAtoms*3));
mixed newStepSize = SQRT(errorTol/totalError);
mixed oldStepSize = dt[0].y;
if (oldStepSize > 0.0f)
newStepSize = min(newStepSize, oldStepSize*2.0f); // For safety, limit how quickly dt can increase.
if (newStepSize > oldStepSize && newStepSize < 1.1f*oldStepSize)
newStepSize = oldStepSize; // Keeping dt constant between steps improves the behavior of the integrator.
if (newStepSize > maxStepSize)
newStepSize = maxStepSize;
dt[0].y = newStepSize;
// Recalculate the integration parameters.
mixed vscale = exp(-newStepSize*friction);
mixed fscale = (friction == 0 ? newStepSize : (1-vscale)/friction);
mixed noisescale = sqrt(kT*(1-vscale*vscale));
params[VelScale] = vscale;
params[ForceScale] = fscale;
params[NoiseScale] = noisescale;
}
__syncthreads();
if (threadIdx.x < MaxParams)
paramBuffer[threadIdx.x] = params[threadIdx.x];
}
|
6467152a8a0af50c1a5371f15e0256084cd183bc.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/utils/math/reduce.h"
#include <algorithm>
#include <functional>
#include <limits>
#include <numeric>
#include <vector>
#include <hipcub/hipcub.hpp>
#include <thrust/execution_policy.h>
#include <thrust/reduce.h>
#include <thrust/transform.h>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/math/elementwise.h"
#include "caffe2/utils/math/reduce.cuh"
#include "caffe2/utils/math/utils.h"
namespace caffe2 {
namespace math {
namespace {
template <typename T, class Reducer>
__global__ void RowwiseReduceCUDAKernel(
const int cols,
const Reducer reducer,
const T init,
const T alpha,
const T* X,
T* Y) {
__shared__ typename BlockReduce<T>::TempStorage temp_storage;
const int r = blockIdx.x;
T val = init;
for (int c = threadIdx.x; c < cols; c += blockDim.x) {
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
val = reducer(val, __ldg(X + r * cols + c));
#else
val = reducer(val, X[r * cols + c]);
#endif
}
val = BlockReduce<T>(temp_storage).Reduce(val, reducer);
if (threadIdx.x == 0) {
Y[r] = val * alpha;
}
}
template <typename T, class Reducer>
__global__ void ColwiseReduceCUDAKernel(
const int rows,
const int cols,
const Reducer reducer,
const T init,
const T alpha,
const T* X,
T* Y) {
__shared__ typename BlockReduce<T>::TempStorage temp_storage;
const int c = blockIdx.x;
T val = init;
for (int r = threadIdx.x; r < rows; r += blockDim.x) {
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
val = reducer(val, __ldg(X + r * cols + c));
#else
val = reducer(val, X[r * cols + c]);
#endif
}
val = BlockReduce<T>(temp_storage).Reduce(val, reducer);
if (threadIdx.x == 0) {
Y[c] = val * alpha;
}
}
template <typename T, class Reducer, int kBlockDimX, int kBlockDimY>
__global__ void BothEndsReduceCUDAKernel(
const int M,
const int N,
const int K,
const Reducer reducer,
const T init,
const T alpha,
const T* X,
T* Y) {
__shared__ typename BlockReduce2D<T, kBlockDimX, kBlockDimY>::TempStorage
temp_storage;
const int n = blockIdx.x;
T val = init;
for (int m = threadIdx.x; m < M; m += blockDim.x) {
for (int k = threadIdx.y; k < K; k += blockDim.y) {
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
val = reducer(val, __ldg(X + (m * N + n) * K + k));
#else
val = reducer(val, X[(m * N + n) * K + k]);
#endif
}
}
val = BlockReduce2D<T, kBlockDimX, kBlockDimY>(temp_storage)
.Reduce(val, reducer);
if (threadIdx.x == 0 && threadIdx.y == 0) {
Y[n] = val * alpha;
}
}
template <typename T, class Reducer, int D>
__global__ void ReduceTensorCUDAKernel(
const int inner_size,
const SimpleArray<int, D> X_strides,
const SimpleArray<int, D> Y_dims,
const Reducer reducer,
const T init,
const T alpha,
const T* X,
T* Y) {
__shared__ typename BlockReduce<T>::TempStorage temp_storage;
const int x = blockIdx.x;
T val = init;
for (int y = threadIdx.x; y < inner_size; y += blockDim.x) {
int X_index = 0;
int Y_index = x * inner_size + y;
#pragma unroll
for (int d = D - 1; d >= 0; --d) {
X_index += Y_index % Y_dims.data[d] * X_strides.data[d];
Y_index /= Y_dims.data[d];
}
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
val = reducer(val, __ldg(X + X_index));
#else
val = reducer(val, X[X_index]);
#endif
}
val = BlockReduce<T>(temp_storage).Reduce(val, reducer);
if (threadIdx.x == 0) {
Y[x] = val * alpha;
}
}
template <typename T, class Reducer, int D>
void ReduceTensorCUDAImpl(
const int outer_size,
const int inner_size,
const int* dims,
const int* axes,
const Reducer& reducer,
const T init,
const T alpha,
const T* X,
T* Y,
CUDAContext* context) {
SimpleArray<int, D> X_strides;
SimpleArray<int, D> Y_dims;
utils::ComputeTransposedStrides(D, dims, axes, X_strides.data);
for (int i = 0; i < D; ++i) {
Y_dims.data[i] = dims[axes[i]];
}
hipLaunchKernelGGL(( ReduceTensorCUDAKernel<T, Reducer, D>)
, dim3(outer_size), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(),
inner_size, X_strides, Y_dims, reducer, init, alpha, X, Y);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
template <typename T, class Reducer>
void ReduceTensorCUDA(
const int ndim,
const int* X_dims,
const int* Y_dims,
const Reducer& reducer,
const T init,
const T alpha,
const T* X,
T* Y,
CUDAContext* context) {
CAFFE_ENFORCE(utils::CheckReduceDims(ndim, X_dims, Y_dims));
const int X_size =
std::accumulate(X_dims, X_dims + ndim, 1, std::multiplies<int>());
const int Y_size =
std::accumulate(Y_dims, Y_dims + ndim, 1, std::multiplies<int>());
if (X_size == 0) {
Set<T, CUDAContext>(Y_size, init * alpha, Y, context);
return;
}
if (std::equal(X_dims, X_dims + ndim, Y_dims)) {
Scale<T, T, CUDAContext>(X_size, alpha, X, Y, context);
return;
}
int rows;
int cols;
if (utils::IsRowwiseReduce(ndim, X_dims, Y_dims, &rows, &cols)) {
hipLaunchKernelGGL(( RowwiseReduceCUDAKernel<T, Reducer>)
, dim3(rows), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(),
cols, reducer, init, alpha, X, Y);
C10_HIP_KERNEL_LAUNCH_CHECK();
return;
}
if (utils::IsColwiseReduce(ndim, X_dims, Y_dims, &rows, &cols)) {
hipLaunchKernelGGL(( ColwiseReduceCUDAKernel<T, Reducer>)
, dim3(cols), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(),
rows, cols, reducer, init, alpha, X, Y);
C10_HIP_KERNEL_LAUNCH_CHECK();
return;
}
int M;
int N;
int K;
if (utils::IsBothEndsReduce(ndim, X_dims, Y_dims, &M, &N, &K)) {
DISPATCH_REDUCE_KERNEL_BY_2D_BLOCK_WITH_TYPE_2(
K,
BothEndsReduceCUDAKernel,
T,
Reducer,
N,
context->cuda_stream(),
M,
N,
K,
reducer,
init,
alpha,
X,
Y);
return;
}
std::vector<int> axes(ndim);
utils::ComputeTransposeAxesForReduceOp(ndim, Y_dims, axes.data());
const int outer_size = Y_size;
const int inner_size = X_size / Y_size;
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_2(
ndim,
ReduceTensorCUDAImpl,
T,
Reducer,
outer_size,
inner_size,
X_dims,
axes.data(),
reducer,
init,
alpha,
X,
Y,
context);
}
template <typename T>
__global__ void
RowwiseMomentsCUDAKernel(const int cols, const T* X, T* mean, T* var) {
__shared__ typename BlockReduce<T>::TempStorage m_storage;
__shared__ typename BlockReduce<T>::TempStorage v_storage;
const T scale = T(1) / static_cast<T>(cols);
const int r = blockIdx.x;
T m_val = 0;
T v_val = 0;
for (int c = threadIdx.x; c < cols; c += blockDim.x) {
const int X_index = r * cols + c;
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
m_val += __ldg(X + X_index);
v_val += __ldg(X + X_index) * __ldg(X + X_index);
#else
m_val += X[X_index];
v_val += X[X_index] * X[X_index];
#endif
}
m_val = BlockReduce<T>(m_storage).Sum(m_val);
v_val = BlockReduce<T>(v_storage).Sum(v_val);
if (threadIdx.x == 0) {
const T mu = m_val * scale;
mean[r] = mu;
var[r] = v_val * scale - mu * mu;
}
}
template <typename T>
__global__ void ColwiseMomentsCUDAKernel(
const int rows,
const int cols,
const T* X,
T* mean,
T* var) {
__shared__ typename BlockReduce<T>::TempStorage m_storage;
__shared__ typename BlockReduce<T>::TempStorage v_storage;
const T scale = T(1) / static_cast<T>(rows);
const int c = blockIdx.x;
T m_val = 0;
T v_val = 0;
for (int r = threadIdx.x; r < rows; r += blockDim.x) {
const int X_index = r * cols + c;
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
m_val += __ldg(X + X_index);
v_val += __ldg(X + X_index) * __ldg(X + X_index);
#else
m_val += X[X_index];
v_val += X[X_index] * X[X_index];
#endif
}
m_val = BlockReduce<T>(m_storage).Sum(m_val);
v_val = BlockReduce<T>(v_storage).Sum(v_val);
if (threadIdx.x == 0) {
const T mu = m_val * scale;
mean[c] = mu;
var[c] = v_val * scale - mu * mu;
}
}
template <typename T, int kBlockDimX, int kBlockDimY>
__global__ void BothEndsMomentsCUDAKernel(
const int M,
const int N,
const int K,
const T* X,
T* mean,
T* var) {
__shared__
typename BlockReduce2D<T, kBlockDimX, kBlockDimY>::TempStorage m_storage;
__shared__
typename BlockReduce2D<T, kBlockDimX, kBlockDimY>::TempStorage v_storage;
const T scale = T(1) / static_cast<T>(M * K);
const int n = blockIdx.x;
T m_val = 0;
T v_val = 0;
for (int m = threadIdx.x; m < M; m += blockDim.x) {
for (int k = threadIdx.y; k < K; k += blockDim.y) {
const int X_index = (m * N + n) * K + k;
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
m_val += __ldg(X + X_index);
v_val += __ldg(X + X_index) * __ldg(X + X_index);
#else
m_val += X[X_index];
v_val += X[X_index] * X[X_index];
#endif
}
}
m_val = BlockReduce2D<T, kBlockDimX, kBlockDimY>(m_storage).Sum(m_val);
v_val = BlockReduce2D<T, kBlockDimX, kBlockDimY>(v_storage).Sum(v_val);
if (threadIdx.x == 0 && threadIdx.y == 0) {
const T mu = m_val * scale;
mean[n] = mu;
var[n] = v_val * scale - mu * mu;
}
}
template <typename T, int D>
__global__ void MomentsCUDAKernel(
const int inner_size,
const SimpleArray<int, D> X_strides,
const SimpleArray<int, D> Y_dims,
const T* X,
T* mean,
T* var) {
__shared__ typename BlockReduce<T>::TempStorage m_storage;
__shared__ typename BlockReduce<T>::TempStorage v_storage;
const T scale = T(1) / static_cast<T>(inner_size);
const int x = blockIdx.x;
T m_val = 0;
T v_val = 0;
for (int y = threadIdx.x; y < inner_size; y += blockDim.x) {
int X_index = 0;
int Y_index = x * inner_size + y;
#pragma unroll
for (int d = D - 1; d >= 0; --d) {
X_index += Y_index % Y_dims.data[d] * X_strides.data[d];
Y_index /= Y_dims.data[d];
}
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
m_val += __ldg(X + X_index);
v_val += __ldg(X + X_index) * __ldg(X + X_index);
#else
m_val += X[X_index];
v_val += X[X_index] * X[X_index];
#endif
}
m_val = BlockReduce<T>(m_storage).Sum(m_val);
v_val = BlockReduce<T>(v_storage).Sum(v_val);
if (threadIdx.x == 0) {
const T mu = m_val * scale;
mean[x] = mu;
var[x] = v_val * scale - mu * mu;
}
}
template <typename T, int D>
void MomentsCUDAImpl(
const int outer_size,
const int inner_size,
const int* dims,
const int* axes,
const T* X,
T* mean,
T* var,
CUDAContext* context) {
SimpleArray<int, D> X_strides;
SimpleArray<int, D> Y_dims;
utils::ComputeTransposedStrides(D, dims, axes, X_strides.data);
for (int i = 0; i < D; ++i) {
Y_dims.data[i] = dims[axes[i]];
}
hipLaunchKernelGGL(( MomentsCUDAKernel<T, D>)
, dim3(outer_size), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(),
inner_size, X_strides, Y_dims, X, mean, var);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
template <typename T>
void MomentsCUDA(
const int ndim,
const int* X_dims,
const int* Y_dims,
const T* X,
T* mean,
T* var,
CUDAContext* context) {
CAFFE_ENFORCE(utils::CheckReduceDims(ndim, X_dims, Y_dims));
const int X_size =
std::accumulate(X_dims, X_dims + ndim, 1, std::multiplies<int>());
const int Y_size =
std::accumulate(Y_dims, Y_dims + ndim, 1, std::multiplies<int>());
if (X_size == 0) {
Set<T, CUDAContext>(Y_size, T(0), mean, context);
Set<T, CUDAContext>(Y_size, T(0), var, context);
return;
}
if (std::equal(X_dims, X_dims + ndim, Y_dims)) {
hipMemcpyAsync(
mean,
X,
sizeof(T) * X_size,
hipMemcpyDeviceToDevice,
context->cuda_stream());
Set<T, CUDAContext>(Y_size, T(0), var, context);
return;
}
int rows;
int cols;
if (utils::IsRowwiseReduce(ndim, X_dims, Y_dims, &rows, &cols)) {
hipLaunchKernelGGL(( RowwiseMomentsCUDAKernel<T>)
, dim3(rows), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(),
cols, X, mean, var);
C10_HIP_KERNEL_LAUNCH_CHECK();
return;
}
if (utils::IsColwiseReduce(ndim, X_dims, Y_dims, &rows, &cols)) {
hipLaunchKernelGGL(( ColwiseMomentsCUDAKernel<T>)
, dim3(cols), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(),
rows, cols, X, mean, var);
C10_HIP_KERNEL_LAUNCH_CHECK();
return;
}
int M;
int N;
int K;
if (utils::IsBothEndsReduce(ndim, X_dims, Y_dims, &M, &N, &K)) {
DISPATCH_REDUCE_KERNEL_BY_2D_BLOCK_WITH_TYPE_1(
K,
BothEndsMomentsCUDAKernel,
T,
N,
context->cuda_stream(),
M,
N,
K,
X,
mean,
var);
return;
}
std::vector<int> axes(ndim);
utils::ComputeTransposeAxesForReduceOp(ndim, Y_dims, axes.data());
const int outer_size = Y_size;
const int inner_size = X_size / Y_size;
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1(
ndim,
MomentsCUDAImpl,
T,
outer_size,
inner_size,
X_dims,
axes.data(),
X,
mean,
var,
context);
}
} // namespace
#define DELEGATE_CUDA_REDUCE_FUNCTION(T, Func, Reducer, kInit) \
template <> \
CAFFE2_CUDA_EXPORT void Func<T, CUDAContext>( \
const int ndim, \
const int* X_dims, \
const int* Y_dims, \
const T alpha, \
const T* X, \
T* Y, \
CUDAContext* context) { \
ReduceTensorCUDA<T, Reducer>( \
ndim, X_dims, Y_dims, Reducer(), kInit, alpha, X, Y, context); \
}
DELEGATE_CUDA_REDUCE_FUNCTION(
std::int32_t,
ReduceMin,
hipcub::Min,
std::numeric_limits<std::int32_t>::max())
DELEGATE_CUDA_REDUCE_FUNCTION(
std::int64_t,
ReduceMin,
hipcub::Min,
std::numeric_limits<std::int64_t>::max())
DELEGATE_CUDA_REDUCE_FUNCTION(
float,
ReduceMin,
hipcub::Min,
std::numeric_limits<float>::max())
DELEGATE_CUDA_REDUCE_FUNCTION(
double,
ReduceMin,
hipcub::Min,
std::numeric_limits<double>::max())
DELEGATE_CUDA_REDUCE_FUNCTION(
std::int32_t,
ReduceMax,
hipcub::Max,
std::numeric_limits<std::int32_t>::lowest())
DELEGATE_CUDA_REDUCE_FUNCTION(
std::int64_t,
ReduceMax,
hipcub::Max,
std::numeric_limits<std::int64_t>::lowest())
DELEGATE_CUDA_REDUCE_FUNCTION(
float,
ReduceMax,
hipcub::Max,
std::numeric_limits<float>::lowest())
DELEGATE_CUDA_REDUCE_FUNCTION(
double,
ReduceMax,
hipcub::Max,
std::numeric_limits<double>::lowest())
DELEGATE_CUDA_REDUCE_FUNCTION(std::int32_t, ReduceSum, hipcub::Sum, 0)
DELEGATE_CUDA_REDUCE_FUNCTION(std::int64_t, ReduceSum, hipcub::Sum, 0LL)
DELEGATE_CUDA_REDUCE_FUNCTION(float, ReduceSum, hipcub::Sum, 0.0f)
DELEGATE_CUDA_REDUCE_FUNCTION(double, ReduceSum, hipcub::Sum, 0.0)
#undef DELEGATE_CUDA_REDUCE_FUNCTION
#define CAFFE2_SPECIALIZED_CUDA_REDUCE_MEAN(T) \
template <> \
CAFFE2_CUDA_EXPORT void ReduceMean<T, CUDAContext>( \
const int ndim, \
const int* X_dims, \
const int* Y_dims, \
const T alpha, \
const T* X, \
T* Y, \
CUDAContext* context) { \
int scale = 1; \
for (int i = 0; i < ndim; ++i) { \
if (Y_dims[i] == 1) { \
scale *= X_dims[i]; \
} \
} \
ReduceTensorCUDA<T, hipcub::Sum>( \
ndim, \
X_dims, \
Y_dims, \
hipcub::Sum(), \
T(0), \
alpha / static_cast<T>(scale), \
X, \
Y, \
context); \
}
CAFFE2_SPECIALIZED_CUDA_REDUCE_MEAN(float)
#undef CAFFE2_SPECIALIZED_CUDA_REDUCE_MEAN
#define CAFFE2_SPECIALIZED_CUDA_MOMENTS(T) \
template <> \
CAFFE2_CUDA_EXPORT void Moments<T, CUDAContext>( \
const int ndim, \
const int* X_dims, \
const int* Y_dims, \
const T* X, \
T* mean, \
T* var, \
CUDAContext* context) { \
MomentsCUDA<T>(ndim, X_dims, Y_dims, X, mean, var, context); \
}
CAFFE2_SPECIALIZED_CUDA_MOMENTS(float)
CAFFE2_SPECIALIZED_CUDA_MOMENTS(double)
#undef CAFFE2_SPECIALIZED_CUDA_MOMENTS
} // namespace math
} // namespace caffe2
|
6467152a8a0af50c1a5371f15e0256084cd183bc.cu
|
#include "caffe2/utils/math/reduce.h"
#include <algorithm>
#include <functional>
#include <limits>
#include <numeric>
#include <vector>
#include <cub/block/block_reduce.cuh>
#include <cub/cub.cuh>
#include <thrust/execution_policy.h>
#include <thrust/reduce.h>
#include <thrust/transform.h>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/math/elementwise.h"
#include "caffe2/utils/math/reduce.cuh"
#include "caffe2/utils/math/utils.h"
namespace caffe2 {
namespace math {
namespace {
template <typename T, class Reducer>
__global__ void RowwiseReduceCUDAKernel(
const int cols,
const Reducer reducer,
const T init,
const T alpha,
const T* X,
T* Y) {
__shared__ typename BlockReduce<T>::TempStorage temp_storage;
const int r = blockIdx.x;
T val = init;
for (int c = threadIdx.x; c < cols; c += blockDim.x) {
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
val = reducer(val, __ldg(X + r * cols + c));
#else
val = reducer(val, X[r * cols + c]);
#endif
}
val = BlockReduce<T>(temp_storage).Reduce(val, reducer);
if (threadIdx.x == 0) {
Y[r] = val * alpha;
}
}
template <typename T, class Reducer>
__global__ void ColwiseReduceCUDAKernel(
const int rows,
const int cols,
const Reducer reducer,
const T init,
const T alpha,
const T* X,
T* Y) {
__shared__ typename BlockReduce<T>::TempStorage temp_storage;
const int c = blockIdx.x;
T val = init;
for (int r = threadIdx.x; r < rows; r += blockDim.x) {
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
val = reducer(val, __ldg(X + r * cols + c));
#else
val = reducer(val, X[r * cols + c]);
#endif
}
val = BlockReduce<T>(temp_storage).Reduce(val, reducer);
if (threadIdx.x == 0) {
Y[c] = val * alpha;
}
}
template <typename T, class Reducer, int kBlockDimX, int kBlockDimY>
__global__ void BothEndsReduceCUDAKernel(
const int M,
const int N,
const int K,
const Reducer reducer,
const T init,
const T alpha,
const T* X,
T* Y) {
__shared__ typename BlockReduce2D<T, kBlockDimX, kBlockDimY>::TempStorage
temp_storage;
const int n = blockIdx.x;
T val = init;
for (int m = threadIdx.x; m < M; m += blockDim.x) {
for (int k = threadIdx.y; k < K; k += blockDim.y) {
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
val = reducer(val, __ldg(X + (m * N + n) * K + k));
#else
val = reducer(val, X[(m * N + n) * K + k]);
#endif
}
}
val = BlockReduce2D<T, kBlockDimX, kBlockDimY>(temp_storage)
.Reduce(val, reducer);
if (threadIdx.x == 0 && threadIdx.y == 0) {
Y[n] = val * alpha;
}
}
template <typename T, class Reducer, int D>
__global__ void ReduceTensorCUDAKernel(
const int inner_size,
const SimpleArray<int, D> X_strides,
const SimpleArray<int, D> Y_dims,
const Reducer reducer,
const T init,
const T alpha,
const T* X,
T* Y) {
__shared__ typename BlockReduce<T>::TempStorage temp_storage;
const int x = blockIdx.x;
T val = init;
for (int y = threadIdx.x; y < inner_size; y += blockDim.x) {
int X_index = 0;
int Y_index = x * inner_size + y;
#pragma unroll
for (int d = D - 1; d >= 0; --d) {
X_index += Y_index % Y_dims.data[d] * X_strides.data[d];
Y_index /= Y_dims.data[d];
}
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
val = reducer(val, __ldg(X + X_index));
#else
val = reducer(val, X[X_index]);
#endif
}
val = BlockReduce<T>(temp_storage).Reduce(val, reducer);
if (threadIdx.x == 0) {
Y[x] = val * alpha;
}
}
template <typename T, class Reducer, int D>
void ReduceTensorCUDAImpl(
const int outer_size,
const int inner_size,
const int* dims,
const int* axes,
const Reducer& reducer,
const T init,
const T alpha,
const T* X,
T* Y,
CUDAContext* context) {
SimpleArray<int, D> X_strides;
SimpleArray<int, D> Y_dims;
utils::ComputeTransposedStrides(D, dims, axes, X_strides.data);
for (int i = 0; i < D; ++i) {
Y_dims.data[i] = dims[axes[i]];
}
ReduceTensorCUDAKernel<T, Reducer, D>
<<<outer_size, CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(
inner_size, X_strides, Y_dims, reducer, init, alpha, X, Y);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
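// Dispatches an N-dimensional reduction to the cheapest applicable kernel:
// an elementwise Scale when nothing is actually reduced, RowwiseReduce /
// ColwiseReduce when the reduced dimensions form a trailing / leading block,
// BothEndsReduce when only the middle dimensions are kept, and otherwise the
// generic strided kernel above driven by transpose-style index arithmetic.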
template <typename T, class Reducer>
void ReduceTensorCUDA(
const int ndim,
const int* X_dims,
const int* Y_dims,
const Reducer& reducer,
const T init,
const T alpha,
const T* X,
T* Y,
CUDAContext* context) {
CAFFE_ENFORCE(utils::CheckReduceDims(ndim, X_dims, Y_dims));
const int X_size =
std::accumulate(X_dims, X_dims + ndim, 1, std::multiplies<int>());
const int Y_size =
std::accumulate(Y_dims, Y_dims + ndim, 1, std::multiplies<int>());
if (X_size == 0) {
Set<T, CUDAContext>(Y_size, init * alpha, Y, context);
return;
}
if (std::equal(X_dims, X_dims + ndim, Y_dims)) {
Scale<T, T, CUDAContext>(X_size, alpha, X, Y, context);
return;
}
int rows;
int cols;
if (utils::IsRowwiseReduce(ndim, X_dims, Y_dims, &rows, &cols)) {
RowwiseReduceCUDAKernel<T, Reducer>
<<<rows, CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(
cols, reducer, init, alpha, X, Y);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return;
}
if (utils::IsColwiseReduce(ndim, X_dims, Y_dims, &rows, &cols)) {
ColwiseReduceCUDAKernel<T, Reducer>
<<<cols, CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(
rows, cols, reducer, init, alpha, X, Y);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return;
}
int M;
int N;
int K;
if (utils::IsBothEndsReduce(ndim, X_dims, Y_dims, &M, &N, &K)) {
DISPATCH_REDUCE_KERNEL_BY_2D_BLOCK_WITH_TYPE_2(
K,
BothEndsReduceCUDAKernel,
T,
Reducer,
N,
context->cuda_stream(),
M,
N,
K,
reducer,
init,
alpha,
X,
Y);
return;
}
std::vector<int> axes(ndim);
utils::ComputeTransposeAxesForReduceOp(ndim, Y_dims, axes.data());
const int outer_size = Y_size;
const int inner_size = X_size / Y_size;
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_2(
ndim,
ReduceTensorCUDAImpl,
T,
Reducer,
outer_size,
inner_size,
X_dims,
axes.data(),
reducer,
init,
alpha,
X,
Y,
context);
}
template <typename T>
__global__ void
RowwiseMomentsCUDAKernel(const int cols, const T* X, T* mean, T* var) {
__shared__ typename BlockReduce<T>::TempStorage m_storage;
__shared__ typename BlockReduce<T>::TempStorage v_storage;
const T scale = T(1) / static_cast<T>(cols);
const int r = blockIdx.x;
T m_val = 0;
T v_val = 0;
for (int c = threadIdx.x; c < cols; c += blockDim.x) {
const int X_index = r * cols + c;
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
m_val += __ldg(X + X_index);
v_val += __ldg(X + X_index) * __ldg(X + X_index);
#else
m_val += X[X_index];
v_val += X[X_index] * X[X_index];
#endif
}
m_val = BlockReduce<T>(m_storage).Sum(m_val);
v_val = BlockReduce<T>(v_storage).Sum(v_val);
if (threadIdx.x == 0) {
const T mu = m_val * scale;
mean[r] = mu;
var[r] = v_val * scale - mu * mu;
}
}
template <typename T>
__global__ void ColwiseMomentsCUDAKernel(
const int rows,
const int cols,
const T* X,
T* mean,
T* var) {
__shared__ typename BlockReduce<T>::TempStorage m_storage;
__shared__ typename BlockReduce<T>::TempStorage v_storage;
const T scale = T(1) / static_cast<T>(rows);
const int c = blockIdx.x;
T m_val = 0;
T v_val = 0;
for (int r = threadIdx.x; r < rows; r += blockDim.x) {
const int X_index = r * cols + c;
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
m_val += __ldg(X + X_index);
v_val += __ldg(X + X_index) * __ldg(X + X_index);
#else
m_val += X[X_index];
v_val += X[X_index] * X[X_index];
#endif
}
m_val = BlockReduce<T>(m_storage).Sum(m_val);
v_val = BlockReduce<T>(v_storage).Sum(v_val);
if (threadIdx.x == 0) {
const T mu = m_val * scale;
mean[c] = mu;
var[c] = v_val * scale - mu * mu;
}
}
template <typename T, int kBlockDimX, int kBlockDimY>
__global__ void BothEndsMomentsCUDAKernel(
const int M,
const int N,
const int K,
const T* X,
T* mean,
T* var) {
__shared__
typename BlockReduce2D<T, kBlockDimX, kBlockDimY>::TempStorage m_storage;
__shared__
typename BlockReduce2D<T, kBlockDimX, kBlockDimY>::TempStorage v_storage;
const T scale = T(1) / static_cast<T>(M * K);
const int n = blockIdx.x;
T m_val = 0;
T v_val = 0;
for (int m = threadIdx.x; m < M; m += blockDim.x) {
for (int k = threadIdx.y; k < K; k += blockDim.y) {
const int X_index = (m * N + n) * K + k;
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
m_val += __ldg(X + X_index);
v_val += __ldg(X + X_index) * __ldg(X + X_index);
#else
m_val += X[X_index];
v_val += X[X_index] * X[X_index];
#endif
}
}
m_val = BlockReduce2D<T, kBlockDimX, kBlockDimY>(m_storage).Sum(m_val);
v_val = BlockReduce2D<T, kBlockDimX, kBlockDimY>(v_storage).Sum(v_val);
if (threadIdx.x == 0 && threadIdx.y == 0) {
const T mu = m_val * scale;
mean[n] = mu;
var[n] = v_val * scale - mu * mu;
}
}
template <typename T, int D>
__global__ void MomentsCUDAKernel(
const int inner_size,
const SimpleArray<int, D> X_strides,
const SimpleArray<int, D> Y_dims,
const T* X,
T* mean,
T* var) {
__shared__ typename BlockReduce<T>::TempStorage m_storage;
__shared__ typename BlockReduce<T>::TempStorage v_storage;
const T scale = T(1) / static_cast<T>(inner_size);
const int x = blockIdx.x;
T m_val = 0;
T v_val = 0;
for (int y = threadIdx.x; y < inner_size; y += blockDim.x) {
int X_index = 0;
int Y_index = x * inner_size + y;
#pragma unroll
for (int d = D - 1; d >= 0; --d) {
X_index += Y_index % Y_dims.data[d] * X_strides.data[d];
Y_index /= Y_dims.data[d];
}
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
m_val += __ldg(X + X_index);
v_val += __ldg(X + X_index) * __ldg(X + X_index);
#else
m_val += X[X_index];
v_val += X[X_index] * X[X_index];
#endif
}
m_val = BlockReduce<T>(m_storage).Sum(m_val);
v_val = BlockReduce<T>(v_storage).Sum(v_val);
if (threadIdx.x == 0) {
const T mu = m_val * scale;
mean[x] = mu;
var[x] = v_val * scale - mu * mu;
}
}
template <typename T, int D>
void MomentsCUDAImpl(
const int outer_size,
const int inner_size,
const int* dims,
const int* axes,
const T* X,
T* mean,
T* var,
CUDAContext* context) {
SimpleArray<int, D> X_strides;
SimpleArray<int, D> Y_dims;
utils::ComputeTransposedStrides(D, dims, axes, X_strides.data);
for (int i = 0; i < D; ++i) {
Y_dims.data[i] = dims[axes[i]];
}
MomentsCUDAKernel<T, D>
<<<outer_size, CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(
inner_size, X_strides, Y_dims, X, mean, var);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
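// Computes per-group mean and (biased) variance, var = E[x^2] - mean^2, using
// the same dispatch structure as ReduceTensorCUDA above: row-wise, column-wise,
// both-ends, or the generic strided kernel.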
template <typename T>
void MomentsCUDA(
const int ndim,
const int* X_dims,
const int* Y_dims,
const T* X,
T* mean,
T* var,
CUDAContext* context) {
CAFFE_ENFORCE(utils::CheckReduceDims(ndim, X_dims, Y_dims));
const int X_size =
std::accumulate(X_dims, X_dims + ndim, 1, std::multiplies<int>());
const int Y_size =
std::accumulate(Y_dims, Y_dims + ndim, 1, std::multiplies<int>());
if (X_size == 0) {
Set<T, CUDAContext>(Y_size, T(0), mean, context);
Set<T, CUDAContext>(Y_size, T(0), var, context);
return;
}
if (std::equal(X_dims, X_dims + ndim, Y_dims)) {
cudaMemcpyAsync(
mean,
X,
sizeof(T) * X_size,
cudaMemcpyDeviceToDevice,
context->cuda_stream());
Set<T, CUDAContext>(Y_size, T(0), var, context);
return;
}
int rows;
int cols;
if (utils::IsRowwiseReduce(ndim, X_dims, Y_dims, &rows, &cols)) {
RowwiseMomentsCUDAKernel<T>
<<<rows, CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(
cols, X, mean, var);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return;
}
if (utils::IsColwiseReduce(ndim, X_dims, Y_dims, &rows, &cols)) {
ColwiseMomentsCUDAKernel<T>
<<<cols, CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(
rows, cols, X, mean, var);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return;
}
int M;
int N;
int K;
if (utils::IsBothEndsReduce(ndim, X_dims, Y_dims, &M, &N, &K)) {
DISPATCH_REDUCE_KERNEL_BY_2D_BLOCK_WITH_TYPE_1(
K,
BothEndsMomentsCUDAKernel,
T,
N,
context->cuda_stream(),
M,
N,
K,
X,
mean,
var);
return;
}
std::vector<int> axes(ndim);
utils::ComputeTransposeAxesForReduceOp(ndim, Y_dims, axes.data());
const int outer_size = Y_size;
const int inner_size = X_size / Y_size;
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1(
ndim,
MomentsCUDAImpl,
T,
outer_size,
inner_size,
X_dims,
axes.data(),
X,
mean,
var,
context);
}
} // namespace
#define DELEGATE_CUDA_REDUCE_FUNCTION(T, Func, Reducer, kInit) \
template <> \
CAFFE2_CUDA_EXPORT void Func<T, CUDAContext>( \
const int ndim, \
const int* X_dims, \
const int* Y_dims, \
const T alpha, \
const T* X, \
T* Y, \
CUDAContext* context) { \
ReduceTensorCUDA<T, Reducer>( \
ndim, X_dims, Y_dims, Reducer(), kInit, alpha, X, Y, context); \
}
DELEGATE_CUDA_REDUCE_FUNCTION(
std::int32_t,
ReduceMin,
cub::Min,
std::numeric_limits<std::int32_t>::max())
DELEGATE_CUDA_REDUCE_FUNCTION(
std::int64_t,
ReduceMin,
cub::Min,
std::numeric_limits<std::int64_t>::max())
DELEGATE_CUDA_REDUCE_FUNCTION(
float,
ReduceMin,
cub::Min,
std::numeric_limits<float>::max())
DELEGATE_CUDA_REDUCE_FUNCTION(
double,
ReduceMin,
cub::Min,
std::numeric_limits<double>::max())
DELEGATE_CUDA_REDUCE_FUNCTION(
std::int32_t,
ReduceMax,
cub::Max,
std::numeric_limits<std::int32_t>::lowest())
DELEGATE_CUDA_REDUCE_FUNCTION(
std::int64_t,
ReduceMax,
cub::Max,
std::numeric_limits<std::int64_t>::lowest())
DELEGATE_CUDA_REDUCE_FUNCTION(
float,
ReduceMax,
cub::Max,
std::numeric_limits<float>::lowest())
DELEGATE_CUDA_REDUCE_FUNCTION(
double,
ReduceMax,
cub::Max,
std::numeric_limits<double>::lowest())
DELEGATE_CUDA_REDUCE_FUNCTION(std::int32_t, ReduceSum, cub::Sum, 0)
DELEGATE_CUDA_REDUCE_FUNCTION(std::int64_t, ReduceSum, cub::Sum, 0LL)
DELEGATE_CUDA_REDUCE_FUNCTION(float, ReduceSum, cub::Sum, 0.0f)
DELEGATE_CUDA_REDUCE_FUNCTION(double, ReduceSum, cub::Sum, 0.0)
#undef DELEGATE_CUDA_REDUCE_FUNCTION
#define CAFFE2_SPECIALIZED_CUDA_REDUCE_MEAN(T) \
template <> \
CAFFE2_CUDA_EXPORT void ReduceMean<T, CUDAContext>( \
const int ndim, \
const int* X_dims, \
const int* Y_dims, \
const T alpha, \
const T* X, \
T* Y, \
CUDAContext* context) { \
int scale = 1; \
for (int i = 0; i < ndim; ++i) { \
if (Y_dims[i] == 1) { \
scale *= X_dims[i]; \
} \
} \
ReduceTensorCUDA<T, cub::Sum>( \
ndim, \
X_dims, \
Y_dims, \
cub::Sum(), \
T(0), \
alpha / static_cast<T>(scale), \
X, \
Y, \
context); \
}
CAFFE2_SPECIALIZED_CUDA_REDUCE_MEAN(float)
#undef CAFFE2_SPECIALIZED_CUDA_REDUCE_MEAN
#define CAFFE2_SPECIALIZED_CUDA_MOMENTS(T) \
template <> \
CAFFE2_CUDA_EXPORT void Moments<T, CUDAContext>( \
const int ndim, \
const int* X_dims, \
const int* Y_dims, \
const T* X, \
T* mean, \
T* var, \
CUDAContext* context) { \
MomentsCUDA<T>(ndim, X_dims, Y_dims, X, mean, var, context); \
}
CAFFE2_SPECIALIZED_CUDA_MOMENTS(float)
CAFFE2_SPECIALIZED_CUDA_MOMENTS(double)
#undef CAFFE2_SPECIALIZED_CUDA_MOMENTS
} // namespace math
} // namespace caffe2
|
1096416fe526d63e9b2e568ee183b2ed8c5b0211.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void getIndex(unsigned int *d_index, unsigned int *d_scan, unsigned int *d_mask, unsigned int in_size, unsigned int total_pre) {
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
if (index < in_size) {
if (d_mask[index] == 1) {
d_index[index] = total_pre + d_scan[index];
}
}
}
|
1096416fe526d63e9b2e568ee183b2ed8c5b0211.cu
|
#include "includes.h"
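// Stream-compaction scatter: each element with mask == 1 is assigned the output
// index total_pre (presumably the count carried over from earlier batches) plus
// its prefix-sum offset d_scan[index].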
__global__ void getIndex(unsigned int *d_index, unsigned int *d_scan, unsigned int *d_mask, unsigned int in_size, unsigned int total_pre) {
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
if (index < in_size) {
if (d_mask[index] == 1) {
d_index[index] = total_pre + d_scan[index];
}
}
}
|
1e6a6737e048e36c0180cf2f91dad1e8f33cb9d5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "Thumbnail_ushort.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
hipTextureObject_t ushort_tex = 1;
int *histogram = NULL;
hipMalloc(&histogram, XSIZE*YSIZE*sizeof(int));
int src_width = XSIZE;
int src_height = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((Thumbnail_ushort), dim3(gridBlock), dim3(threadBlock), 0, 0, ushort_tex, histogram, src_width, src_height);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((Thumbnail_ushort), dim3(gridBlock), dim3(threadBlock), 0, 0, ushort_tex, histogram, src_width, src_height);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((Thumbnail_ushort), dim3(gridBlock), dim3(threadBlock), 0, 0, ushort_tex, histogram, src_width, src_height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
1e6a6737e048e36c0180cf2f91dad1e8f33cb9d5.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "Thumbnail_ushort.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
cudaTextureObject_t ushort_tex = 1;
int *histogram = NULL;
cudaMalloc(&histogram, XSIZE*YSIZE*sizeof(int));
int src_width = XSIZE;
int src_height = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
Thumbnail_ushort<<<gridBlock,threadBlock>>>(ushort_tex,histogram,src_width,src_height);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
Thumbnail_ushort<<<gridBlock,threadBlock>>>(ushort_tex,histogram,src_width,src_height);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
Thumbnail_ushort<<<gridBlock,threadBlock>>>(ushort_tex,histogram,src_width,src_height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
5da32fbb99152ad7532681df76876cab75edc28d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <unistd.h>
#include <sys/wait.h>
#include <sys/time.h>
#define VSIZE 1024*50000
#define TSIZE 1024
#define BSIZE VSIZE/TSIZE
#define ITE 10
__global__ void add(float* a,float* b){
int idx = blockDim.x * blockIdx.x + threadIdx.x;
b[idx] += 1;
a[idx] += b[idx];
}
int main(){
float *ha,*hb;
float *da,*db;
ha = (float*)malloc(sizeof(float)*VSIZE);
hb = (float*)malloc(sizeof(float)*VSIZE);
hipMalloc((void**)&da,sizeof(float)*VSIZE);
hipMalloc((void**)&db,sizeof(float)*VSIZE);
for(int i = 0 ; i < VSIZE ; i ++){
ha[i] = 0.0f;
hb[i] = 0.0f;
}
hipMemcpy(da,ha,sizeof(float)*VSIZE,hipMemcpyHostToDevice);
hipMemcpy(db,hb,sizeof(float)*VSIZE,hipMemcpyHostToDevice);
dim3 threads(TSIZE,1,1);
dim3 blocks (BSIZE,1,1);
printf("threads : %d\n",threads.x);
printf("blocks : %d\n",blocks.x);
for(int i = 0 ; i < ITE ; i ++){
hipLaunchKernelGGL(( add), dim3(blocks),dim3(threads), 0, 0, da,db);
hipDeviceSynchronize();
}
hipMemcpy(ha,da,sizeof(float)*VSIZE,hipMemcpyDeviceToHost);
for(int i = 0 ; i < VSIZE ; i ++){
if(ha[i] != ((ITE+1)*ITE)/2 ){
printf("ha[%d]\t%f\n",i,ha[i]);
printf("Result TEST : FAILED\n");
exit(-1);
}
}
printf("Result TEST : PASS\n");
free(ha);
free(hb);
hipFree(da);
hipFree(db);
return 0;
}
|
5da32fbb99152ad7532681df76876cab75edc28d.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <unistd.h>
#include <sys/wait.h>
#include <sys/time.h>
#define VSIZE 1024*50000
#define TSIZE 1024
#define BSIZE VSIZE/TSIZE
#define ITE 10
__global__ void add(float* a,float* b){
int idx = blockDim.x * blockIdx.x + threadIdx.x;
b[idx] += 1;
a[idx] += b[idx];
}
int main(){
float *ha,*hb;
float *da,*db;
ha = (float*)malloc(sizeof(float)*VSIZE);
hb = (float*)malloc(sizeof(float)*VSIZE);
cudaMalloc((void**)&da,sizeof(float)*VSIZE);
cudaMalloc((void**)&db,sizeof(float)*VSIZE);
for(int i = 0 ; i < VSIZE ; i ++){
ha[i] = 0.0f;
hb[i] = 0.0f;
}
cudaMemcpy(da,ha,sizeof(float)*VSIZE,cudaMemcpyHostToDevice);
cudaMemcpy(db,hb,sizeof(float)*VSIZE,cudaMemcpyHostToDevice);
dim3 threads(TSIZE,1,1);
dim3 blocks (BSIZE,1,1);
printf("threads : %d\n",threads.x);
printf("blocks : %d\n",blocks.x);
for(int i = 0 ; i < ITE ; i ++){
add<<<blocks,threads>>>(da,db);
cudaDeviceSynchronize();
}
cudaMemcpy(ha,da,sizeof(float)*VSIZE,cudaMemcpyDeviceToHost);
for(int i = 0 ; i < VSIZE ; i ++){
if(ha[i] != ((ITE+1)*ITE)/2 ){
printf("ha[%d]\t%f\n",i,ha[i]);
printf("Result TEST : FAILED\n");
exit(-1);
}
}
printf("Result TEST : PASS\n");
free(ha);
free(hb);
cudaFree(da);
cudaFree(db);
return 0;
}
|
d9d30b4cde0ee7874fad7b910e4c28c2b449b3f0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernels_hip.cuh"
#include <stdio.h>
#include <cmath>
#include <iostream>
#include <fstream>
#include "constants.h"
__device__
inline double getBilinearInterpolatedValue_cuda(const unsigned char *img, double pt[2]) {
const unsigned char* d = &img[(int)pt[1] * width + (int)pt[0]];
double xx = pt[0] - floor(pt[0]);
double yy = pt[1] - floor(pt[1]);
return ((1 - xx) * (1 - yy) * double(d[0]) +
xx * (1 - yy) * double(d[1]) +
(1 - xx) * yy * double(d[width]) +
xx * yy * double(d[width + 1])) / 255.0;
}
__device__
inline void pix2cam_cuda(const double in[2], double out[3]) {
out[0] = (in[0] - cx) / fx;
out[1] = (in[1] - cy) / fy;
out[2] = 1.0;
}
__device__
inline void cam2pix_cuda(const double in[3], double out[2]) {
out[0] = in[0] * fx / in[2] + cx;
out[1] = in[1] * fy / in[2] + cy;
}
__device__
double norm3_cuda(const double in[3])
{
return sqrt(in[0]*in[0] + in[1]*in[1] + in[2]*in[2]);
}
__device__
double norm2_cuda(const double in[2])
{
return sqrt(in[0]*in[0] + in[1]*in[1]);
}
// inplace normalization vec 3
__device__
inline void normalize3_cuda(double in_out[3]) {
double d = sqrt(in_out[0]*in_out[0]
+ in_out[1]*in_out[1]
+ in_out[2]*in_out[2]);
in_out[0] /= d;
in_out[1] /= d;
in_out[2] /= d;
}
// inplace normalization vec 2
__device__
inline void normalize2_cuda(double in_out[2]) {
double d = sqrt(in_out[0]*in_out[0] + in_out[1]*in_out[1]);
in_out[0] /= d;
in_out[1] /= d;
}
__device__
void transform_cuda(double x[3], const double T[12], double out[3])
{
for (int i = 0; i < 3; ++i)
{
out[i] = x[0] * T[i*4] + x[1] * T[i*4+1] + x[2] * T[i*4+2] + T[i*4+3];
}
}
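// Zero-mean normalized cross-correlation between a (2*ncc_window_size+1)^2
// patch around pt1 in im1 and a bilinearly sampled patch around pt2 in im2:
//   zncc = sum((v1 - mean1) * (v2 - mean2))
//          / sqrt(sum((v1 - mean1)^2) * sum((v2 - mean2)^2) + epsilon)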
__device__
double ZNCC_cuda(const unsigned char *im1, const double pt1[2], const unsigned char *im2, const double pt2[2])
{
// no need to handle windows that fall partly outside the image: pixels within `boarder` of the edge are never processed
double v1[ncc_area], v2[ncc_area];
double s1 = 0.0, s2 = 0.0;
int idx = 0;
for (int i = -ncc_window_size; i <= ncc_window_size; ++i)
{
for (int j = -ncc_window_size; j <= ncc_window_size; ++j)
{
double val_1 = ((double) im1[((int)pt1[1] + i) * width + (int)pt1[0] + j]) / 255;
double temp_p2[2] = {pt2[0] + j, pt2[1] + i};
double val_2 = getBilinearInterpolatedValue_cuda(im2, temp_p2);
s1 += val_1;
s2 += val_2;
v1[idx] = val_1;
v2[idx] = val_2;
++idx;
}
}
double mean_1 = s1 / ncc_area;
double mean_2 = s2 / ncc_area;
double numerator = 0.0;
double den1 = 0.0, den2 = 0.0;
for (int i = 0; i < ncc_area; ++i)
{
double zv1 = v1[i] - mean_1;
double zv2 = v2[i] - mean_2;
numerator += zv1*zv2;
den1 += zv1 * zv1;
den2 += zv2 * zv2;
}
auto zncc = numerator / (sqrt(den1 * den2 + epsilon));
// std::cout << "zncc = " << zncc << "\n";
return zncc;
}
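// Searches along the epipolar line in the current image for the best match of
// the reference pixel pt. The search segment comes from projecting the depth
// range mean +/- 3*sigma (with depth clamped to >= 0.1) into the current frame;
// candidates are sampled every 0.7 px within at most +/-100 px of the projected
// mean depth and scored with ZNCC. Returns false if the best ZNCC is below 0.85,
// otherwise best_pc holds the matched pixel and epipolar_dir the normalized
// epipolar direction.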
__device__
bool epipolar_search_cuda(const unsigned char* ref, const unsigned char* cur,
const double Tcr[12], const double pt[2],
double depth_mu, double depth_sigma2,
double best_pc[2], double epipolar_dir[2])
{
double depth_sigma = sqrt(depth_sigma2);
double dmax = depth_mu + 3 * depth_sigma;
double dmin = depth_mu - 3 * depth_sigma;
dmin = max(0.1, dmin);
double pn[3];
pix2cam_cuda(pt, pn);
normalize3_cuda(pn);
double P_max[3] = {pn[0] * dmax, pn[1] * dmax, pn[2] * dmax};
double P_min[3] = {pn[0] * dmin, pn[1] * dmin, pn[2] * dmin};
double P_mu[3] = {pn[0] * depth_mu, pn[1] * depth_mu, pn[2] * depth_mu};
double P_max_cur[3], P_min_cur[3], P_mu_cur[3];
transform_cuda(P_max, Tcr, P_max_cur);
transform_cuda(P_min, Tcr, P_min_cur);
transform_cuda(P_mu, Tcr, P_mu_cur);
double pc_max[2], pc_min[2], pc_mu[2];
cam2pix_cuda(P_max_cur, pc_max);
cam2pix_cuda(P_min_cur, pc_min);
cam2pix_cuda(P_mu_cur, pc_mu);
double epipolar_line[2] = {pc_max[0] - pc_min[0], pc_max[1] - pc_min[1]};
epipolar_dir[0] = epipolar_line[0];
epipolar_dir[1] = epipolar_line[1];
normalize2_cuda(epipolar_dir);
double epipolar_line_norm = norm2_cuda(epipolar_line);
// double step = 0.7;
// int nb_samples = ::ceil(epipolar_line.norm() / step);
double half_range = 0.5 * epipolar_line_norm;
if (half_range > 100) half_range = 100;
double best_zncc = -1.0;
for (double l = -half_range; l<= half_range; l+= 0.7)
{
double p[2] = {pc_mu[0] + l * epipolar_dir[0], pc_mu[1] + l * epipolar_dir[1]};
if (p[0] < boarder || p[0] >= width-boarder || p[1] < boarder || p[1] >= height-boarder)
continue; // p is outside the cur image
double zncc = ZNCC_cuda(ref, pt, cur, p);
if (zncc > best_zncc)
{
best_zncc = zncc;
best_pc[0] = p[0];
best_pc[1] = p[1];
}
}
if (best_zncc < 0.85)
return false;
else
return true;
}
__device__
double dot3_cuda(const double a[3], const double b[3])
{
return a[0]*b[0] + a[1]*b[1] + a[2]*b[2];
}
__device__
double det2_cuda(const double A[2][2])
{
return A[0][0] * A[1][1] - A[1][0] * A[0][1];
}
__device__
void solve_Axb2_cuda(const double A[2][2], const double b[2], double res[2])
{
double det_inv = 1.0 / det2_cuda(A);
double A_inv[2][2];
A_inv[0][0] = det_inv * A[1][1];
A_inv[0][1] = -det_inv * A[0][1];
A_inv[1][0] = -det_inv * A[1][0];
A_inv[1][1] = det_inv * A[0][0];
res[0] = A_inv[0][0] * b[0] + A_inv[0][1] * b[1];
res[1] = A_inv[1][0] * b[0] + A_inv[1][1] * b[1];
}
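// update_depth_filter_cuda triangulates the match and fuses it into the per-pixel Gaussian
// depth filter. With fr the reference ray, fc the current ray and f2 = R_rc * fc, the system
//   d_r * fr = d_c * f2 + t_rc
// is solved for (d_r, d_c) in the least-squares sense through the 2x2 normal equations built
// below, and the midpoint of the two closest points gives the observed depth. The observation
// variance is estimated geometrically by shifting the match one pixel along the epipolar
// direction and measuring the induced depth change; depth and variance are then fused as a
// product of two Gaussians.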
__device__
void update_depth_filter_cuda(const double pr[2], const double pc[2], const double Trc[12], const double epipolar_dir[2], double *depth, double *cov2)
{
double fr[3];
pix2cam_cuda(pr, fr);
normalize3_cuda(fr);
double fc[3];
pix2cam_cuda(pc, fc);
normalize3_cuda(fc);
double f2[3] = {dot3_cuda(Trc, fc),
dot3_cuda(Trc+4, fc),
dot3_cuda(Trc+8, fc)};
double trc[3] = {Trc[3], Trc[7], Trc[11]};
double A[2][2];
double b[2];
A[0][0] = dot3_cuda(fr, fr);
A[0][1] = dot3_cuda(fr, f2);
A[1][0] = dot3_cuda(f2, fr);
A[1][1] = dot3_cuda(f2, f2);
A[0][1] *= -1;
A[1][1] *= -1;
b[0] = dot3_cuda(fr, trc);
b[1] = dot3_cuda(f2, trc);
if (abs(det2_cuda(A)) < 1e-20) // not invertible
return;
double res[2];
solve_Axb2_cuda(A, b, res);
double P1[3] = {fr[0] * res[0], fr[1] * res[0], fr[2] * res[0]};
double P2[3] = {trc[0] + f2[0] * res[1], trc[1] + f2[1] * res[1], trc[2] + f2[2] * res[1]}; // use f2 (= R_rc * fc) so both closest points are expressed in the reference frame
double P_est[3] = {(P1[0] + P2[0]) * 0.5,
(P1[1] + P2[1]) * 0.5,
(P1[2] + P2[2]) * 0.5};
double depth_obs = norm3_cuda(P_est);
double P[3] = {fr[0] * depth_obs, fr[1] * depth_obs, fr[2] * depth_obs};
double a[3] = {P[0] - trc[0], P[1] - trc[1], P[2] - trc[2]};
double t[3] = {trc[0], trc[1], trc[2]};
normalize3_cuda(t);
double alpha = acos(dot3_cuda(fr, t));
double beta = acos(-dot3_cuda(a, t) / norm3_cuda(a));
double pc2[2] = {pc[0] + epipolar_dir[0], pc[1] + epipolar_dir[1]};
double fc2[3];
pix2cam_cuda(pc2, fc2);
normalize3_cuda(fc2);
double beta_2 = acos(-dot3_cuda(fc2, t));
double gamma = M_PI - alpha - beta_2;
double d_noise = norm3_cuda(trc) * sin(beta_2) / sin(gamma); // law of sines
double sigma_obs = depth_obs - d_noise;
double sigma2_obs = sigma_obs * sigma_obs;
// Depth fusion
double d = depth[(int)pr[1] * width + (int)pr[0]];
double sigma2 = cov2[(int)pr[1] * width + (int)pr[0]];
double d_fused = (sigma2_obs * d + sigma2 * depth_obs) / (sigma2 + sigma2_obs);
double sigma2_fused = (sigma2 * sigma2_obs) / (sigma2 + sigma2_obs);
depth[(int)pr[1] * width + (int)pr[0]] = d_fused;
cov2[(int)pr[1] * width + (int)pr[0]] = sigma2_fused;
}
__device__ double Tcr_global[12];
__device__ double Trc_global[12];
__global__
void process_pixel_cuda(const unsigned char* ref, const unsigned char* cur, double *depth, double *cov2)
{
int j = boarder + (blockIdx.x * blockDim.x) + threadIdx.x;
int i = boarder + (blockIdx.y * blockDim.y) + threadIdx.y;
if (j >= width - boarder || i >= height - boarder) // the launch grid is rounded up; trailing threads would fall outside the processed region
return;
double depth_mu = depth[i*width+j];
double depth_sigma2 = cov2[i*width+j];
if (depth_sigma2 < min_cov || depth_sigma2 > max_cov)
return;
double pr[2] = {(double)j, (double)i};
double pc[2];
double epipolar_dir[2];
bool found = epipolar_search_cuda(ref, cur, Tcr_global, pr, depth_mu, depth_sigma2, pc, epipolar_dir);
if (!found)
return;
update_depth_filter_cuda(pr, pc, Trc_global, epipolar_dir, depth, cov2);
}
void wrapper_update_cuda(const unsigned char* ref, const unsigned char* cur, double Tcr[3][4], double Trc[3][4], double *depth, double *cov2)
{
size_t size_uchar = sizeof(unsigned char) * width * height;
size_t size_double = sizeof(double) * width * height;
unsigned char *ref_cuda, *cur_cuda;
hipMalloc(&ref_cuda, size_uchar);
hipMalloc(&cur_cuda, size_uchar);
double *depth_cuda, *cov2_cuda;
hipMalloc(&depth_cuda, size_double);
hipMalloc(&cov2_cuda, size_double);
hipMemcpy(ref_cuda, ref, size_uchar, hipMemcpyHostToDevice);
hipMemcpy(cur_cuda, cur, size_uchar, hipMemcpyHostToDevice);
hipMemcpy(depth_cuda, depth, size_double, hipMemcpyHostToDevice);
hipMemcpy(cov2_cuda, cov2, size_double, hipMemcpyHostToDevice);
int A = 480 - 2 * boarder; // height
int B = 640 - 2 * boarder; // width
dim3 block_dim(16, 16);
dim3 grid_dim(B / 16 + 1, A / 16 + 1);
// std::cout << "grid_dim " << grid_dim.x << " " << grid_dim.y << "\n";
// std::cout << "block_dim " << block_dim.x << " " << block_dim.y << "\n";
hipMemcpyToSymbol(Tcr_global, &Tcr[0][0], 12 * sizeof(double), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(Trc_global, &Trc[0][0], 12 * sizeof(double), 0, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( process_pixel_cuda), dim3(grid_dim), dim3(block_dim), 0, 0, ref_cuda, cur_cuda, depth_cuda, cov2_cuda);
hipDeviceSynchronize();
hipMemcpy(depth, depth_cuda, size_double, hipMemcpyDeviceToHost);
hipMemcpy(cov2, cov2_cuda, size_double, hipMemcpyDeviceToHost);
hipFree(ref_cuda);
hipFree(cur_cuda);
hipFree(depth_cuda);
hipFree(cov2_cuda);
}
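// Minimal host-side usage sketch (illustration only; load_gray_image and the pose values are
// hypothetical placeholders, not part of this project):
//
//   unsigned char *ref = load_gray_image("ref.png");   // width*height grayscale bytes
//   unsigned char *cur = load_gray_image("cur.png");   // width*height grayscale bytes
//   double Tcr[3][4], Trc[3][4];                        // cur<-ref and ref<-cur poses, row-major [R|t]
//   std::vector<double> depth(width * height, 3.0);     // depth prior
//   std::vector<double> cov2(width * height, 3.0);      // depth variance prior
//   wrapper_update_cuda(ref, cur, Tcr, Trc, depth.data(), cov2.data());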
|
d9d30b4cde0ee7874fad7b910e4c28c2b449b3f0.cu
|
#include "kernels.cuh"
#include <stdio.h>
#include <cmath>
#include <iostream>
#include <fstream>
#include "constants.h"
__device__
inline double getBilinearInterpolatedValue_cuda(const unsigned char *img, double pt[2]) {
const unsigned char* d = &img[(int)pt[1] * width + (int)pt[0]];
double xx = pt[0] - floor(pt[0]);
double yy = pt[1] - floor(pt[1]);
return ((1 - xx) * (1 - yy) * double(d[0]) +
xx * (1 - yy) * double(d[1]) +
(1 - xx) * yy * double(d[width]) +
xx * yy * double(d[width + 1])) / 255.0;
}
__device__
inline void pix2cam_cuda(const double in[2], double out[3]) {
out[0] = (in[0] - cx) / fx;
out[1] = (in[1] - cy) / fy;
out[2] = 1.0;
}
__device__
inline void cam2pix_cuda(const double in[3], double out[2]) {
out[0] = in[0] * fx / in[2] + cx;
out[1] = in[1] * fy / in[2] + cy;
}
__device__
double norm3_cuda(const double in[3])
{
return sqrt(in[0]*in[0] + in[1]*in[1] + in[2]*in[2]);
}
__device__
double norm2_cuda(const double in[2])
{
return sqrt(in[0]*in[0] + in[1]*in[1]);
}
// inplace normalization vec 3
__device__
inline void normalize3_cuda(double in_out[3]) {
double d = sqrt(in_out[0]*in_out[0]
+ in_out[1]*in_out[1]
+ in_out[2]*in_out[2]);
in_out[0] /= d;
in_out[1] /= d;
in_out[2] /= d;
}
// inplace normalization vec 2
__device__
inline void normalize2_cuda(double in_out[2]) {
double d = sqrt(in_out[0]*in_out[0] + in_out[1]*in_out[1]);
in_out[0] /= d;
in_out[1] /= d;
}
__device__
void transform_cuda(double x[3], const double T[12], double out[3])
{
for (int i = 0; i < 3; ++i)
{
out[i] = x[0] * T[i*4] + x[1] * T[i*4+1] + x[2] * T[i*4+2] + T[i*4+3];
}
}
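// ZNCC_cuda (below) scores a candidate match with zero-mean normalized cross-correlation
// over a (2*ncc_window_size+1)^2 = ncc_area patch:
//   ZNCC = sum((I1-mu1)*(I2-mu2)) / sqrt(sum((I1-mu1)^2) * sum((I2-mu2)^2) + epsilon)
// The reference patch is read at integer pixels while the current patch is sampled with
// bilinear interpolation, so sub-pixel positions along the epipolar line score smoothly.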
__device__
double ZNCC_cuda(const unsigned char *im1, const double pt1[2], const unsigned char *im2, const double pt2[2])
{
// no patch can fall partly outside the image: processed pixels stay at least boarder pixels away from the edges
double v1[ncc_area], v2[ncc_area];
double s1 = 0.0, s2 = 0.0;
int idx = 0;
for (int i = -ncc_window_size; i <= ncc_window_size; ++i)
{
for (int j = -ncc_window_size; j <= ncc_window_size; ++j)
{
double val_1 = ((double) im1[((int)pt1[1] + i) * width + (int)pt1[0] + j]) / 255;
double temp_p2[2] = {pt2[0] + j, pt2[1] + i};
double val_2 = getBilinearInterpolatedValue_cuda(im2, temp_p2);
s1 += val_1;
s2 += val_2;
v1[idx] = val_1;
v2[idx] = val_2;
++idx;
}
}
double mean_1 = s1 / ncc_area;
double mean_2 = s2 / ncc_area;
double numerator = 0.0;
double den1 = 0.0, den2 = 0.0;
for (int i = 0; i < ncc_area; ++i)
{
double zv1 = v1[i] - mean_1;
double zv2 = v2[i] - mean_2;
numerator += zv1*zv2;
den1 += zv1 * zv1;
den2 += zv2 * zv2;
}
auto zncc = numerator / (sqrt(den1 * den2 + epsilon));
// std::cout << "zncc = " << zncc << "\n";
return zncc;
}
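// epipolar_search_cuda projects the [depth_mu - 3*sigma, depth_mu + 3*sigma] depth interval
// of the reference pixel into the current image, giving a segment of the epipolar line.
// It samples that segment every 0.7 px around the projection of the mean depth (half-range
// capped at 100 px) and keeps the best-scoring position; the match is accepted only if the
// best ZNCC reaches 0.85.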
__device__
bool epipolar_search_cuda(const unsigned char* ref, const unsigned char* cur,
const double Tcr[12], const double pt[2],
double depth_mu, double depth_sigma2,
double best_pc[2], double epipolar_dir[2])
{
double depth_sigma = sqrt(depth_sigma2);
double dmax = depth_mu + 3 * depth_sigma;
double dmin = depth_mu - 3 * depth_sigma;
dmin = max(0.1, dmin);
double pn[3];
pix2cam_cuda(pt, pn);
normalize3_cuda(pn);
double P_max[3] = {pn[0] * dmax, pn[1] * dmax, pn[2] * dmax};
double P_min[3] = {pn[0] * dmin, pn[1] * dmin, pn[2] * dmin};
double P_mu[3] = {pn[0] * depth_mu, pn[1] * depth_mu, pn[2] * depth_mu};
double P_max_cur[3], P_min_cur[3], P_mu_cur[3];
transform_cuda(P_max, Tcr, P_max_cur);
transform_cuda(P_min, Tcr, P_min_cur);
transform_cuda(P_mu, Tcr, P_mu_cur);
double pc_max[2], pc_min[2], pc_mu[2];
cam2pix_cuda(P_max_cur, pc_max);
cam2pix_cuda(P_min_cur, pc_min);
cam2pix_cuda(P_mu_cur, pc_mu);
double epipolar_line[2] = {pc_max[0] - pc_min[0], pc_max[1] - pc_min[1]};
epipolar_dir[0] = epipolar_line[0];
epipolar_dir[1] = epipolar_line[1];
normalize2_cuda(epipolar_dir);
double epipolar_line_norm = norm2_cuda(epipolar_line);
// double step = 0.7;
// int nb_samples = std::ceil(epipolar_line.norm() / step);
double half_range = 0.5 * epipolar_line_norm;
if (half_range > 100) half_range = 100;
double best_zncc = -1.0;
for (double l = -half_range; l<= half_range; l+= 0.7)
{
double p[2] = {pc_mu[0] + l * epipolar_dir[0], pc_mu[1] + l * epipolar_dir[1]};
if (p[0] < boarder || p[0] >= width-boarder || p[1] < boarder || p[1] >= height-boarder)
continue; // p is outside the cur image
double zncc = ZNCC_cuda(ref, pt, cur, p);
if (zncc > best_zncc)
{
best_zncc = zncc;
best_pc[0] = p[0];
best_pc[1] = p[1];
}
}
if (best_zncc < 0.85)
return false;
else
return true;
}
__device__
double dot3_cuda(const double a[3], const double b[3])
{
return a[0]*b[0] + a[1]*b[1] + a[2]*b[2];
}
__device__
double det2_cuda(const double A[2][2])
{
return A[0][0] * A[1][1] - A[1][0] * A[0][1];
}
__device__
void solve_Axb2_cuda(const double A[2][2], const double b[2], double res[2])
{
double det_inv = 1.0 / det2_cuda(A);
double A_inv[2][2];
A_inv[0][0] = det_inv * A[1][1];
A_inv[0][1] = -det_inv * A[0][1];
A_inv[1][0] = -det_inv * A[1][0];
A_inv[1][1] = det_inv * A[0][0];
res[0] = A_inv[0][0] * b[0] + A_inv[0][1] * b[1];
res[1] = A_inv[1][0] * b[0] + A_inv[1][1] * b[1];
}
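// update_depth_filter_cuda triangulates the match and fuses it into the per-pixel Gaussian
// depth filter. With fr the reference ray, fc the current ray and f2 = R_rc * fc, the system
//   d_r * fr = d_c * f2 + t_rc
// is solved for (d_r, d_c) in the least-squares sense through the 2x2 normal equations built
// below, and the midpoint of the two closest points gives the observed depth. The observation
// variance is estimated geometrically by shifting the match one pixel along the epipolar
// direction and measuring the induced depth change; depth and variance are then fused as a
// product of two Gaussians.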
__device__
void update_depth_filter_cuda(const double pr[2], const double pc[2], const double Trc[12], const double epipolar_dir[2], double *depth, double *cov2)
{
double fr[3];
pix2cam_cuda(pr, fr);
normalize3_cuda(fr);
double fc[3];
pix2cam_cuda(pc, fc);
normalize3_cuda(fc);
double f2[3] = {dot3_cuda(Trc, fc),
dot3_cuda(Trc+4, fc),
dot3_cuda(Trc+8, fc)};
double trc[3] = {Trc[3], Trc[7], Trc[11]};
double A[2][2];
double b[2];
A[0][0] = dot3_cuda(fr, fr);
A[0][1] = dot3_cuda(fr, f2);
A[1][0] = dot3_cuda(f2, fr);
A[1][1] = dot3_cuda(f2, f2);
A[0][1] *= -1;
A[1][1] *= -1;
b[0] = dot3_cuda(fr, trc);
b[1] = dot3_cuda(f2, trc);
if (abs(det2_cuda(A)) < 1e-20) // not invertible
return;
double res[2];
solve_Axb2_cuda(A, b, res);
double P1[3] = {fr[0] * res[0], fr[1] * res[0], fr[2] * res[0]};
double P2[3] = {trc[0] + f2[0] * res[1], trc[1] + f2[1] * res[1], trc[2] + f2[2] * res[1]}; // use f2 (= R_rc * fc) so both closest points are expressed in the reference frame
double P_est[3] = {(P1[0] + P2[0]) * 0.5,
(P1[1] + P2[1]) * 0.5,
(P1[2] + P2[2]) * 0.5};
double depth_obs = norm3_cuda(P_est);
double P[3] = {fr[0] * depth_obs, fr[1] * depth_obs, fr[2] * depth_obs};
double a[3] = {P[0] - trc[0], P[1] - trc[1], P[2] - trc[2]};
double t[3] = {trc[0], trc[1], trc[2]};
normalize3_cuda(t);
double alpha = acos(dot3_cuda(fr, t));
double beta = acos(-dot3_cuda(a, t) / norm3_cuda(a));
double pc2[2] = {pc[0] + epipolar_dir[0], pc[1] + epipolar_dir[1]};
double fc2[3];
pix2cam_cuda(pc2, fc2);
normalize3_cuda(fc2);
double beta_2 = acos(-dot3_cuda(fc2, t));
double gamma = M_PI - alpha - beta_2;
double d_noise = norm3_cuda(trc) * sin(beta_2) / sin(gamma); // law of sines
double sigma_obs = depth_obs - d_noise;
double sigma2_obs = sigma_obs * sigma_obs;
// Depth fusion
double d = depth[(int)pr[1] * width + (int)pr[0]];
double sigma2 = cov2[(int)pr[1] * width + (int)pr[0]];
double d_fused = (sigma2_obs * d + sigma2 * depth_obs) / (sigma2 + sigma2_obs);
double sigma2_fused = (sigma2 * sigma2_obs) / (sigma2 + sigma2_obs);
depth[(int)pr[1] * width + (int)pr[0]] = d_fused;
cov2[(int)pr[1] * width + (int)pr[0]] = sigma2_fused;
}
__device__ double Tcr_global[12];
__device__ double Trc_global[12];
__global__
void process_pixel_cuda(const unsigned char* ref, const unsigned char* cur, double *depth, double *cov2)
{
int j = boarder + (blockIdx.x * blockDim.x) + threadIdx.x;
int i = boarder + (blockIdx.y * blockDim.y) + threadIdx.y;
if (j >= width - boarder || i >= height - boarder) // the launch grid is rounded up; trailing threads would fall outside the processed region
return;
double depth_mu = depth[i*width+j];
double depth_sigma2 = cov2[i*width+j];
if (depth_sigma2 < min_cov || depth_sigma2 > max_cov)
return;
double pr[2] = {(double)j, (double)i};
double pc[2];
double epipolar_dir[2];
bool found = epipolar_search_cuda(ref, cur, Tcr_global, pr, depth_mu, depth_sigma2, pc, epipolar_dir);
if (!found)
return;
update_depth_filter_cuda(pr, pc, Trc_global, epipolar_dir, depth, cov2);
}
void wrapper_update_cuda(const unsigned char* ref, const unsigned char* cur, double Tcr[3][4], double Trc[3][4], double *depth, double *cov2)
{
size_t size_uchar = sizeof(unsigned char) * width * height;
size_t size_double = sizeof(double) * width * height;
unsigned char *ref_cuda, *cur_cuda;
cudaMalloc(&ref_cuda, size_uchar);
cudaMalloc(&cur_cuda, size_uchar);
double *depth_cuda, *cov2_cuda;
cudaMalloc(&depth_cuda, size_double);
cudaMalloc(&cov2_cuda, size_double);
cudaMemcpy(ref_cuda, ref, size_uchar, cudaMemcpyHostToDevice);
cudaMemcpy(cur_cuda, cur, size_uchar, cudaMemcpyHostToDevice);
cudaMemcpy(depth_cuda, depth, size_double, cudaMemcpyHostToDevice);
cudaMemcpy(cov2_cuda, cov2, size_double, cudaMemcpyHostToDevice);
int A = 480 - 2 * boarder; // height
int B = 640 - 2 * boarder; // width
dim3 block_dim(16, 16);
dim3 grid_dim(B / 16 + 1, A / 16 + 1);
// std::cout << "grid_dim " << grid_dim.x << " " << grid_dim.y << "\n";
// std::cout << "block_dim " << block_dim.x << " " << block_dim.y << "\n";
cudaMemcpyToSymbol(Tcr_global, &Tcr[0][0], 12 * sizeof(double), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(Trc_global, &Trc[0][0], 12 * sizeof(double), 0, cudaMemcpyHostToDevice);
process_pixel_cuda<<<grid_dim, block_dim>>>(ref_cuda, cur_cuda, depth_cuda, cov2_cuda);
cudaDeviceSynchronize();
cudaMemcpy(depth, depth_cuda, size_double, cudaMemcpyDeviceToHost);
cudaMemcpy(cov2, cov2_cuda, size_double, cudaMemcpyDeviceToHost);
cudaFree(ref_cuda);
cudaFree(cur_cuda);
cudaFree(depth_cuda);
cudaFree(cov2_cuda);
}
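// Minimal host-side usage sketch (illustration only; load_gray_image and the pose values are
// hypothetical placeholders, not part of this project):
//
//   unsigned char *ref = load_gray_image("ref.png");   // width*height grayscale bytes
//   unsigned char *cur = load_gray_image("cur.png");   // width*height grayscale bytes
//   double Tcr[3][4], Trc[3][4];                        // cur<-ref and ref<-cur poses, row-major [R|t]
//   std::vector<double> depth(width * height, 3.0);     // depth prior
//   std::vector<double> cov2(width * height, 3.0);      // depth variance prior
//   wrapper_update_cuda(ref, cur, Tcr, Trc, depth.data(), cov2.data());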
|
4a2ddddef100a5aab3c033a28a9e4fdc8198fb57.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "file_system.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
__device__ __managed__ u32 gtime = 0;
__device__ void fs_init(FileSystem *fs, uchar *volume, int SUPERBLOCK_SIZE,
int FCB_SIZE, int FCB_ENTRIES, int VOLUME_SIZE,
int STORAGE_BLOCK_SIZE, int MAX_FILENAME_SIZE,
int MAX_FILE_NUM, int MAX_FILE_SIZE, int FILE_BASE_ADDRESS)
{
// init variables
fs->volume = volume;
// init constants
fs->SUPERBLOCK_SIZE = SUPERBLOCK_SIZE; // 4096 volume[0-4095]
fs->FCB_SIZE = FCB_SIZE; // 32 bytes per entry: name-20, start-4, size-4, created-2, modified-2 (layout used in fs_open below)
fs->FCB_ENTRIES = FCB_ENTRIES; // 1024, total size 32768, volume[4096-36863]
fs->STORAGE_SIZE = VOLUME_SIZE; // 1085440
fs->STORAGE_BLOCK_SIZE = STORAGE_BLOCK_SIZE; // 32, total 2^15 blocks, volume[36864-1085440]
fs->MAX_FILENAME_SIZE = MAX_FILENAME_SIZE; // 20
fs->MAX_FILE_NUM = MAX_FILE_NUM; // 1024
fs->MAX_FILE_SIZE = MAX_FILE_SIZE; // 1048576
fs->FILE_BASE_ADDRESS = FILE_BASE_ADDRESS; // 36864
// initialize the Super Block
for (int i = 0; i < SUPERBLOCK_SIZE; ++ i){
volume[i] = 255; // 255 means 1111 1111, each uchar represents 8-blocks bit map
}
}
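// Allocation scheme implied by the code below: fs_open scans the Super Block in steps of
// 4 bytes, i.e. 32 blocks * 32 bytes = 1024 bytes per file slot, and flips the first bit
// (255 -> 127) to mark the slot's first block as used. File slot k is described by FCB
// entry k and starts at FILE_BASE_ADDRESS + k * 1024, which is why fs_write recovers the
// entry index as (fp - FILE_BASE_ADDRESS) >> 10.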
__device__ u32 fs_open(FileSystem *fs, char *s, int op)
{
//printf("fs_open was called\n");
gtime += 1; // increment the logical timestamp
/* Implement open operation here */
int i, j;
int index = -1;
bool is_match = true;
u32 base;
// search the FCB entries for the file name
for (i = fs->SUPERBLOCK_SIZE; i < fs->FILE_BASE_ADDRESS; i += fs->FCB_SIZE){
is_match = true;
for (j = 0; j < fs->MAX_FILENAME_SIZE; ++j) {
if (s[j] != fs->volume[i + j]) {
is_match = false;
break;
}
}
if (is_match) {
index = i;
break;
}
}
if (index != -1) { // the file exists
// update the modified time
fs->volume[index + 31] = gtime % 256;
fs->volume[index + 30] = (gtime >> 8) % 256;
u32 address = fs->volume[index+23] + (fs->volume[index+22]<<8) + (fs->volume[index+21]<<16) + (fs->volume[index+20]<<24);
return address;
}else{ // the file does not exist
// create a new zero byte file
if (op == 1) {
// search the Super Block bitmap for a free file slot
for (i = 0; i < fs->SUPERBLOCK_SIZE; i += 4){
if (fs->volume[i] == 255){ // each uchar covers 8 blocks (one bit per block, 1 = free)
fs->volume[i] = 127;
index = i * 8; // #start block of the file
break;
}
} // end for
// add the info of new file into FCB
// base: start physical address of the target FCB entry
u32 created, modified, start, size; // the file information
base = fs->SUPERBLOCK_SIZE + index; // 4096 + (index / 32) * 32, since index is a multiple of 32 here
start = fs->FILE_BASE_ADDRESS + index * (fs->STORAGE_BLOCK_SIZE); // the physical address of the file in bytes
size = 0; // the size of the file in bytes
created = gtime; // get current time as created time
modified = gtime; // get current time as modified time
// store the file name
for (j = 0; j < fs->MAX_FILENAME_SIZE; ++j) {
fs->volume[base + j] = s[j];
if (s[j] == '\0')
break;
}
// store the physical start address of the file (4B)
fs->volume[base + 23] = start % 256;
fs->volume[base + 22] = (start >> 8) % 256;
fs->volume[base + 21] = (start >> 16) % 256;
fs->volume[base + 20] = (start >> 24) % 256;
// store the size of the file (4B)
fs->volume[base + 27] = size % 256;
fs->volume[base + 26] = (size >> 8) % 256;
fs->volume[base + 25] = (size >> 16) % 256;
fs->volume[base + 24] = (size >> 24) % 256;
// store the created time
fs->volume[base + 29] = created % 256;
fs->volume[base + 28] = (created >> 8) % 256;
// store the modified time
fs->volume[base + 31] = modified % 256;
fs->volume[base + 30] = (modified >> 8) % 256;
return start;
} // end if
} // end else
return 0; // no match and op != 1: 0 is below FILE_BASE_ADDRESS, so it cannot collide with a valid file offset
}
__device__ void fs_read(FileSystem *fs, uchar *output, u32 size, u32 fp)
{
/* Implement read operation here */
// gtime += 1;
//printf("fs_read was called\n");
for (int i = 0; i < size; ++ i) {
output[i] = (char)fs->volume[fp++];
}
}
__device__ u32 fs_write(FileSystem *fs, uchar* input, u32 size, u32 fp)
{
gtime++;
/* Implement write operation here */
u32 index, base, file_size;
index = (fp - fs->FILE_BASE_ADDRESS) >> 10; // the index of the entry of FCB
base = fs->SUPERBLOCK_SIZE + index * fs->FCB_SIZE;
// update the modified time
fs->volume[base+31] = gtime % 256;
fs->volume[base+30] = (gtime >> 8) % 256;
// get the size of older file
file_size = fs->volume[base+27] + (fs->volume[base+26]<<8) + (fs->volume[base+25]<<16) + (fs->volume[base+24]<<24);
// update the size of file
fs->volume[base + 27] = size % 256;
fs->volume[base + 26] = (size >> 8) % 256;
// write the physical storage
for (int i = 0; i < size; ++ i){
// update the super block bitmap every time a new block is written
if (i % 32 == 0) {
int block_order = i / 32;
uchar target = fs->volume[index * 4 + block_order / 8];
if (target / (1 << (7 - (block_order % 8))) % 2 == 1)
target -= (1 << (7 - (block_order % 8))); // target block: 1 -> 0
fs->volume[index * 4 + block_order / 8] = target;
}
// write
fs->volume[fp++] = input[i];
}
// cleanup the older contents left
if (file_size > size) {
for (int i = fp; i < fp + file_size - size; ++i){
fs->volume[i] = '\0';
}
}
return fp;
}
__device__ void fs_gsys(FileSystem *fs, int op)
{
/* Implement LS_D and LS_S operation here */
// data structure to sort the modified time
// the element of sort_arr is 4 bytes
// the first two bytes represent the FCB entry
// the last two bytes represent the modified time
u32 sort_arr[1024];
int arr_size = 0; // track the size of array
u32 base, modified, size, entry;
// LS_D: file name, order by modified time
if (op == 0) {
for (int i = 0; i < fs->FCB_ENTRIES; ++ i){
base = fs->SUPERBLOCK_SIZE + i * fs->FCB_SIZE;
// FCB entry is not empty
if (fs->volume[base] != '\0'){
entry = i;
modified = fs->volume[base+31] + (fs->volume[base+30] << 8);
int ptr = arr_size - 1;
sort_arr[arr_size++] = (entry << 16) + modified; // add to the sort array
// insertion sort
while (ptr >= 0 && modified < sort_arr[ptr] % (1 << 16)) {
sort_arr[ptr + 1] = sort_arr[ptr];
sort_arr[ptr] = (entry << 16) + modified;
ptr--;
} // end while
} // end if
} // end for
// print information
printf("===sort by modified time===\n");
while (--arr_size >= 0) {
entry = sort_arr[arr_size] >> 16;
printf("%s\n", &fs->volume[fs->SUPERBLOCK_SIZE + entry * fs->FCB_SIZE]);
}
} // end if
// LS_S: file name and size, order by size
if (op == 1) {
for (int i = 0; i < fs->FCB_ENTRIES; ++i) {
base = fs->SUPERBLOCK_SIZE + i * fs->FCB_SIZE;
// FCB entry is not empty
if (fs->volume[base] != '\0') {
entry = i;
size = fs->volume[base + 27] + (fs->volume[base + 26] << 8);
int ptr = arr_size - 1;
sort_arr[arr_size++] = (entry << 16) + size; // add to the sort array
// insertion sort
while (ptr >= 0 && size <= sort_arr[ptr] % (1 << 16)) {
sort_arr[ptr + 1] = sort_arr[ptr];
sort_arr[ptr] = (entry << 16) + size;
ptr--;
} // end while
} // end if
} // end for
// print information
printf("===sort by file size===\n");
while (--arr_size >= 0) {
entry = sort_arr[arr_size] >> 16;
base = fs->SUPERBLOCK_SIZE + entry * fs->FCB_SIZE;
size = sort_arr[arr_size] % (1 << 16);
printf("%s %d\n", &fs->volume[base], size);
}
} // end if
}
__device__ void fs_gsys(FileSystem *fs, int op, char *s)
{
/* Implement rm operation here */
int i, j;
int index = -1;
bool is_match;
if (op == 2) {
for (i = fs->SUPERBLOCK_SIZE; i < fs->FILE_BASE_ADDRESS; i += fs->FCB_SIZE) {
is_match = true;
for (j = 0; j < fs->MAX_FILENAME_SIZE; ++j) {
if (s[j] != fs->volume[i + j]) {
is_match = false;
break;
}
}
if (is_match) {
index = i;
break;
}
} // end for
if (index == -1) {
printf("Cannot find file %s, please check!\n", s);
}
else {
u32 address = fs->volume[index + 23] + (fs->volume[index + 22] << 8) + (fs->volume[index + 21] << 16) + (fs->volume[index + 20] << 24);
u32 size = fs->volume[index + 27] + (fs->volume[index + 26] << 8);
// release the file space
for (i = 0; i < size; ++i) {
fs->volume[address + i] = '\0';
}
// release the corresponding FCB
for (i = 0; i < fs->FCB_SIZE; ++i) {
fs->volume[index + i] = '\0';
}
// update the super block
index = (index - fs->SUPERBLOCK_SIZE) / fs->FCB_SIZE;
for (i = 0; i < 4; ++i) {
fs->volume[index * 4 + i] = 255;
}
}
}
}
|
4a2ddddef100a5aab3c033a28a9e4fdc8198fb57.cu
|
#include "file_system.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
__device__ __managed__ u32 gtime = 0;
__device__ void fs_init(FileSystem *fs, uchar *volume, int SUPERBLOCK_SIZE,
int FCB_SIZE, int FCB_ENTRIES, int VOLUME_SIZE,
int STORAGE_BLOCK_SIZE, int MAX_FILENAME_SIZE,
int MAX_FILE_NUM, int MAX_FILE_SIZE, int FILE_BASE_ADDRESS)
{
// init variables
fs->volume = volume;
// init constants
fs->SUPERBLOCK_SIZE = SUPERBLOCK_SIZE; // 4096 volume[0-4095]
fs->FCB_SIZE = FCB_SIZE; // 32 bytes per entry: name-20, start-4, size-4, created-2, modified-2 (layout used in fs_open below)
fs->FCB_ENTRIES = FCB_ENTRIES; // 1024, total size 32768, volume[4096-36863]
fs->STORAGE_SIZE = VOLUME_SIZE; // 1085440
fs->STORAGE_BLOCK_SIZE = STORAGE_BLOCK_SIZE; // 32, total 2^15 blocks, volume[36864-1085440]
fs->MAX_FILENAME_SIZE = MAX_FILENAME_SIZE; // 20
fs->MAX_FILE_NUM = MAX_FILE_NUM; // 1024
fs->MAX_FILE_SIZE = MAX_FILE_SIZE; // 1048576
fs->FILE_BASE_ADDRESS = FILE_BASE_ADDRESS; // 36864
// initialize the Super Block
for (int i = 0; i < SUPERBLOCK_SIZE; ++ i){
volume[i] = 255; // 255 means 1111 1111, each uchar represents 8-blocks bit map
}
}
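// Allocation scheme implied by the code below: fs_open scans the Super Block in steps of
// 4 bytes, i.e. 32 blocks * 32 bytes = 1024 bytes per file slot, and flips the first bit
// (255 -> 127) to mark the slot's first block as used. File slot k is described by FCB
// entry k and starts at FILE_BASE_ADDRESS + k * 1024, which is why fs_write recovers the
// entry index as (fp - FILE_BASE_ADDRESS) >> 10.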
__device__ u32 fs_open(FileSystem *fs, char *s, int op)
{
//printf("fs_open was called\n");
gtime += 1; // increment the logical timestamp
/* Implement open operation here */
int i, j;
int index = -1;
bool is_match = true;
u32 base;
// search the FCB entries for the file name
for (i = fs->SUPERBLOCK_SIZE; i < fs->FILE_BASE_ADDRESS; i += fs->FCB_SIZE){
is_match = true;
for (j = 0; j < fs->MAX_FILENAME_SIZE; ++j) {
if (s[j] != fs->volume[i + j]) {
is_match = false;
break;
}
}
if (is_match) {
index = i;
break;
}
}
if (index != -1) { // the file exists
// update the modified time
fs->volume[index + 31] = gtime % 256;
fs->volume[index + 30] = (gtime >> 8) % 256;
u32 address = fs->volume[index+23] + (fs->volume[index+22]<<8) + (fs->volume[index+21]<<16) + (fs->volume[index+20]<<24);
return address;
}else{ // the file does not exist
// create a new zero byte file
if (op == 1) {
// search the Super Block bitmap for a free file slot
for (i = 0; i < fs->SUPERBLOCK_SIZE; i += 4){
if (fs->volume[i] == 255){ // each uchar covers 8 blocks (one bit per block, 1 = free)
fs->volume[i] = 127;
index = i * 8; // #start block of the file
break;
}
} // end for
// add the info of new file into FCB
// base: start physical address of the target FCB entry
u32 created, modified, start, size; // the file information
base = fs->SUPERBLOCK_SIZE + index; // 4096 + (index / 32) * 32, since index is a multiple of 32 here
start = fs->FILE_BASE_ADDRESS + index * (fs->STORAGE_BLOCK_SIZE); // the physical address of the file in bytes
size = 0; // the size of the file in bytes
created = gtime; // get current time as created time
modified = gtime; // get current time as modified time
// store the file name
for (j = 0; j < fs->MAX_FILENAME_SIZE; ++j) {
fs->volume[base + j] = s[j];
if (s[j] == '\0')
break;
}
// store the physical start address of the file (4B)
fs->volume[base + 23] = start % 256;
fs->volume[base + 22] = (start >> 8) % 256;
fs->volume[base + 21] = (start >> 16) % 256;
fs->volume[base + 20] = (start >> 24) % 256;
// store the size of the file (4B)
fs->volume[base + 27] = size % 256;
fs->volume[base + 26] = (size >> 8) % 256;
fs->volume[base + 25] = (size >> 16) % 256;
fs->volume[base + 24] = (size >> 24) % 256;
// store the created time
fs->volume[base + 29] = created % 256;
fs->volume[base + 28] = (created >> 8) % 256;
// store the modified time
fs->volume[base + 31] = modified % 256;
fs->volume[base + 30] = (modified >> 8) % 256;
return start;
} // end if
} // end else
return 0; // no match and op != 1: 0 is below FILE_BASE_ADDRESS, so it cannot collide with a valid file offset
}
__device__ void fs_read(FileSystem *fs, uchar *output, u32 size, u32 fp)
{
/* Implement read operation here */
// gtime += 1;
//printf("fs_read was called\n");
for (int i = 0; i < size; ++ i) {
output[i] = (char)fs->volume[fp++];
}
}
__device__ u32 fs_write(FileSystem *fs, uchar* input, u32 size, u32 fp)
{
gtime++;
/* Implement write operation here */
u32 index, base, file_size;
index = (fp - fs->FILE_BASE_ADDRESS) >> 10; // the index of the entry of FCB
base = fs->SUPERBLOCK_SIZE + index * fs->FCB_SIZE;
// update the modified time
fs->volume[base+31] = gtime % 256;
fs->volume[base+30] = (gtime >> 8) % 256;
// get the size of older file
file_size = fs->volume[base+27] + (fs->volume[base+26]<<8) + (fs->volume[base+25]<<16) + (fs->volume[base+24]<<24);
// update the size of file
fs->volume[base + 27] = size % 256;
fs->volume[base + 26] = (size >> 8) % 256;
// write the physical storage
for (int i = 0; i < size; ++ i){
// update the super block bitmap every time a new block is written
if (i % 32 == 0) {
int block_order = i / 32;
uchar target = fs->volume[index * 4 + block_order / 8];
if (target / (1 << (7 - (block_order % 8))) % 2 == 1)
target -= (1 << (7 - (block_order % 8))); // target block: 1 -> 0
fs->volume[index * 4 + block_order / 8] = target;
}
// write
fs->volume[fp++] = input[i];
}
// cleanup the older contents left
if (file_size > size) {
for (int i = fp; i < fp + file_size - size; ++i){
fs->volume[i] = '\0';
}
}
return fp;
}
__device__ void fs_gsys(FileSystem *fs, int op)
{
/* Implement LS_D and LS_S operation here */
// data structure to sort the modified time
// the element of sort_arr is 4 bytes
// the first two bytes represent the FCB entry
// the last two bytes represent the modified time
u32 sort_arr[1024];
int arr_size = 0; // track the size of array
u32 base, modified, size, entry;
// LS_D: file name, order by modified time
if (op == 0) {
for (int i = 0; i < fs->FCB_ENTRIES; ++ i){
base = fs->SUPERBLOCK_SIZE + i * fs->FCB_SIZE;
// FCB entry is not empty
if (fs->volume[base] != '\0'){
entry = i;
modified = fs->volume[base+31] + (fs->volume[base+30] << 8);
int ptr = arr_size - 1;
sort_arr[arr_size++] = (entry << 16) + modified; // add to the sort array
// insertion sort
while (ptr >= 0 && modified < sort_arr[ptr] % (1 << 16)) {
sort_arr[ptr + 1] = sort_arr[ptr];
sort_arr[ptr] = (entry << 16) + modified;
ptr--;
} // end while
} // end if
} // end for
// print information
printf("===sort by modified time===\n");
while (--arr_size >= 0) {
entry = sort_arr[arr_size] >> 16;
printf("%s\n", &fs->volume[fs->SUPERBLOCK_SIZE + entry * fs->FCB_SIZE]);
}
} // end if
// LS_S: file name and size, order by size
if (op == 1) {
for (int i = 0; i < fs->FCB_ENTRIES; ++i) {
base = fs->SUPERBLOCK_SIZE + i * fs->FCB_SIZE;
// FCB entry is not empty
if (fs->volume[base] != '\0') {
entry = i;
size = fs->volume[base + 27] + (fs->volume[base + 26] << 8);
int ptr = arr_size - 1;
sort_arr[arr_size++] = (entry << 16) + size; // add to the sort array
// insertion sort
while (ptr >= 0 && size <= sort_arr[ptr] % (1 << 16)) {
sort_arr[ptr + 1] = sort_arr[ptr];
sort_arr[ptr] = (entry << 16) + size;
ptr--;
} // end while
} // end if
} // end for
// print information
printf("===sort by file size===\n");
while (--arr_size >= 0) {
entry = sort_arr[arr_size] >> 16;
base = fs->SUPERBLOCK_SIZE + entry * fs->FCB_SIZE;
size = sort_arr[arr_size] % (1 << 16);
printf("%s %d\n", &fs->volume[base], size);
}
} // end if
}
__device__ void fs_gsys(FileSystem *fs, int op, char *s)
{
/* Implement rm operation here */
int i, j;
int index = -1;
bool is_match;
if (op == 2) {
for (i = fs->SUPERBLOCK_SIZE; i < fs->FILE_BASE_ADDRESS; i += fs->FCB_SIZE) {
is_match = true;
for (j = 0; j < fs->MAX_FILENAME_SIZE; ++j) {
if (s[j] != fs->volume[i + j]) {
is_match = false;
break;
}
}
if (is_match) {
index = i;
break;
}
} // end for
if (index == -1) {
printf("Cannot find file %s, please check!\n", s);
}
else {
u32 address = fs->volume[index + 23] + (fs->volume[index + 22] << 8) + (fs->volume[index + 21] << 16) + (fs->volume[index + 20] << 24);
u32 size = fs->volume[index + 27] + (fs->volume[index + 26] << 8);
// release the file space
for (i = 0; i < size; ++i) {
fs->volume[address + i] = '\0';
}
// release the corresponding FCB
for (i = 0; i < fs->FCB_SIZE; ++i) {
fs->volume[index + i] = '\0';
}
// update the super block
index = (index - fs->SUPERBLOCK_SIZE) / fs->FCB_SIZE;
for (i = 0; i < 4; ++i) {
fs->volume[index * 4 + i] = 255;
}
}
}
}
|
ac6f95ae61297498d42fd464513b8495ea542b0b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <arbor/gpu/gpu_common.hpp>
#include <arbor/gpu/math_cu.hpp>
#include <arbor/gpu/reduce_by_key.hpp>
#include <arbor/mechanism_abi.h>
namespace arb {
namespace allen_catalogue {
#define PPACK_IFACE_BLOCK \
auto _pp_var_width __attribute__((unused)) = params_.width;\
auto _pp_var_n_detectors __attribute__((unused)) = params_.n_detectors;\
auto* _pp_var_vec_ci __attribute__((unused)) = params_.vec_ci;\
auto* _pp_var_vec_di __attribute__((unused)) = params_.vec_di;\
auto* _pp_var_vec_t __attribute__((unused)) = params_.vec_t;\
auto* _pp_var_vec_dt __attribute__((unused)) = params_.vec_dt;\
auto* _pp_var_vec_v __attribute__((unused)) = params_.vec_v;\
auto* _pp_var_vec_i __attribute__((unused)) = params_.vec_i;\
auto* _pp_var_vec_g __attribute__((unused)) = params_.vec_g;\
auto* _pp_var_temperature_degC __attribute__((unused)) = params_.temperature_degC;\
auto* _pp_var_diam_um __attribute__((unused)) = params_.diam_um;\
auto* _pp_var_time_since_spike __attribute__((unused)) = params_.time_since_spike;\
auto* _pp_var_node_index __attribute__((unused)) = params_.node_index;\
auto* _pp_var_peer_index __attribute__((unused)) = params_.peer_index;\
auto* _pp_var_multiplicity __attribute__((unused)) = params_.multiplicity;\
auto* _pp_var_state_vars __attribute__((unused)) = params_.state_vars;\
auto* _pp_var_weight __attribute__((unused)) = params_.weight;\
auto& _pp_var_events __attribute__((unused)) = params_.events;\
auto& _pp_var_mechanism_id __attribute__((unused)) = params_.mechanism_id;\
auto& _pp_var_index_constraints __attribute__((unused)) = params_.index_constraints;\
auto _pp_var_vshift __attribute__((unused)) = params_.globals[0];\
auto _pp_var_tauF __attribute__((unused)) = params_.globals[1];\
auto* _pp_var_m __attribute__((unused)) = params_.state_vars[0];\
auto* _pp_var_h __attribute__((unused)) = params_.state_vars[1];\
auto* _pp_var_v __attribute__((unused)) = params_.state_vars[2];\
auto* _pp_var_g __attribute__((unused)) = params_.state_vars[3];\
auto* _pp_var_celsius __attribute__((unused)) = params_.state_vars[4];\
auto* _pp_var_mInf __attribute__((unused)) = params_.state_vars[5];\
auto* _pp_var_mTau __attribute__((unused)) = params_.state_vars[6];\
auto* _pp_var_hInf __attribute__((unused)) = params_.state_vars[7];\
auto* _pp_var_hTau __attribute__((unused)) = params_.state_vars[8];\
auto* _pp_var_gbar __attribute__((unused)) = params_.parameters[0];\
auto& _pp_var_ion_k __attribute__((unused)) = params_.ion_states[0];\
auto* _pp_var_ion_k_index __attribute__((unused)) = params_.ion_states[0].index;\
//End of IFACEBLOCK
namespace {
using ::arb::gpu::exprelr;
using ::arb::gpu::safeinv;
using ::arb::gpu::min;
using ::arb::gpu::max;
__device__
void rates(arb_mechanism_ppack params_, int tid_, arb_value_type v) {
PPACK_IFACE_BLOCK;
arb_value_type qt;
qt = pow( 2.2999999999999998, (_pp_var_celsius[tid_]- 21.0)* 0.10000000000000001);
_pp_var_mInf[tid_] = 1.0/( 1.0+exp( -(v-( -14.300000000000001+_pp_var_vshift))* 0.068493150684931503));
if (v< -50.0+_pp_var_vshift) {
_pp_var_mTau[tid_] = _pp_var_tauF*( 1.25+ 175.03*exp( -(v-_pp_var_vshift)* -0.025999999999999999))/qt;
}
else {
_pp_var_mTau[tid_] = _pp_var_tauF*( 1.25+ 13.0*exp( -(v-_pp_var_vshift)* 0.025999999999999999))/qt;
}
_pp_var_hInf[tid_] = 1.0/( 1.0+exp( -(v-( -54.0+_pp_var_vshift))* -0.090909090909090912));
_pp_var_hTau[tid_] = ( 360.0+( 1010.0+ 24.0*(v-( -55.0+_pp_var_vshift)))*exp(pow( -((v-( -75.0+_pp_var_vshift))* 0.020833333333333332), 2.0)))/qt;
}
__global__
void init(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
auto node_indexi_ = _pp_var_node_index[tid_];
arb_value_type v = _pp_var_vec_v[node_indexi_];
rates(params_, tid_, v);
_pp_var_m[tid_] = _pp_var_mInf[tid_];
_pp_var_h[tid_] = _pp_var_hInf[tid_];
}
}
__global__
void multiply(arb_mechanism_ppack params_) {
PPACK_IFACE_BLOCK;
auto tid_ = threadIdx.x + blockDim.x*blockIdx.x;
auto idx_ = blockIdx.y;
if(tid_<_pp_var_width) {
_pp_var_state_vars[idx_][tid_] *= _pp_var_multiplicity[tid_];
}
}
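// advance_state integrates the relaxation dX/dt = (X_inf - X) / X_tau for X in {m, h}.
// With x = -dt/tau, the factor (1 + x/2) / (1 - x/2) used below is the (1,1) Pade
// approximation of exp(x), so X <- X_inf + (X - X_inf) * (1 + x/2)/(1 - x/2) approximates
// the exact exponential relaxation toward X_inf (a Crank-Nicolson style update).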
__global__
void advance_state(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
auto node_indexi_ = _pp_var_node_index[tid_];
arb_value_type dt = _pp_var_vec_dt[node_indexi_];
arb_value_type v = _pp_var_vec_v[node_indexi_];
arb_value_type a_1_, a_0_, b_0_, ll0_, ll1_, ll2_, b_1_, ll3_;
ll3_ = 0.;
ll2_ = 0.;
ll1_ = 0.;
ll0_ = 0.;
rates(params_, tid_, v);
a_0_ = _pp_var_mTau[tid_];
b_0_ = _pp_var_mInf[tid_];
ll0_ = -dt/a_0_;
ll1_ = ( 1.0+ 0.5*ll0_)/( 1.0- 0.5*ll0_);
_pp_var_m[tid_] = b_0_+(_pp_var_m[tid_]-b_0_)*ll1_;
a_1_ = _pp_var_hTau[tid_];
b_1_ = _pp_var_hInf[tid_];
ll2_ = -dt/a_1_;
ll3_ = ( 1.0+ 0.5*ll2_)/( 1.0- 0.5*ll2_);
_pp_var_h[tid_] = b_1_+(_pp_var_h[tid_]-b_1_)*ll3_;
}
}
__global__
void compute_currents(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
auto ion_k_indexi_ = _pp_var_ion_k_index[tid_];
auto node_indexi_ = _pp_var_node_index[tid_];
arb_value_type conductivity_ = 0;
arb_value_type current_ = 0;
arb_value_type ek = _pp_var_ion_k.reversal_potential[ion_k_indexi_];
arb_value_type v = _pp_var_vec_v[node_indexi_];
arb_value_type ik = 0;
_pp_var_g[tid_] = _pp_var_gbar[tid_]*_pp_var_m[tid_]*_pp_var_m[tid_]*_pp_var_h[tid_];
ik = _pp_var_g[tid_]*(v-ek);
current_ = ik;
conductivity_ = _pp_var_g[tid_];
_pp_var_vec_g[node_indexi_] = fma(10.0*_pp_var_weight[tid_], conductivity_, _pp_var_vec_g[node_indexi_]);
_pp_var_vec_i[node_indexi_] = fma(10.0*_pp_var_weight[tid_], current_, _pp_var_vec_i[node_indexi_]);
_pp_var_ion_k.current_density[ion_k_indexi_] = fma(10.0*_pp_var_weight[tid_], ik, _pp_var_ion_k.current_density[ion_k_indexi_]);
}
}
} // namespace
void mechanism_K_P_gpu_init_(arb_mechanism_ppack* p) {
auto n = p->width;
unsigned block_dim = 128;
unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
hipLaunchKernelGGL(( init), dim3(grid_dim), dim3(block_dim), 0, 0, *p);
if (!p->multiplicity) return;
hipLaunchKernelGGL(( multiply), dim3(grid_dim, 2), dim3(block_dim), 0, 0, *p);
}
void mechanism_K_P_gpu_compute_currents_(arb_mechanism_ppack* p) {
auto n = p->width;
unsigned block_dim = 128;
unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
hipLaunchKernelGGL(( compute_currents), dim3(grid_dim), dim3(block_dim), 0, 0, *p);
}
void mechanism_K_P_gpu_advance_state_(arb_mechanism_ppack* p) {
auto n = p->width;
unsigned block_dim = 128;
unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
hipLaunchKernelGGL(( advance_state), dim3(grid_dim), dim3(block_dim), 0, 0, *p);
}
void mechanism_K_P_gpu_write_ions_(arb_mechanism_ppack* p) {}
void mechanism_K_P_gpu_post_event_(arb_mechanism_ppack* p) {}
void mechanism_K_P_gpu_apply_events_(arb_mechanism_ppack* p, arb_deliverable_event_stream* events) {}
} // namespace allen_catalogue
} // namespace arb
|
ac6f95ae61297498d42fd464513b8495ea542b0b.cu
|
#include <arbor/gpu/gpu_common.hpp>
#include <arbor/gpu/math_cu.hpp>
#include <arbor/gpu/reduce_by_key.hpp>
#include <arbor/mechanism_abi.h>
namespace arb {
namespace allen_catalogue {
#define PPACK_IFACE_BLOCK \
auto _pp_var_width __attribute__((unused)) = params_.width;\
auto _pp_var_n_detectors __attribute__((unused)) = params_.n_detectors;\
auto* _pp_var_vec_ci __attribute__((unused)) = params_.vec_ci;\
auto* _pp_var_vec_di __attribute__((unused)) = params_.vec_di;\
auto* _pp_var_vec_t __attribute__((unused)) = params_.vec_t;\
auto* _pp_var_vec_dt __attribute__((unused)) = params_.vec_dt;\
auto* _pp_var_vec_v __attribute__((unused)) = params_.vec_v;\
auto* _pp_var_vec_i __attribute__((unused)) = params_.vec_i;\
auto* _pp_var_vec_g __attribute__((unused)) = params_.vec_g;\
auto* _pp_var_temperature_degC __attribute__((unused)) = params_.temperature_degC;\
auto* _pp_var_diam_um __attribute__((unused)) = params_.diam_um;\
auto* _pp_var_time_since_spike __attribute__((unused)) = params_.time_since_spike;\
auto* _pp_var_node_index __attribute__((unused)) = params_.node_index;\
auto* _pp_var_peer_index __attribute__((unused)) = params_.peer_index;\
auto* _pp_var_multiplicity __attribute__((unused)) = params_.multiplicity;\
auto* _pp_var_state_vars __attribute__((unused)) = params_.state_vars;\
auto* _pp_var_weight __attribute__((unused)) = params_.weight;\
auto& _pp_var_events __attribute__((unused)) = params_.events;\
auto& _pp_var_mechanism_id __attribute__((unused)) = params_.mechanism_id;\
auto& _pp_var_index_constraints __attribute__((unused)) = params_.index_constraints;\
auto _pp_var_vshift __attribute__((unused)) = params_.globals[0];\
auto _pp_var_tauF __attribute__((unused)) = params_.globals[1];\
auto* _pp_var_m __attribute__((unused)) = params_.state_vars[0];\
auto* _pp_var_h __attribute__((unused)) = params_.state_vars[1];\
auto* _pp_var_v __attribute__((unused)) = params_.state_vars[2];\
auto* _pp_var_g __attribute__((unused)) = params_.state_vars[3];\
auto* _pp_var_celsius __attribute__((unused)) = params_.state_vars[4];\
auto* _pp_var_mInf __attribute__((unused)) = params_.state_vars[5];\
auto* _pp_var_mTau __attribute__((unused)) = params_.state_vars[6];\
auto* _pp_var_hInf __attribute__((unused)) = params_.state_vars[7];\
auto* _pp_var_hTau __attribute__((unused)) = params_.state_vars[8];\
auto* _pp_var_gbar __attribute__((unused)) = params_.parameters[0];\
auto& _pp_var_ion_k __attribute__((unused)) = params_.ion_states[0];\
auto* _pp_var_ion_k_index __attribute__((unused)) = params_.ion_states[0].index;\
//End of IFACEBLOCK
namespace {
using ::arb::gpu::exprelr;
using ::arb::gpu::safeinv;
using ::arb::gpu::min;
using ::arb::gpu::max;
__device__
void rates(arb_mechanism_ppack params_, int tid_, arb_value_type v) {
PPACK_IFACE_BLOCK;
arb_value_type qt;
qt = pow( 2.2999999999999998, (_pp_var_celsius[tid_]- 21.0)* 0.10000000000000001);
_pp_var_mInf[tid_] = 1.0/( 1.0+exp( -(v-( -14.300000000000001+_pp_var_vshift))* 0.068493150684931503));
if (v< -50.0+_pp_var_vshift) {
_pp_var_mTau[tid_] = _pp_var_tauF*( 1.25+ 175.03*exp( -(v-_pp_var_vshift)* -0.025999999999999999))/qt;
}
else {
_pp_var_mTau[tid_] = _pp_var_tauF*( 1.25+ 13.0*exp( -(v-_pp_var_vshift)* 0.025999999999999999))/qt;
}
_pp_var_hInf[tid_] = 1.0/( 1.0+exp( -(v-( -54.0+_pp_var_vshift))* -0.090909090909090912));
_pp_var_hTau[tid_] = ( 360.0+( 1010.0+ 24.0*(v-( -55.0+_pp_var_vshift)))*exp(pow( -((v-( -75.0+_pp_var_vshift))* 0.020833333333333332), 2.0)))/qt;
}
__global__
void init(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
auto node_indexi_ = _pp_var_node_index[tid_];
arb_value_type v = _pp_var_vec_v[node_indexi_];
rates(params_, tid_, v);
_pp_var_m[tid_] = _pp_var_mInf[tid_];
_pp_var_h[tid_] = _pp_var_hInf[tid_];
}
}
__global__
void multiply(arb_mechanism_ppack params_) {
PPACK_IFACE_BLOCK;
auto tid_ = threadIdx.x + blockDim.x*blockIdx.x;
auto idx_ = blockIdx.y;
if(tid_<_pp_var_width) {
_pp_var_state_vars[idx_][tid_] *= _pp_var_multiplicity[tid_];
}
}
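// advance_state integrates the relaxation dX/dt = (X_inf - X) / X_tau for X in {m, h}.
// With x = -dt/tau, the factor (1 + x/2) / (1 - x/2) used below is the (1,1) Pade
// approximation of exp(x), so X <- X_inf + (X - X_inf) * (1 + x/2)/(1 - x/2) approximates
// the exact exponential relaxation toward X_inf (a Crank-Nicolson style update).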
__global__
void advance_state(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
auto node_indexi_ = _pp_var_node_index[tid_];
arb_value_type dt = _pp_var_vec_dt[node_indexi_];
arb_value_type v = _pp_var_vec_v[node_indexi_];
arb_value_type a_1_, a_0_, b_0_, ll0_, ll1_, ll2_, b_1_, ll3_;
ll3_ = 0.;
ll2_ = 0.;
ll1_ = 0.;
ll0_ = 0.;
rates(params_, tid_, v);
a_0_ = _pp_var_mTau[tid_];
b_0_ = _pp_var_mInf[tid_];
ll0_ = -dt/a_0_;
ll1_ = ( 1.0+ 0.5*ll0_)/( 1.0- 0.5*ll0_);
_pp_var_m[tid_] = b_0_+(_pp_var_m[tid_]-b_0_)*ll1_;
a_1_ = _pp_var_hTau[tid_];
b_1_ = _pp_var_hInf[tid_];
ll2_ = -dt/a_1_;
ll3_ = ( 1.0+ 0.5*ll2_)/( 1.0- 0.5*ll2_);
_pp_var_h[tid_] = b_1_+(_pp_var_h[tid_]-b_1_)*ll3_;
}
}
__global__
void compute_currents(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
auto ion_k_indexi_ = _pp_var_ion_k_index[tid_];
auto node_indexi_ = _pp_var_node_index[tid_];
arb_value_type conductivity_ = 0;
arb_value_type current_ = 0;
arb_value_type ek = _pp_var_ion_k.reversal_potential[ion_k_indexi_];
arb_value_type v = _pp_var_vec_v[node_indexi_];
arb_value_type ik = 0;
_pp_var_g[tid_] = _pp_var_gbar[tid_]*_pp_var_m[tid_]*_pp_var_m[tid_]*_pp_var_h[tid_];
ik = _pp_var_g[tid_]*(v-ek);
current_ = ik;
conductivity_ = _pp_var_g[tid_];
_pp_var_vec_g[node_indexi_] = fma(10.0*_pp_var_weight[tid_], conductivity_, _pp_var_vec_g[node_indexi_]);
_pp_var_vec_i[node_indexi_] = fma(10.0*_pp_var_weight[tid_], current_, _pp_var_vec_i[node_indexi_]);
_pp_var_ion_k.current_density[ion_k_indexi_] = fma(10.0*_pp_var_weight[tid_], ik, _pp_var_ion_k.current_density[ion_k_indexi_]);
}
}
} // namespace
void mechanism_K_P_gpu_init_(arb_mechanism_ppack* p) {
auto n = p->width;
unsigned block_dim = 128;
unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
init<<<grid_dim, block_dim>>>(*p);
if (!p->multiplicity) return;
multiply<<<dim3{grid_dim, 2}, block_dim>>>(*p);
}
void mechanism_K_P_gpu_compute_currents_(arb_mechanism_ppack* p) {
auto n = p->width;
unsigned block_dim = 128;
unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
compute_currents<<<grid_dim, block_dim>>>(*p);
}
void mechanism_K_P_gpu_advance_state_(arb_mechanism_ppack* p) {
auto n = p->width;
unsigned block_dim = 128;
unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
advance_state<<<grid_dim, block_dim>>>(*p);
}
void mechanism_K_P_gpu_write_ions_(arb_mechanism_ppack* p) {}
void mechanism_K_P_gpu_post_event_(arb_mechanism_ppack* p) {}
void mechanism_K_P_gpu_apply_events_(arb_mechanism_ppack* p, arb_deliverable_event_stream* events) {}
} // namespace allen_catalogue
} // namespace arb
|
f225185971e0870573311fee739d0dcf9cc2bddf.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2014 Jure Ratkovic
*/
#include <optix.h>
#include <optixu/optixu_math_namespace.h>
#include <optixu/optixu_matrix_namespace.h>
#include <optixu/optixu_aabb_namespace.h>
using namespace optix;
rtBuffer<float3> vertex_buffer;
rtBuffer<float3> normal_buffer;
rtBuffer<float3> tangent_buffer;
rtBuffer<float3> bitangent_buffer;
rtBuffer<float2> texcoord_buffer;
rtBuffer<int3> index_buffer;
rtTextureSampler<uchar4, 2, hipReadModeNormalizedFloat> normal_map;
rtDeclareVariable(float3, texcoord, attribute texcoord, );
rtDeclareVariable(float3, geometric_normal, attribute geometric_normal, );
rtDeclareVariable(float3, shading_normal, attribute shading_normal, );
rtDeclareVariable(optix::Ray, ray, rtCurrentRay, );
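// intersect(): after a ray/triangle hit, texture coordinates and normals are interpolated
// with the barycentric weights (1 - beta - gamma, beta, gamma). When tangent data is
// present, the interpolated TBN frame (tangent, bitangent, normal as matrix columns) maps
// the normal-map sample, remapped from [0,1] to [-1,1], into the final shading normal.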
RT_PROGRAM void intersect(int primIdx)
{
const int3 &idx = index_buffer[primIdx];
const float3 &p0 = vertex_buffer[idx.x];
const float3 &p1 = vertex_buffer[idx.y];
const float3 &p2 = vertex_buffer[idx.z];
float3 n;
float t, beta, gamma;
if(intersect_triangle(ray, p0, p1, p2, n, t, beta, gamma))
{
if(rtPotentialIntersection(t))
{
if(texcoord_buffer.size() == 0)
texcoord = make_float3(0.0f, 0.0f, 0.0f);
else
{
const float2 &t0 = texcoord_buffer[idx.x];
const float2 &t1 = texcoord_buffer[idx.y];
const float2 &t2 = texcoord_buffer[idx.z];
texcoord = make_float3(t1 * beta + t2 * gamma + t0 * (1.0f - beta - gamma));
}
geometric_normal = normalize(n);
if(normal_buffer.size() == 0)
shading_normal = geometric_normal;
else
{
shading_normal = normalize(normal_buffer[idx.y] * beta +
normal_buffer[idx.z] * gamma + normal_buffer[idx.x] * (1.0f - beta - gamma));
if(tangent_buffer.size() > 0)
{
const float3 shading_tangent = normalize(tangent_buffer[idx.y] * beta +
tangent_buffer[idx.z] * gamma + tangent_buffer[idx.x] * (1.0f - beta - gamma));
const float3 shading_bitangent = normalize(bitangent_buffer[idx.y] * beta +
bitangent_buffer[idx.z] * gamma + bitangent_buffer[idx.x] * (1.0f - beta - gamma));
Matrix3x3 tbni;
tbni.setCol(0, shading_tangent);
tbni.setCol(1, shading_bitangent);
tbni.setCol(2, shading_normal);
shading_normal = tbni * normalize(make_float3(tex2D(normal_map, texcoord.x, texcoord.y)) * 2.f - 1.f);
}
}
rtReportIntersection(0);
}
}
}
RT_PROGRAM void bounds(int primIdx, float result[6])
{
const int3 &idx = index_buffer[primIdx];
const float3 &v0 = vertex_buffer[idx.x];
const float3 &v1 = vertex_buffer[idx.y];
const float3 &v2 = vertex_buffer[idx.z];
const float area = length(cross(v1 - v0, v2 - v0));
optix::Aabb *aabb = (optix::Aabb*)result;
if(area > 0.0f && !isinf(area))
{
aabb->m_min = fminf(fminf(v0, v1), v2);
aabb->m_max = fmaxf(fmaxf(v0, v1), v2);
}
else
aabb->invalidate();
}
|
f225185971e0870573311fee739d0dcf9cc2bddf.cu
|
/*
* Copyright (c) 2014 Jure Ratkovic
*/
#include <optix.h>
#include <optixu/optixu_math_namespace.h>
#include <optixu/optixu_matrix_namespace.h>
#include <optixu/optixu_aabb_namespace.h>
using namespace optix;
rtBuffer<float3> vertex_buffer;
rtBuffer<float3> normal_buffer;
rtBuffer<float3> tangent_buffer;
rtBuffer<float3> bitangent_buffer;
rtBuffer<float2> texcoord_buffer;
rtBuffer<int3> index_buffer;
rtTextureSampler<uchar4, 2, cudaReadModeNormalizedFloat> normal_map;
rtDeclareVariable(float3, texcoord, attribute texcoord, );
rtDeclareVariable(float3, geometric_normal, attribute geometric_normal, );
rtDeclareVariable(float3, shading_normal, attribute shading_normal, );
rtDeclareVariable(optix::Ray, ray, rtCurrentRay, );
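// intersect(): after a ray/triangle hit, texture coordinates and normals are interpolated
// with the barycentric weights (1 - beta - gamma, beta, gamma). When tangent data is
// present, the interpolated TBN frame (tangent, bitangent, normal as matrix columns) maps
// the normal-map sample, remapped from [0,1] to [-1,1], into the final shading normal.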
RT_PROGRAM void intersect(int primIdx)
{
const int3 &idx = index_buffer[primIdx];
const float3 &p0 = vertex_buffer[idx.x];
const float3 &p1 = vertex_buffer[idx.y];
const float3 &p2 = vertex_buffer[idx.z];
float3 n;
float t, beta, gamma;
if(intersect_triangle(ray, p0, p1, p2, n, t, beta, gamma))
{
if(rtPotentialIntersection(t))
{
if(texcoord_buffer.size() == 0)
texcoord = make_float3(0.0f, 0.0f, 0.0f);
else
{
const float2 &t0 = texcoord_buffer[idx.x];
const float2 &t1 = texcoord_buffer[idx.y];
const float2 &t2 = texcoord_buffer[idx.z];
texcoord = make_float3(t1 * beta + t2 * gamma + t0 * (1.0f - beta - gamma));
}
geometric_normal = normalize(n);
if(normal_buffer.size() == 0)
shading_normal = geometric_normal;
else
{
shading_normal = normalize(normal_buffer[idx.y] * beta +
normal_buffer[idx.z] * gamma + normal_buffer[idx.x] * (1.0f - beta - gamma));
if(tangent_buffer.size() > 0)
{
const float3 shading_tangent = normalize(tangent_buffer[idx.y] * beta +
tangent_buffer[idx.z] * gamma + tangent_buffer[idx.x] * (1.0f - beta - gamma));
const float3 shading_bitangent = normalize(bitangent_buffer[idx.y] * beta +
bitangent_buffer[idx.z] * gamma + bitangent_buffer[idx.x] * (1.0f - beta - gamma));
Matrix3x3 tbni;
tbni.setCol(0, shading_tangent);
tbni.setCol(1, shading_bitangent);
tbni.setCol(2, shading_normal);
shading_normal = tbni * normalize(make_float3(tex2D(normal_map, texcoord.x, texcoord.y)) * 2.f - 1.f);
}
}
rtReportIntersection(0);
}
}
}
RT_PROGRAM void bounds(int primIdx, float result[6])
{
const int3 &idx = index_buffer[primIdx];
const float3 &v0 = vertex_buffer[idx.x];
const float3 &v1 = vertex_buffer[idx.y];
const float3 &v2 = vertex_buffer[idx.z];
const float area = length(cross(v1 - v0, v2 - v0));
optix::Aabb *aabb = (optix::Aabb*)result;
if(area > 0.0f && !isinf(area))
{
aabb->m_min = fminf(fminf(v0, v1), v2);
aabb->m_max = fmaxf(fmaxf(v0, v1), v2);
}
else
aabb->invalidate();
}
|
f75d022fdf4d4f1b3a8ef526815ea2417e88f114.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <limits.h>
#include <time.h>
#include <unistd.h>
#include <hip/hip_runtime.h>
#define M 32
#define N 32
#define P 32
#define NOS 7
#define DOF 1
double *A;
double *x;
double *y;
int offsets[NOS]={-M *N *DOF ,-M *DOF ,-DOF ,0 ,DOF ,M *DOF ,M *N *DOF };
void malloc_arrays() {
int i1;
A = (double*) malloc((M *N *P *DOF *DOF *NOS) * sizeof(double));
x = (double*) malloc((M *N *P *DOF) * sizeof(double));
y = (double*) malloc((M *N *P *DOF) * sizeof(double));
}
void init_input_vars() {
int i1;
for (i1=0; i1<M *N *P *DOF *DOF *NOS; i1++)
A[i1] = (i1) % 5 + 1;
for (i1=0; i1<M *N *P *DOF; i1++)
x[i1] = (i1) % 5 + 1;
for (i1=0; i1<M *N *P *DOF; i1++)
y[i1] = 0;
}
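// orcu_kernel2 is an Orio-generated SpMV kernel for a matrix stored by diagonals
// (DIA-like layout): the NOS = 7 offsets correspond to a 3-D 7-point stencil on an
// M x N x P grid, each diagonal j occupies a slab of sbdiag values in A starting at
// j*sbdiag, and a grid-stride loop distributes the rows across threads.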
__global__ void orcu_kernel2(const int nrows, const int ndiags, int sbdiag, int ndofs, int* offsets, double* A, double* x, double* y) {
const int tid=blockIdx.x*blockDim.x+threadIdx.x;
const int gsize=gridDim.x*blockDim.x;
double ysum;
int j, k, col, row;
for (int i=tid; i<=nrows-1; i+=gsize) {
{
ysum=0.0;
for (j=0; j<=ndiags-1; j++ ) {
row=i+j*sbdiag;
col=(floor((float)i/ndofs)+offsets[j])*ndofs;
if (col>=0&&col<nrows)
for (k=0; k<=ndofs-1; k++ )
ysum=ysum+A[row+k*nrows]*x[col+k];
}
y[i]=ysum;
}
}
}
int main(int argc, char *argv[]) {
malloc_arrays();
init_input_vars();
hipSetDeviceFlags(hipDeviceScheduleBlockingSync);
float orcu_elapsed=0.0, orcu_transfer=0.0;
hipEvent_t tstart, tstop, start, stop;
hipEventCreate(&tstart); hipEventCreate(&tstop);
hipEventCreate(&start); hipEventCreate(&stop);
for (int orio_i=0; orio_i<ORIO_REPS; orio_i++) {
int nrows=M*N*P*DOF;
int ndiags=NOS;
int ndofs=DOF;
int sbdiag=M*N*P*DOF*DOF;
/*@ begin Loop(transform CUDA(threadCount=TC, blockCount=BC, preferL1Size=PL)
for(i=0; i<=nrows-1; i++){
ysum = 0.0;
for(j=0; j<=ndiags-1; j++){
row = i+j*sbdiag;
col = (floor((float)i/ndofs)+offsets[j])*ndofs;
if(col>=0&&col<nrows)
for(k=0; k<=ndofs-1; k++)
ysum += A[row+k*nrows] * x[col+k];
}
y[i] = ysum;
}
) @*/
{
hipDeviceSynchronize();
/*declare variables*/
double *dev_A, *dev_x, *dev_y;
int *dev_offsets;
int nthreads=32;
/*calculate device dimensions*/
dim3 dimGrid, dimBlock;
dimBlock.x=nthreads;
dimGrid.x=14;
/*allocate device memory*/
hipMalloc(&dev_A,M *N *P *DOF *DOF *NOS*sizeof(double));
hipMalloc(&dev_x,M *N *P *DOF*sizeof(double));
hipMalloc(&dev_y,M *N *P *DOF*sizeof(double));
hipMalloc(&dev_offsets,NOS*sizeof(int));
hipDeviceSetCacheConfig(hipFuncCachePreferShared);
/*copy data from host to device*/
hipEventRecord(tstart,0);
hipMemcpy(dev_A,A,M *N *P *DOF *DOF *NOS*sizeof(double),hipMemcpyHostToDevice);
hipMemcpy(dev_x,x,M *N *P *DOF*sizeof(double),hipMemcpyHostToDevice);
hipMemcpy(dev_offsets,offsets,NOS*sizeof(int),hipMemcpyHostToDevice);
hipEventRecord(tstop,0);
hipEventSynchronize(tstop);
hipEventElapsedTime(&orcu_transfer,tstart,tstop);
hipEventRecord(start,0);
/*invoke device kernel*/
hipLaunchKernelGGL(( orcu_kernel2), dim3(dimGrid),dim3(dimBlock), 0, 0, nrows,ndiags,sbdiag,ndofs,dev_offsets,dev_A,dev_x,dev_y);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&orcu_elapsed,start,stop);
/*copy data from device to host*/
hipMemcpy(y,dev_y,M *N *P *DOF*sizeof(double),hipMemcpyDeviceToHost);
hipDeviceSetCacheConfig(hipFuncCachePreferNone);
/*free allocated memory*/
hipFree(dev_A);
hipFree(dev_x);
hipFree(dev_y);
hipFree(dev_offsets);
hipError_t err=hipGetLastError();
if (hipSuccess!=err)
printf("CUDA runtime error: %s@",hipGetErrorString(err));
}
/*@ end @*/
printf("{'[0, 0, 0]' : (%g,%g)}\n", orcu_elapsed, orcu_transfer);
}
hipEventDestroy(tstart); hipEventDestroy(tstop);
hipEventDestroy(start); hipEventDestroy(stop);
return 0;
}
|
f75d022fdf4d4f1b3a8ef526815ea2417e88f114.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <limits.h>
#include <time.h>
#include <unistd.h>
#include <cuda.h>
#define M 32
#define N 32
#define P 32
#define NOS 7
#define DOF 1
double *A;
double *x;
double *y;
int offsets[NOS]={-M *N *DOF ,-M *DOF ,-DOF ,0 ,DOF ,M *DOF ,M *N *DOF };
void malloc_arrays() {
int i1;
A = (double*) malloc((M *N *P *DOF *DOF *NOS) * sizeof(double));
x = (double*) malloc((M *N *P *DOF) * sizeof(double));
y = (double*) malloc((M *N *P *DOF) * sizeof(double));
}
void init_input_vars() {
int i1;
for (i1=0; i1<M *N *P *DOF *DOF *NOS; i1++)
A[i1] = (i1) % 5 + 1;
for (i1=0; i1<M *N *P *DOF; i1++)
x[i1] = (i1) % 5 + 1;
for (i1=0; i1<M *N *P *DOF; i1++)
y[i1] = 0;
}
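// orcu_kernel2 is an Orio-generated SpMV kernel for a matrix stored by diagonals
// (DIA-like layout): the NOS = 7 offsets correspond to a 3-D 7-point stencil on an
// M x N x P grid, each diagonal j occupies a slab of sbdiag values in A starting at
// j*sbdiag, and a grid-stride loop distributes the rows across threads.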
__global__ void orcu_kernel2(const int nrows, const int ndiags, int sbdiag, int ndofs, int* offsets, double* A, double* x, double* y) {
const int tid=blockIdx.x*blockDim.x+threadIdx.x;
const int gsize=gridDim.x*blockDim.x;
double ysum;
int j, k, col, row;
for (int i=tid; i<=nrows-1; i+=gsize) {
{
ysum=0.0;
for (j=0; j<=ndiags-1; j++ ) {
row=i+j*sbdiag;
col=(floor((float)i/ndofs)+offsets[j])*ndofs;
if (col>=0&&col<nrows)
for (k=0; k<=ndofs-1; k++ )
ysum=ysum+A[row+k*nrows]*x[col+k];
}
y[i]=ysum;
}
}
}
int main(int argc, char *argv[]) {
malloc_arrays();
init_input_vars();
cudaSetDeviceFlags(cudaDeviceBlockingSync);
float orcu_elapsed=0.0, orcu_transfer=0.0;
cudaEvent_t tstart, tstop, start, stop;
cudaEventCreate(&tstart); cudaEventCreate(&tstop);
cudaEventCreate(&start); cudaEventCreate(&stop);
for (int orio_i=0; orio_i<ORIO_REPS; orio_i++) {
int nrows=M*N*P*DOF;
int ndiags=NOS;
int ndofs=DOF;
int sbdiag=M*N*P*DOF*DOF;
/*@ begin Loop(transform CUDA(threadCount=TC, blockCount=BC, preferL1Size=PL)
for(i=0; i<=nrows-1; i++){
ysum = 0.0;
for(j=0; j<=ndiags-1; j++){
row = i+j*sbdiag;
col = (floor((float)i/ndofs)+offsets[j])*ndofs;
if(col>=0&&col<nrows)
for(k=0; k<=ndofs-1; k++)
ysum += A[row+k*nrows] * x[col+k];
}
y[i] = ysum;
}
) @*/
{
cudaDeviceSynchronize();
/*declare variables*/
double *dev_A, *dev_x, *dev_y;
int *dev_offsets;
int nthreads=32;
/*calculate device dimensions*/
dim3 dimGrid, dimBlock;
dimBlock.x=nthreads;
dimGrid.x=14;
/*allocate device memory*/
cudaMalloc(&dev_A,M *N *P *DOF *DOF *NOS*sizeof(double));
cudaMalloc(&dev_x,M *N *P *DOF*sizeof(double));
cudaMalloc(&dev_y,M *N *P *DOF*sizeof(double));
cudaMalloc(&dev_offsets,NOS*sizeof(int));
cudaDeviceSetCacheConfig(cudaFuncCachePreferShared);
/*copy data from host to device*/
cudaEventRecord(tstart,0);
cudaMemcpy(dev_A,A,M *N *P *DOF *DOF *NOS*sizeof(double),cudaMemcpyHostToDevice);
cudaMemcpy(dev_x,x,M *N *P *DOF*sizeof(double),cudaMemcpyHostToDevice);
cudaMemcpy(dev_offsets,offsets,NOS*sizeof(int),cudaMemcpyHostToDevice);
cudaEventRecord(tstop,0);
cudaEventSynchronize(tstop);
cudaEventElapsedTime(&orcu_transfer,tstart,tstop);
cudaEventRecord(start,0);
/*invoke device kernel*/
orcu_kernel2<<<dimGrid,dimBlock>>>(nrows,ndiags,sbdiag,ndofs,dev_offsets,dev_A,dev_x,dev_y);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&orcu_elapsed,start,stop);
/*copy data from device to host*/
cudaMemcpy(y,dev_y,M *N *P *DOF*sizeof(double),cudaMemcpyDeviceToHost);
cudaDeviceSetCacheConfig(cudaFuncCachePreferNone);
/*free allocated memory*/
cudaFree(dev_A);
cudaFree(dev_x);
cudaFree(dev_y);
cudaFree(dev_offsets);
cudaError_t err=cudaGetLastError();
if (cudaSuccess!=err)
printf("CUDA runtime error: %s@",cudaGetErrorString(err));
}
/*@ end @*/
printf("{'[0, 0, 0]' : (%g,%g)}\n", orcu_elapsed, orcu_transfer);
}
cudaEventDestroy(tstart); cudaEventDestroy(tstop);
cudaEventDestroy(start); cudaEventDestroy(stop);
return 0;
}
|
bc070a0d7f8b752d984ca43510457ac680cf3dec.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// std::system includes
#include <cstdio>
// CUDA-C includes
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <helper_cuda.h>
#define TOTAL_SIZE 256*1024*1024
// # threadblocks
#define TBLOCKS 1024
#define THREADS 512
// throw error on equality
#define ERR_EQ(X,Y) do { if ((X) == (Y)) { \
fprintf(stderr,"Error in %s at %s:%d\n",__func__,__FILE__,__LINE__); \
exit(-1);}} while(0)
// throw error on difference
#define ERR_NE(X,Y) do { if ((X) != (Y)) { \
fprintf(stderr,"Error in %s at %s:%d\n",__func__,__FILE__,__LINE__); \
exit(-1);}} while(0)
// experiment iterations to compute averages
#define N_ITERATIONS 2
#define N_HIGH_KERNELS 4
// copy from source -> destination arrays
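// Note (inferred from the code): both helpers use a grid-stride loop so the fixed
// TBLOCKS x THREADS launch covers the whole buffer regardless of its size; slow_kernel
// additionally spins on a volatile counter (DELAY iterations per element) purely to
// lengthen its runtime for the stream-priority/preemption experiment.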
__device__ void slow_kernel(int *dst, int *src, size_t n) {
int num = gridDim.x * blockDim.x;
int id = blockDim.x * blockIdx.x + threadIdx.x;
for (int i = id; i < n / sizeof(int); i += num) {
#define DELAY 2048
for (volatile int j = 0; j < DELAY; j++)
;
dst[i] = src[i];
}
}
__device__ void fast_kernel(int *dst, int *src, size_t n) {
int num = gridDim.x * blockDim.x;
int id = blockDim.x * blockIdx.x + threadIdx.x;
for (int i = id; i < n / sizeof(int); i += num) {
dst[i] = src[i];
}
}
// named kernels for easier profiling
__global__ void low(int *dst, int *src, size_t n) {
slow_kernel(dst, src, n);
}
__global__ void high(int *dst, int *src, size_t n) {
fast_kernel(dst, src, n);
}
__global__ void low_preempt(int *dst, int *src, size_t n) {
slow_kernel(dst, src, n);
}
__global__ void high_preempt(int *dst, int *src, size_t n) {
fast_kernel(dst, src, n);
}
// initialise memory
void mem_init(int *buf, size_t n) {
for (int i = 0; i < n / sizeof(int); i++) {
buf[i] = i;
}
}
// Forward declaration
hipError_t experiment(int priority_low, int priority_hi);
int main(int argc, char **argv) {
hipDeviceProp_t device_prop;
int dev_id;
printf("Starting [%s]...\n", argv[0]);
// set device
dev_id = findCudaDevice(argc, (const char **) argv);
checkCudaErrors(hipGetDeviceProperties(&device_prop, dev_id));
if ((device_prop.major << 4) + device_prop.minor < 0x35) {
fprintf(stderr,
"%s requires Compute Capability of SM 3.5 or higher to run.\nexiting...\n",
argv[0]);
exit (EXIT_WAIVED);
}
// get the range of priorities available
// [ greatest_priority, least_priority ]
int priority_low;
int priority_hi;
checkCudaErrors(
hipDeviceGetStreamPriorityRange(&priority_low, &priority_hi));
printf("CUDA stream priority range: LOW: %d to HIGH: %d\n", priority_low,
priority_hi);
experiment(priority_low, priority_hi);
exit (EXIT_SUCCESS);
}
hipError_t solo_test(hipStream_t* streams, int low_priority_stream_idx,
int high_priority_stream_idx, size_t n_streams, size_t size) {
// Initialize host data
int *h_src[n_streams];
for (int i = 0; i < n_streams; i++) {
ERR_EQ(h_src[i] = (int * ) malloc(size), NULL);
mem_init(h_src[i], size);
}
// Initialize device data
int *h_dst[n_streams];
for (int i = 0; i < n_streams; i++) {
ERR_EQ(h_dst[i] = (int * ) malloc(size), NULL);
memset(h_dst[i], 0, size);
}
// copy source data -> device
int *d_src[n_streams];
for (int i = 0; i < n_streams; i++) {
checkCudaErrors(hipMalloc(&d_src[i], size));
checkCudaErrors(
hipMemcpyAsync(d_src[i], h_src[i], size, hipMemcpyHostToDevice, streams[i]));
}
// allocate memory for memcopy destination
int *d_dst[n_streams];
for (int i = 0; i < n_streams; i++) {
checkCudaErrors(hipMalloc(&d_dst[i], size));
}
for (int i = 0; i < n_streams; i++) {
checkCudaErrors(hipStreamSynchronize(streams[i]));
}
/* Kernel invocations */
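// Note (inferred from the code): each kernel below is bracketed by events recorded on its
// own stream and the host synchronizes on the end event, so the kernels execute one at a
// time; the event timestamps are never read here, so timing is expected to come from an
// external profiler (hence the "named kernels for easier profiling" above).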
// Run each priority on its own
for (int i = 0; i < n_streams; i++) {
hipEvent_t start, end;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&end));
checkCudaErrors(hipEventRecord(start, streams[i]));
if (i == low_priority_stream_idx) {
hipLaunchKernelGGL(( low), dim3(TBLOCKS), dim3(THREADS), 0, streams[i], d_dst[i], d_src[i], TOTAL_SIZE);
} else {
hipLaunchKernelGGL(( high), dim3(TBLOCKS), dim3(THREADS), 0, streams[i], d_dst[i], d_src[i], TOTAL_SIZE);
}
checkCudaErrors(hipEventRecord(end, streams[i]));
checkCudaErrors(hipEventSynchronize(end));
}
// Copy result to host
for (int i = 0; i < n_streams; i++) {
checkCudaErrors(
hipMemcpyAsync(h_dst[i], d_dst[i], size, hipMemcpyDeviceToHost, streams[i]));
}
// // check results of kernels
// for (int i = 0; i < n_streams; i++) {
// ERR_NE(memcmp(h_dst[i], h_src[i], size), 0);
// }
// Clean up
for (int i = 0; i < n_streams; i++) {
checkCudaErrors(hipFree(d_src[i]));
checkCudaErrors(hipFree(d_dst[i]));
}
for (int i = 0; i < n_streams; i++) {
free(h_src[i]);
}
return hipSuccess;
}
hipError_t concurrent_test(hipStream_t* streams, int low_priority_stream_idx,
int high_priority_stream_idx, int n_kernels, size_t size) {
// Initialize host data
int *h_src[n_kernels];
for (int i = 0; i < n_kernels; i++) {
ERR_EQ(h_src[i] = (int * ) malloc(size), NULL);
mem_init(h_src[i], size);
}
// Initialize device data
int *h_dst[n_kernels];
for (int i = 0; i < n_kernels; i++) {
ERR_EQ(h_dst[i] = (int * ) malloc(size), NULL);
memset(h_dst[i], 0, size);
}
// copy source data -> device
int *d_src[n_kernels];
checkCudaErrors(hipMalloc(&d_src[0], size));
checkCudaErrors(
hipMemcpyAsync(d_src[0], h_src[0], size, hipMemcpyHostToDevice, streams[low_priority_stream_idx]));
for (int i = 1; i < n_kernels; i++) { // start at 1: d_src[0] was already allocated and copied on the low-priority stream above
checkCudaErrors(hipMalloc(&d_src[i], size));
checkCudaErrors(
hipMemcpyAsync(d_src[i], h_src[i], size, hipMemcpyHostToDevice, streams[high_priority_stream_idx]));
}
// allocate memory for memcopy destination
int *d_dst[n_kernels];
for (int i = 0; i < n_kernels; i++) {
checkCudaErrors(hipMalloc(&d_dst[i], size));
}
/* */
// create some events
hipEvent_t ev_start[n_kernels];
hipEvent_t ev_end[n_kernels];
for (int i = 0; i < n_kernels; i++) {
checkCudaErrors(hipEventCreate(&ev_start[i]));
checkCudaErrors(hipEventCreate(&ev_end[i]));
}
for (int i = 0; i < 2; i++) {
checkCudaErrors(hipStreamSynchronize(streams[i]));
}
// Start low priority kernel
checkCudaErrors(
hipEventRecord(ev_start[0], streams[low_priority_stream_idx]));
hipLaunchKernelGGL(( low_preempt), dim3(TBLOCKS), dim3(THREADS), 0, streams[0], d_dst[0], d_src[0],
size);
checkCudaErrors(hipEventRecord(ev_end[0], streams[low_priority_stream_idx]));
// synchronize on the start, so we launch this after the low priority kernel has started
checkCudaErrors(hipEventSynchronize(ev_start[0]));
// Launch n_kernels - 1 high priority kernels synchronously
for (int i = 1; i < n_kernels; i++) {
checkCudaErrors(
hipEventRecord(ev_start[i], streams[high_priority_stream_idx]));
hipLaunchKernelGGL(( high_preempt), dim3(TBLOCKS), dim3(THREADS), 0, streams[high_priority_stream_idx], d_dst[i], d_src[i],
size);
checkCudaErrors(
hipEventRecord(ev_end[i], streams[high_priority_stream_idx]));
checkCudaErrors(hipEventSynchronize(ev_end[i]));
}
// wait for the low priority kernel to finish
checkCudaErrors(hipEventSynchronize(ev_end[0]));
// Copy result to host
checkCudaErrors(
hipMemcpyAsync(h_dst[0], d_dst[0], size, hipMemcpyDeviceToHost, streams[low_priority_stream_idx]));
for (int i = 1; i < n_kernels; i++) {
checkCudaErrors(
hipMemcpyAsync(h_dst[i], d_dst[i], size, hipMemcpyDeviceToHost, streams[high_priority_stream_idx]));
}
// // check results of kernels
// for (int i = 0; i < n_kernels; i++) {
// ERR_NE(memcmp(h_dst[i], h_src[i], size), 0);
// }
// Clean up
for (int i = 0; i < n_kernels; i++) {
checkCudaErrors(hipFree(d_src[i]));
checkCudaErrors(hipFree(d_dst[i]));
}
for (int i = 0; i < n_kernels; i++) {
free(h_src[i]);
}
return hipSuccess;
}
/**
* Creates streams with priority ranging from high to low and stores them in the streams array.
* Streams are ordered from highest to lowest priority.
*/
hipError_t createStreams(hipStream_t* streams, int priority_low,
int priority_hi, size_t n_streams) {
for (int i = 0; i < n_streams; i++) {
checkCudaErrors(
hipStreamCreateWithPriority(&streams[i], hipStreamNonBlocking,
priority_hi + i));
}
return hipSuccess;
}
/**
* Creates a stream with low priority and starts a long-running kernel on it.
* Creates a stream with high priority and runs a short-running kernel on it,
* after the low-priority kernel has begun.
* -- If preemption works, the run time of the low priority kernel should
* be extended by the runtime of the high priority kernel which preempts it.
*/
hipError_t experiment(int priority_low, int priority_hi) {
// Create streams
size_t n_streams = (priority_low - priority_hi) + 1;
hipStream_t streams[n_streams];
checkCudaErrors(createStreams(streams, priority_low, priority_hi, n_streams));
size_t size = TOTAL_SIZE; // Size of host data
size_t n_kernels = N_HIGH_KERNELS + 1; // 1 low and N high
for (int i = 0; i < N_ITERATIONS; i++) {
checkCudaErrors(solo_test(streams, 0, 1, n_streams, size));
checkCudaErrors(concurrent_test(streams, 0, 1, n_kernels, size));
}
return hipSuccess;
}
|
bc070a0d7f8b752d984ca43510457ac680cf3dec.cu
|
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// std::system includes
#include <cstdio>
// CUDA-C includes
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_profiler_api.h>
#include <helper_cuda.h>
#define TOTAL_SIZE 256*1024*1024
// # threadblocks
#define TBLOCKS 1024
#define THREADS 512
// throw error on equality
#define ERR_EQ(X,Y) do { if ((X) == (Y)) { \
fprintf(stderr,"Error in %s at %s:%d\n",__func__,__FILE__,__LINE__); \
exit(-1);}} while(0)
// throw error on difference
#define ERR_NE(X,Y) do { if ((X) != (Y)) { \
fprintf(stderr,"Error in %s at %s:%d\n",__func__,__FILE__,__LINE__); \
exit(-1);}} while(0)
// experiment iterations to compute averages
#define N_ITERATIONS 2
#define N_HIGH_KERNELS 4
// copy from source -> destination arrays
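// Note (inferred from the code): both helpers use a grid-stride loop so the fixed
// TBLOCKS x THREADS launch covers the whole buffer regardless of its size; slow_kernel
// additionally spins on a volatile counter (DELAY iterations per element) purely to
// lengthen its runtime for the stream-priority/preemption experiment.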
__device__ void slow_kernel(int *dst, int *src, size_t n) {
int num = gridDim.x * blockDim.x;
int id = blockDim.x * blockIdx.x + threadIdx.x;
for (int i = id; i < n / sizeof(int); i += num) {
#define DELAY 2048
for (volatile int j = 0; j < DELAY; j++)
;
dst[i] = src[i];
}
}
__device__ void fast_kernel(int *dst, int *src, size_t n) {
int num = gridDim.x * blockDim.x;
int id = blockDim.x * blockIdx.x + threadIdx.x;
for (int i = id; i < n / sizeof(int); i += num) {
dst[i] = src[i];
}
}
// named kernels for easier profiling
__global__ void low(int *dst, int *src, size_t n) {
slow_kernel(dst, src, n);
}
__global__ void high(int *dst, int *src, size_t n) {
fast_kernel(dst, src, n);
}
__global__ void low_preempt(int *dst, int *src, size_t n) {
slow_kernel(dst, src, n);
}
__global__ void high_preempt(int *dst, int *src, size_t n) {
fast_kernel(dst, src, n);
}
// initialise memory
void mem_init(int *buf, size_t n) {
for (int i = 0; i < n / sizeof(int); i++) {
buf[i] = i;
}
}
// Forward declaration
cudaError_t experiment(int priority_low, int priority_hi);
int main(int argc, char **argv) {
cudaDeviceProp device_prop;
int dev_id;
printf("Starting [%s]...\n", argv[0]);
// set device
dev_id = findCudaDevice(argc, (const char **) argv);
checkCudaErrors(cudaGetDeviceProperties(&device_prop, dev_id));
if ((device_prop.major << 4) + device_prop.minor < 0x35) {
fprintf(stderr,
"%s requires Compute Capability of SM 3.5 or higher to run.\nexiting...\n",
argv[0]);
exit (EXIT_WAIVED);
}
// get the range of priorities available
// [ greatest_priority, least_priority ]
int priority_low;
int priority_hi;
checkCudaErrors(
cudaDeviceGetStreamPriorityRange(&priority_low, &priority_hi));
printf("CUDA stream priority range: LOW: %d to HIGH: %d\n", priority_low,
priority_hi);
experiment(priority_low, priority_hi);
exit (EXIT_SUCCESS);
}
cudaError_t solo_test(cudaStream_t* streams, int low_priority_stream_idx,
int high_priority_stream_idx, size_t n_streams, size_t size) {
// Initialize host data
int *h_src[n_streams];
for (int i = 0; i < n_streams; i++) {
ERR_EQ(h_src[i] = (int * ) malloc(size), NULL);
mem_init(h_src[i], size);
}
// Initialize device data
int *h_dst[n_streams];
for (int i = 0; i < n_streams; i++) {
ERR_EQ(h_dst[i] = (int * ) malloc(size), NULL);
memset(h_dst[i], 0, size);
}
// copy source data -> device
int *d_src[n_streams];
for (int i = 0; i < n_streams; i++) {
checkCudaErrors(cudaMalloc(&d_src[i], size));
checkCudaErrors(
cudaMemcpyAsync(d_src[i], h_src[i], size, cudaMemcpyHostToDevice, streams[i]));
}
// allocate memory for memcopy destination
int *d_dst[n_streams];
for (int i = 0; i < n_streams; i++) {
checkCudaErrors(cudaMalloc(&d_dst[i], size));
}
for (int i = 0; i < n_streams; i++) {
checkCudaErrors(cudaStreamSynchronize(streams[i]));
}
/* Kernel invocations */
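// Note (inferred from the code): each kernel below is bracketed by events recorded on its
// own stream and the host synchronizes on the end event, so the kernels execute one at a
// time; the event timestamps are never read here, so timing is expected to come from an
// external profiler (hence the "named kernels for easier profiling" above).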
// Run each priority on its own
for (int i = 0; i < n_streams; i++) {
cudaEvent_t start, end;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&end));
checkCudaErrors(cudaEventRecord(start, streams[i]));
if (i == low_priority_stream_idx) {
low<<<TBLOCKS, THREADS, 0, streams[i]>>>(d_dst[i], d_src[i], TOTAL_SIZE);
} else {
high<<<TBLOCKS, THREADS, 0, streams[i]>>>(d_dst[i], d_src[i], TOTAL_SIZE);
}
checkCudaErrors(cudaEventRecord(end, streams[i]));
checkCudaErrors(cudaEventSynchronize(end));
}
// Copy result to host
for (int i = 0; i < n_streams; i++) {
checkCudaErrors(
cudaMemcpyAsync(h_dst[i], d_dst[i], size, cudaMemcpyDeviceToHost, streams[i]));
}
// // check results of kernels
// for (int i = 0; i < n_streams; i++) {
// ERR_NE(memcmp(h_dst[i], h_src[i], size), 0);
// }
// Clean up
for (int i = 0; i < n_streams; i++) {
checkCudaErrors(cudaFree(d_src[i]));
checkCudaErrors(cudaFree(d_dst[i]));
}
for (int i = 0; i < n_streams; i++) {
free(h_src[i]);
}
return cudaSuccess;
}
cudaError_t concurrent_test(cudaStream_t* streams, int low_priority_stream_idx,
int high_priority_stream_idx, int n_kernels, size_t size) {
// Initialize host data
int *h_src[n_kernels];
for (int i = 0; i < n_kernels; i++) {
ERR_EQ(h_src[i] = (int * ) malloc(size), NULL);
mem_init(h_src[i], size);
}
// Initialize device data
int *h_dst[n_kernels];
for (int i = 0; i < n_kernels; i++) {
ERR_EQ(h_dst[i] = (int * ) malloc(size), NULL);
memset(h_dst[i], 0, size);
}
// copy source data -> device
int *d_src[n_kernels];
checkCudaErrors(cudaMalloc(&d_src[0], size));
checkCudaErrors(
cudaMemcpyAsync(d_src[0], h_src[0], size, cudaMemcpyHostToDevice, streams[low_priority_stream_idx]));
for (int i = 1; i < n_kernels; i++) { // start at 1: d_src[0] was already allocated and copied on the low-priority stream above
checkCudaErrors(cudaMalloc(&d_src[i], size));
checkCudaErrors(
cudaMemcpyAsync(d_src[i], h_src[i], size, cudaMemcpyHostToDevice, streams[high_priority_stream_idx]));
}
// allocate memory for memcopy destination
int *d_dst[n_kernels];
for (int i = 0; i < n_kernels; i++) {
checkCudaErrors(cudaMalloc(&d_dst[i], size));
}
/* */
// create some events
cudaEvent_t ev_start[n_kernels];
cudaEvent_t ev_end[n_kernels];
for (int i = 0; i < n_kernels; i++) {
checkCudaErrors(cudaEventCreate(&ev_start[i]));
checkCudaErrors(cudaEventCreate(&ev_end[i]));
}
for (int i = 0; i < 2; i++) {
checkCudaErrors(cudaStreamSynchronize(streams[i]));
}
// Start low priority kernel
checkCudaErrors(
cudaEventRecord(ev_start[0], streams[low_priority_stream_idx]));
low_preempt<<<TBLOCKS, THREADS, 0, streams[0]>>>(d_dst[0], d_src[0],
size);
checkCudaErrors(cudaEventRecord(ev_end[0], streams[low_priority_stream_idx]));
// synchronize on the start, so we launch this after the low priority kernel has started
checkCudaErrors(cudaEventSynchronize(ev_start[0]));
// Launch n_kernels - 1 high priority kernels synchronously
for (int i = 1; i < n_kernels; i++) {
checkCudaErrors(
cudaEventRecord(ev_start[i], streams[high_priority_stream_idx]));
high_preempt<<<TBLOCKS, THREADS, 0, streams[high_priority_stream_idx]>>>(d_dst[i], d_src[i],
size);
checkCudaErrors(
cudaEventRecord(ev_end[i], streams[high_priority_stream_idx]));
checkCudaErrors(cudaEventSynchronize(ev_end[i]));
}
// wait for the low priority kernel to finish
checkCudaErrors(cudaEventSynchronize(ev_end[0]));
// Copy result to host
checkCudaErrors(
cudaMemcpyAsync(h_dst[0], d_dst[0], size, cudaMemcpyDeviceToHost, streams[low_priority_stream_idx]));
for (int i = 1; i < n_kernels; i++) {
checkCudaErrors(
cudaMemcpyAsync(h_dst[i], d_dst[i], size, cudaMemcpyDeviceToHost, streams[high_priority_stream_idx]));
}
// // check results of kernels
// for (int i = 0; i < n_kernels; i++) {
// ERR_NE(memcmp(h_dst[i], h_src[i], size), 0);
// }
// Clean up
for (int i = 0; i < n_kernels; i++) {
checkCudaErrors(cudaFree(d_src[i]));
checkCudaErrors(cudaFree(d_dst[i]));
}
for (int i = 0; i < n_kernels; i++) {
free(h_src[i]);
}
return cudaSuccess;
}
/**
* Creates streams with priority ranging from high to low and stores them in the streams array.
* Streams are ordered from highest to lowest priority.
*/
cudaError_t createStreams(cudaStream_t* streams, int priority_low,
int priority_hi, size_t n_streams) {
for (int i = 0; i < n_streams; i++) {
checkCudaErrors(
cudaStreamCreateWithPriority(&streams[i], cudaStreamNonBlocking,
priority_hi + i));
}
return cudaSuccess;
}
/**
* Creates a stream with low priority and starts a long-running kernel on it.
* Creates a stream with high priority and runs a short-running kernel on it,
* after the low-priority kernel has begun.
* -- If preemption works, the run time of the low priority kernel should
* be extended by the runtime of the high priority kernel which preempts it.
*/
cudaError_t experiment(int priority_low, int priority_hi) {
// Create streams
size_t n_streams = (priority_low - priority_hi) + 1;
cudaStream_t streams[n_streams];
checkCudaErrors(createStreams(streams, priority_low, priority_hi, n_streams));
size_t size = TOTAL_SIZE; // Size of host data
size_t n_kernels = N_HIGH_KERNELS + 1; // 1 low and N high
for (int i = 0; i < N_ITERATIONS; i++) {
checkCudaErrors(solo_test(streams, 0, 1, n_streams, size));
checkCudaErrors(concurrent_test(streams, 0, 1, n_kernels, size));
}
return cudaSuccess;
}
|
9c81afdbfffa0d93a00eedf30f14784d89fef743.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <stdbool.h>
//include the header file
//Uncomment below if including into a *.c file rather than *.cu
//extern "C" {
#include "NBodyVisualiser.h"
//}
#include <hip/hip_runtime.h>
#include <cuda_gl_interop.h>
#define TIMING_FRAME_COUNT 20
//User supplied globals
static unsigned int N;
static unsigned int D;
static MODE M;
const float *PositionsX = 0;
const float *PositionsY = 0;
const nbody *Bodies = 0;
const float *Densities = 0;
void(*simulate_function)(void) = 0;
// instancing variables for histogram
GLuint vao_hist = 0;
GLuint vao_hist_vertices = 0;
GLuint tbo_hist = 0;
GLuint tex_hist = 0;
GLuint vao_hist_instance_ids = 0;
// instancing variables for nbody
GLuint vao_nbody = 0;
GLuint vao_nbody_vertices = 0;
GLuint tbo_nbody = 0;
GLuint tex_nbody = 0;
GLuint vao_nbody_instance_ids = 0;
// mouse controls
int mouse_old_x, mouse_old_y;
int mouse_buttons = 0;
float rotate_x = 0.0, rotate_z = 0.0;
float translate_z = -1.0;
// vertex shader handles
GLuint vs_hist_shader = 0;
GLuint vs_nbody_shader = 0;
GLuint vs_hist_program = 0;
GLuint vs_nbody_program = 0;
GLuint vs_hist_instance_index = 0;
GLuint vs_nbody_instance_index = 0;
//render options
bool display_bodies = true;
bool display_denisty = false;
//cuda graphics resources
struct cudaGraphicsResource *cuda_nbody_vbo_resource;
struct cudaGraphicsResource *cuda_hist_vbo_resource;
//timing
float elapsed = 0;
float prev_time = 0;
unsigned int frames;
char title[128];
// function prototypes
void displayLoop(void);
void initHistShader();
void initNBodyShader();
void initHistVertexData();
void initNBodyVertexData();
void initGL();
void destroyViewer();
void render(void);
void checkGLError();
void handleKeyboardDefault(unsigned char key, int x, int y);
void handleMouseDefault(int button, int state, int x, int y);
void handleMouseMotionDefault(int x, int y);
void checkCUDAError(const char *msg);
// Vertex shader source code
const char* hist_vertexShaderSource =
{
"#version 130 \n"
"#extension GL_EXT_gpu_shader4 : enable \n"
"uniform samplerBuffer instance_tex; \n"
"in uint instance_index; \n"
"void main() \n"
"{ \n"
" float instance_data = texelFetchBuffer(instance_tex, int(instance_index)).x; \n"
" vec4 position = vec4(gl_Vertex.x, gl_Vertex.y, 0.0f, 1.0f); \n"
" gl_FrontColor = vec4(instance_data, 0.0f, 0.0f, 0.0f); \n"
" gl_Position = gl_ModelViewProjectionMatrix * position; \n"
"} \n"
};
const char* nbody_vertexShaderSource =
{
"#version 130 \n"
"#extension GL_EXT_gpu_shader4 : enable \n"
"uniform samplerBuffer instance_tex; \n"
"in uint instance_index; \n"
"void main() \n"
"{ \n"
" vec2 instance_data = texelFetchBuffer(instance_tex, int(instance_index)).xy; \n"
" vec4 position = vec4(gl_Vertex.x+instance_data.x, \n"
" gl_Vertex.y+instance_data.y, \n"
" gl_Vertex.z, 1.0f); \n"
" gl_FrontColor = vec4(1.0f, 1.0f, 1.0f, 0.0f); \n"
" gl_Position = gl_ModelViewProjectionMatrix * position; \n"
"} \n"
};
//////////////////////////////// CUDA Kernels ////////////////////////////////
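// Note (inferred from the code): each kernel below uses one thread per body or per
// activity-map cell to scatter the user-supplied device data into the OpenGL texture
// buffer object mapped for device access in displayLoop(); body positions are written as
// interleaved (x, y) float pairs to match the RG32F buffer texture read by the vertex shader.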
__global__ void copyNBodyData2f(float* buffer, const float *x, const float *y, unsigned int N)
{
unsigned int i = threadIdx.x + blockIdx.x*blockDim.x;
if (i < N){
//copy data to mapped buffer
float* ptr = &buffer[i * 2];
ptr[0] = x[i];
ptr[1] = y[i];
}
}
__global__ void copyNBodyData(float* buffer, const nbody* bodies, unsigned int N)
{
unsigned int i = threadIdx.x + blockIdx.x*blockDim.x;
if (i < N){
//copy data to mapped buffer
float* ptr = &buffer[i * 2];
ptr[0] = bodies[i].x;
ptr[1] = bodies[i].y;
}
}
__global__ void copyHistData(float* buffer, const float* densities, unsigned int D)
{
unsigned int i = threadIdx.x + blockIdx.x*blockDim.x;
if (i < D*D){
//copy data to mapped buffer
buffer[i] = densities[i];
}
}
//////////////////////////////// Header declared functions ////////////////////////////////
void initViewer(unsigned int n, unsigned int d, MODE m, void(*simulate)(void))
{
N = n;
D = d;
M = m;
simulate_function = simulate;
//check for UVA (not available in 32 bit host mode)
if (M == CUDA){
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
if (prop.unifiedAddressing != 1){
printf("Error: No UVA found. Are you trying to build you CUDA code in 32bit mode?\n");
}
}
//initialise the OpenGL viewer and context
initGL();
//init our instance rendering and the data
initHistShader();
initNBodyShader();
initHistVertexData();
initNBodyVertexData();
}
void setNBodyPositions2f(const float *positions_x, const float *positions_y)
{
//check that the supplied pointers are device pointers
if (M == CUDA){
hipPointerAttribute_t attributes;
//host allocated memory will cause an error
if (hipPointerGetAttributes(&attributes, positions_x) == hipErrorInvalidValue){
hipGetLastError(); // clear out the previous API error
printf("Error: Pointer (positions_x) passed to setNBodyPositions2f must be a device pointer in CUDA mode!\n");
return;
}
//memory not allocated on the device will fail this check; with UVA the reported type may be hipMemoryTypeHost
if (attributes.type != hipMemoryTypeDevice){
printf("Error: Pointer (positions_x) passed to setNBodyPositions2f must be a device pointer in CUDA mode!\n");
return;
}
//host allocated memory will cause an error
if (hipPointerGetAttributes(&attributes, positions_y) == hipErrorInvalidValue){
hipGetLastError(); // clear out the previous API error
printf("Error: Pointer (positions_y) passed to setNBodyPositions2f must be a device pointer in CUDA mode!\n");
return;
}
//memory not allocated on the device will fail this check; with UVA the reported type may be hipMemoryTypeHost
if (attributes.type != hipMemoryTypeDevice){
printf("Error: Pointer (positions_y) passed to setNBodyPositions2f must be a device pointer in CUDA mode!\n");
return;
}
}
PositionsX = positions_x;
PositionsY = positions_y;
if (Bodies != 0){
printf("Warning: You should use either setNBodyPositions2f or setNBodyPositions\n");
}
}
void setNBodyPositions(const nbody *bodies)
{
//check that the supplied pointer is a device pointer
if (M == CUDA){
hipPointerAttribute_t attributes;
//host allocated memory will cause an error
if (hipPointerGetAttributes(&attributes, bodies) == hipErrorInvalidValue){
hipGetLastError(); // clear out the previous API error
printf("Error: Pointer (bodies) passed to setNBodyPositions must be a device pointer in CUDA mode!\n");
return;
}
//memory not allocated on the device will fail this check; with UVA the reported type may be hipMemoryTypeHost
if (attributes.type != hipMemoryTypeDevice){
printf("Error: Pointer (bodies) passed to setNBodyPositions must be a device pointer in CUDA mode!\n");
return;
}
}
Bodies = bodies;
if ((PositionsX != 0) || (PositionsY != 0)){
printf("Warning: You should use either setNBodyPositions2f or setNBodyPositions\n");
}
}
void setHistogramData(const float *densities)
{
setActivityMapData(densities);
}
void setActivityMapData(const float *activity)
{
//if CUDA check that the supplied pointer is a device pointer
if (M == CUDA){
hipPointerAttribute_t attributes;
//host allocated memory will cause an error
if (hipPointerGetAttributes(&attributes, activity) == hipErrorInvalidValue){
hipGetLastError(); // clear out the previous API error
printf("Error: Pointer passed to setActivityMap (or setHistogramData) must be a device pointer in CUDA mode!\n");
return;
}
//memory not allocated on the device will fail this check; with UVA the reported type may be hipMemoryTypeHost
if (attributes.type != hipMemoryTypeDevice){
printf("Error: Pointer passed to setActivityMap (or setHistogramData) must be a device pointer in CUDA mode!\n");
return;
}
}
Densities = activity;
}
void startVisualisationLoop()
{
glutMainLoop();
}
//////////////////////////////// Source module functions ////////////////////////////////
void displayLoop(void)
{
unsigned int i;
float *dptr;
size_t num_bytes;
unsigned int blocks;
float t;
if (simulate_function == 0){
printf("Error: Simulate function has not been defined by calling initViewer(...)\n");
return;
}
//timing
if (M == CUDA)
hipDeviceSynchronize();
t = (float)clock();
if (prev_time)
elapsed += t - prev_time;
prev_time = t;
frames++;
if (frames == TIMING_FRAME_COUNT){
frames = 0;
elapsed /= TIMING_FRAME_COUNT;
sprintf(title, "Com4521 Assignment - NBody Visualiser (%f FPS)", 1000.0f /elapsed);
glutSetWindowTitle(title);
elapsed = 0;
}
//call the simulation function
simulate_function();
//Map data from user supplied pointers into TBO using CUDA
if (M == CUDA){
//NBODY: map buffer to device pointer so Kernel can populate it
glBindBuffer(GL_TEXTURE_BUFFER_EXT, tbo_nbody);
num_bytes = N * 2 * sizeof(float); // two floats (x, y) per body, matching the TBO allocation
hipGraphicsMapResources(1, &cuda_nbody_vbo_resource, 0);
hipGraphicsResourceGetMappedPointer((void **)&dptr, &num_bytes, cuda_nbody_vbo_resource);
//kernel to map data into buffer
blocks = N / 256;
if (N % 256 != 0)
blocks++;
//two possible formats for users to supply body data
if (Bodies != 0){
copyNBodyData << <blocks, 256 >> >(dptr, Bodies, N);
}
else if ((PositionsX != 0) && (PositionsY != 0)){
copyNBodyData2f << <blocks, 256 >> >(dptr, PositionsX, PositionsY, N);
}
hipGraphicsUnmapResources(1, &cuda_nbody_vbo_resource, 0);
checkCUDAError("Error copying NBody data from supplier device pointer\n");
glBindBuffer(GL_TEXTURE_BUFFER_EXT, 0);
//HIST: map buffer to device pointer so Kernel can populate it
glBindBuffer(GL_TEXTURE_BUFFER_EXT, tbo_hist);
num_bytes = D*D * sizeof(float);
hipGraphicsMapResources(1, &cuda_hist_vbo_resource, 0);
hipGraphicsResourceGetMappedPointer((void **)&dptr, &num_bytes, cuda_hist_vbo_resource);
//kernel to map data into buffer
blocks = D*D / 256;
if ((D*D) % 256 != 0)
blocks++;
copyHistData << <blocks, 256 >> >(dptr, Densities, D);
hipGraphicsUnmapResources(1, &cuda_hist_vbo_resource, 0);
checkCUDAError("Error copying Activity Map data from supplier device pointer\n");
glBindBuffer(GL_TEXTURE_BUFFER_EXT, 0);
}
//Map data from user supplied pointers into TBO using CPU
else{
//map buffer to positions TBO and copy data to it from user supplied pointer
glBindBuffer(GL_TEXTURE_BUFFER_EXT, tbo_nbody);
dptr = (float*)glMapBuffer(GL_TEXTURE_BUFFER_EXT, GL_WRITE_ONLY); //tbo_nbody buffer
if (dptr == 0){
printf("Error: Unable to map nBody Texture Buffer Object\n");
return;
}
if (Bodies != 0){
for (i = 0; i < N; i++){
unsigned int index = i * 2;
dptr[index] = Bodies[i].x;
dptr[index + 1] = Bodies[i].y;
}
}
else if ((PositionsX != 0) && (PositionsY != 0)){
for (i = 0; i < N; i++){
unsigned int index = i * 2;
dptr[index] = PositionsX[i];
dptr[index + 1] = PositionsY[i];
}
}
glUnmapBuffer(GL_TEXTURE_BUFFER_EXT);
glBindBuffer(GL_TEXTURE_BUFFER_EXT, 0);
//map hist buffer to positions TBO and copy data to it from user supplied pointer
glBindBuffer(GL_TEXTURE_BUFFER_EXT, tbo_hist);
dptr = (float*)glMapBuffer(GL_TEXTURE_BUFFER_EXT, GL_WRITE_ONLY); //tbo_hist buffer
if (dptr == 0){
printf("Error: Unable to map Histogram Texture Buffer Object\n");
return;
}
if (Densities != 0){
for (i = 0; i < D*D; i++){
dptr[i] = Densities[i];
}
}
glUnmapBuffer(GL_TEXTURE_BUFFER_EXT);
glBindBuffer(GL_TEXTURE_BUFFER_EXT, 0);
}
//render
render();
checkGLError();
}
void initHistShader()
{
//hist vertex shader
vs_hist_shader = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(vs_hist_shader, 1, &hist_vertexShaderSource, 0);
glCompileShader(vs_hist_shader);
// check for errors
GLint status;
glGetShaderiv(vs_hist_shader, GL_COMPILE_STATUS, &status);
if (status == GL_FALSE){
printf("ERROR: Histogram Shader Compilation Error\n");
char data[1024];
int len;
glGetShaderInfoLog(vs_hist_shader, 1024, &len, data);
printf("%s", data);
}
//program
vs_hist_program = glCreateProgram();
glAttachShader(vs_hist_program, vs_hist_shader);
glLinkProgram(vs_hist_program);
glGetProgramiv(vs_hist_program, GL_LINK_STATUS, &status);
if (status == GL_FALSE){
printf("ERROR: Histogram Shader Program Link Error\n");
}
glUseProgram(vs_hist_program);
// get shader variables
vs_hist_instance_index = glGetAttribLocation(vs_hist_program, "instance_index");
if (vs_hist_instance_index == (GLuint)-1){
printf("Warning: Histogram Shader program missing 'attribute in uint instance_index'\n");
}
glUseProgram(0);
//check for any errors
checkGLError();
}
void initNBodyShader()
{
//nbody vertex shader
vs_nbody_shader = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(vs_nbody_shader, 1,&nbody_vertexShaderSource, 0);
glCompileShader(vs_nbody_shader);
// check for errors
GLint status;
glGetShaderiv(vs_nbody_shader, GL_COMPILE_STATUS, &status);
if (status == GL_FALSE){
printf("ERROR: nbody Program Shader Compilation Error\n");
char data[1024];
int len;
glGetShaderInfoLog(vs_nbody_shader, 1024, &len, data);
printf("%s", data);
}
//program
vs_nbody_program = glCreateProgram();
glAttachShader(vs_nbody_program, vs_nbody_shader);
glLinkProgram(vs_nbody_program);
glGetProgramiv(vs_nbody_program, GL_LINK_STATUS, &status);
if (status == GL_FALSE){
printf("ERROR: NBody Shader Program Link Error\n");
}
glUseProgram(vs_nbody_program);
// get shader variables
vs_nbody_instance_index = glGetAttribLocation(vs_nbody_program, "instance_index");
if (vs_nbody_instance_index == (GLuint)-1){
printf("Warning: nbody Program Shader program missing 'attribute in uint instance_index'\n");
}
glUseProgram(0);
//check for any errors
checkGLError();
}
void initHistVertexData()
{
/* vertex array object */
glGenVertexArrays(1, &vao_hist); // Create our Vertex Array Object
glBindVertexArray(vao_hist); // Bind our Vertex Array Object so we can use it
/* create a vertex buffer */
// create buffer object (all vertex positions normalised between -0.5 and +0.5)
glGenBuffers(1, &vao_hist_vertices);
glBindBuffer(GL_ARRAY_BUFFER, vao_hist_vertices);
glBufferData(GL_ARRAY_BUFFER, D*D * 4 * 3 * sizeof(float), 0, GL_STATIC_DRAW);
float* verts = (float*)glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
float quad_size = 1.0f / (float)(D);
for (unsigned int x = 0; x < D; x++) {
for (unsigned int y = 0; y < D; y++) {
int offset = (x + (y * (D))) * 3 * 4;
float x_min = (float)x / (float)(D);
float y_min = (float)y / (float)(D);
//first vertex
verts[offset + 0] = x_min - 0.5f;
verts[offset + 1] = y_min - 0.5f;
verts[offset + 2] = 0.0f;
//second vertex
verts[offset + 3] = x_min - 0.5f;
verts[offset + 4] = y_min + quad_size - 0.5f;
verts[offset + 5] = 0.0f;
//third vertex
verts[offset + 6] = x_min + quad_size - 0.5f;
verts[offset + 7] = y_min + quad_size - 0.5f;
verts[offset + 8] = 0.0f;
//fourth vertex
verts[offset + 9] = x_min + quad_size - 0.5f;
verts[offset + 10] = y_min - 0.5f;
verts[offset + 11] = 0.0f;
}
}
glUnmapBuffer(GL_ARRAY_BUFFER);
glVertexAttribPointer((GLuint)0, 3, GL_FLOAT, GL_FALSE, 0, 0); // Set up our vertex attributes pointer
glEnableVertexAttribArray(0);
checkGLError();
// instance index buffer
glGenBuffers(1, &vao_hist_instance_ids);
glBindBuffer(GL_ARRAY_BUFFER, vao_hist_instance_ids);
glBufferData(GL_ARRAY_BUFFER, D*D * 4 * sizeof(unsigned int), 0, GL_STATIC_DRAW);
unsigned int* ids = (unsigned int*)glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
for (unsigned int x = 0; x < D; x++) {
for (unsigned int y = 0; y < D; y++) {
int index = (x + (y * (D)));
int offset = index * 4;
//four vertices (a quad) have the same instance index
ids[offset + 0] = index;
ids[offset + 1] = index;
ids[offset + 2] = index;
ids[offset + 3] = index;
}
}
//map instance
glVertexAttribIPointer((GLuint)vs_hist_instance_index, 1, GL_UNSIGNED_INT, 0, 0); // Set up instance id attributes pointer in shader
glEnableVertexAttribArray(vs_hist_instance_index);
glUnmapBuffer(GL_ARRAY_BUFFER);
//check for errors
checkGLError();
/* texture buffer object */
glGenBuffers(1, &tbo_hist);
glBindBuffer(GL_TEXTURE_BUFFER, tbo_hist);
glBufferData(GL_TEXTURE_BUFFER, D*D * 1 * sizeof(float), 0, GL_DYNAMIC_DRAW); // 1 float elements in a texture buffer object for histogram density
/* generate texture */
glGenTextures(1, &tex_hist);
glBindTexture(GL_TEXTURE_BUFFER, tex_hist);
glTexBuffer(GL_TEXTURE_BUFFER, GL_R32F, tbo_hist);
//create cuda gl resource to write cuda data to TBO
if (M == CUDA){
hipGraphicsGLRegisterBuffer(&cuda_hist_vbo_resource, tbo_hist, hipGraphicsMapFlagsWriteDiscard);
}
//unbind buffers
glBindBuffer(GL_TEXTURE_BUFFER, 0);
//unbind vao
glBindVertexArray(0); // Unbind our Vertex Array Object
checkGLError();
}
void initNBodyVertexData()
{
/* vertex array object */
glGenVertexArrays(1, &vao_nbody); // Create our Vertex Array Object
glBindVertexArray(vao_nbody); // Bind our Vertex Array Object so we can use it
/* create a vertex buffer */
// create buffer object (all vertex positions normalised between -0.5 and +0.5)
glGenBuffers(1, &vao_nbody_vertices);
glBindBuffer(GL_ARRAY_BUFFER, vao_nbody_vertices);
glBufferData(GL_ARRAY_BUFFER, N * 3 * sizeof(float), 0, GL_STATIC_DRAW);
float* verts = (float*)glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
for (unsigned int i = 0; i < N; i++) {
int offset = i*3;
//vertex point
verts[offset + 0] = -0.5f;
verts[offset + 1] = -0.5f;
verts[offset + 2] = 0.0f;
}
glUnmapBuffer(GL_ARRAY_BUFFER);
glVertexAttribPointer((GLuint)0, 3, GL_FLOAT, GL_FALSE, 0, 0); // Set up our vertex attributes pointer
glEnableVertexAttribArray(0);
checkGLError();
// instance index buffer
glGenBuffers(1, &vao_nbody_instance_ids);
glBindBuffer(GL_ARRAY_BUFFER, vao_nbody_instance_ids);
glBufferData(GL_ARRAY_BUFFER, N * 1 * sizeof(unsigned int), 0, GL_STATIC_DRAW);
unsigned int* ids = (unsigned int*)glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
for (unsigned int i = 0; i < N; i++) {
//single vertex as it is a point
ids[i] = i;
}
//map instance
glVertexAttribIPointer((GLuint)vs_nbody_instance_index, 1, GL_UNSIGNED_INT, 0, 0); // Set up instance id attributes pointer in shader
glEnableVertexAttribArray(vs_nbody_instance_index);
glUnmapBuffer(GL_ARRAY_BUFFER);
//check for errors
checkGLError();
/* texture buffer object */
glGenBuffers(1, &tbo_nbody);
glBindBuffer(GL_TEXTURE_BUFFER, tbo_nbody);
glBufferData(GL_TEXTURE_BUFFER, N * 2 * sizeof(float), 0, GL_DYNAMIC_DRAW); // 2 float elements in a texture buffer object for x and y position
/* generate texture */
glGenTextures(1, &tex_nbody);
glBindTexture(GL_TEXTURE_BUFFER, tex_nbody);
glTexBuffer(GL_TEXTURE_BUFFER, GL_RG32F, tbo_nbody);
//create cuda gl resource to write cuda data to TBO
if (M == CUDA){
hipGraphicsGLRegisterBuffer(&cuda_nbody_vbo_resource, tbo_nbody, hipGraphicsMapFlagsWriteDiscard);
}
//unbind buffers
glBindBuffer(GL_TEXTURE_BUFFER, 0);
//unbind vao
glBindVertexArray(0); // Unbind our Vertex Array Object
checkGLError();
}
void destroyViewer()
{
checkGLError();
//cleanup hist vao
glBindVertexArray(vao_hist);
glDeleteBuffers(1, &vao_hist_vertices);
vao_hist_vertices = 0;
glDeleteBuffers(1, &vao_hist_instance_ids);
vao_hist_instance_ids = 0;
glDeleteBuffers(1, &tbo_hist);
tbo_hist = 0;
glDeleteTextures(1, &tex_hist);
tex_hist = 0;
if (M == CUDA){
hipGraphicsUnregisterResource(cuda_hist_vbo_resource);
}
glDeleteVertexArrays(1, &vao_hist);
vao_hist = 0;
//cleanup nbody vao
glBindVertexArray(vao_nbody);
glDeleteBuffers(1, &vao_nbody_vertices);
vao_nbody_vertices = 0;
glDeleteBuffers(1, &vao_nbody_instance_ids);
vao_nbody_instance_ids = 0;
glDeleteBuffers(1, &tbo_nbody);
tbo_nbody = 0;
glDeleteTextures(1, &tex_nbody);
tex_nbody = 0;
if (M == CUDA){
hipGraphicsUnregisterResource(cuda_nbody_vbo_resource);
}
glDeleteVertexArrays(1, &vao_nbody);
vao_nbody = 0;
checkGLError();
}
void initGL()
{
int argc = 1;
char * argv[] = { "Com4521 Assignment - NBody Visualiser" };
//glut init
glutInit(&argc, argv);
//init window
glutInitDisplayMode(GLUT_RGB);
glutInitWindowSize(WINDOW_WIDTH, WINDOW_HEIGHT);
glutInitWindowPosition(100, 100);
glutCreateWindow(*argv);
// glew init (must be done after window creation for some odd reason)
glewInit();
if (!glewIsSupported("GL_VERSION_2_0 "))
{
fprintf(stderr, "ERROR: Support for necessary OpenGL extensions missing.");
fflush(stderr);
exit(0);
}
// register default callbacks
glutDisplayFunc(displayLoop);
glutKeyboardFunc(handleKeyboardDefault);
glutMotionFunc(handleMouseMotionDefault);
glutMouseFunc(handleMouseDefault);
glutSetOption(GLUT_ACTION_ON_WINDOW_CLOSE, GLUT_ACTION_CONTINUE_EXECUTION);
// default initialization
glClearColor(0.0, 0.0, 0.0, 1.0);
glDisable(GL_DEPTH_TEST);
// viewport
glViewport(0, 0, WINDOW_WIDTH, WINDOW_HEIGHT);
// projection
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(60.0, (GLfloat)WINDOW_WIDTH / (GLfloat)WINDOW_HEIGHT, 0.001, 10.0);
}
void render(void)
{
// set view matrix and prepare for rendering
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
//transformations
glTranslatef(0.0, 0.0, translate_z);
glRotatef(rotate_x, 1.0, 0.0, 0.0);
glRotatef(rotate_z, 0.0, 0.0, 1.0);
//render the density field
if (display_denisty){
// attach the shader program to rendering pipeline to perform per vertex instance manipulation
glUseProgram(vs_hist_program);
// Bind our Vertex Array Object (contains vertex buffers object and vertex attribute array)
glBindVertexArray(vao_hist);
// Bind and activate texture with instance data (held with the TBO)
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_BUFFER_EXT, tex_hist);
// Draw the vertices with attached vertex attribute pointers
glDrawArrays(GL_QUADS, 0, 4 * D*D);
//unbind the vertex array object
glBindVertexArray(0);
// Disable the shader program and return to the fixed function pipeline
glUseProgram(0);
}
//render the n bodies
if (display_bodies){
// attach the shader program to rendering pipeline to perform per vertex instance manipulation
glUseProgram(vs_nbody_program);
// Bind our Vertex Array Object (contains vertex buffers object and vertex attribute array)
glBindVertexArray(vao_nbody);
// Bind and activate texture with instance data (held with the TBO)
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_BUFFER_EXT, tex_nbody);
// Draw the vertices with attached vertex attribute pointers
glDrawArrays(GL_POINTS, 0, 1 * N);
//unbind the vertex array object
glBindVertexArray(0);
// Disable the shader program and return to the fixed function pipeline
glUseProgram(0);
}
glutSwapBuffers();
glutPostRedisplay();
}
void checkGLError(){
int Error;
if ((Error = glGetError()) != GL_NO_ERROR)
{
const char* Message = (const char*)gluErrorString(Error);
fprintf(stderr, "OpenGL Error : %s\n", Message);
}
}
void handleKeyboardDefault(unsigned char key, int x, int y)
{
switch (key) {
case(27): case('q') : //escape key or q key
//return control to the user's program to allow them to clean up any allocated memory etc.
glutLeaveMainLoop();
break;
case('b') : //b key
display_bodies = !display_bodies;
break;
case('d') : //d key
display_denisty = !display_denisty;
break;
}
}
void handleMouseDefault(int button, int state, int x, int y)
{
if (state == GLUT_DOWN)
{
mouse_buttons |= 1 << button;
}
else if (state == GLUT_UP)
{
mouse_buttons = 0;
}
mouse_old_x = x;
mouse_old_y = y;
}
void handleMouseMotionDefault(int x, int y)
{
float dx, dy;
dx = (float)(x - mouse_old_x);
dy = (float)(y - mouse_old_y);
if (mouse_buttons & 1)
{
rotate_x += dy * 0.2f;
rotate_z += dx * 0.2f;
}
else if (mouse_buttons & 4)
{
translate_z += dy * 0.01f;
}
mouse_old_x = x;
mouse_old_y = y;
}
void checkCUDAError(const char *msg)
{
hipError_t err = hipGetLastError();
if (hipSuccess != err)
{
fprintf(stderr, "CUDA ERROR: %s: %s.\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
|
9c81afdbfffa0d93a00eedf30f14784d89fef743.cu
|
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <stdbool.h>
//include the header file
//Uncomment below if including into a *.c file rather than *.cu
//extern "C" {
#include "NBodyVisualiser.h"
//}
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
#define TIMING_FRAME_COUNT 20
//User supplied globals
static unsigned int N;
static unsigned int D;
static MODE M;
const float *PositionsX = 0;
const float *PositionsY = 0;
const nbody *Bodies = 0;
const float *Densities = 0;
void(*simulate_function)(void) = 0;
// instancing variables for histogram
GLuint vao_hist = 0;
GLuint vao_hist_vertices = 0;
GLuint tbo_hist = 0;
GLuint tex_hist = 0;
GLuint vao_hist_instance_ids = 0;
// instancing variables for nbody
GLuint vao_nbody = 0;
GLuint vao_nbody_vertices = 0;
GLuint tbo_nbody = 0;
GLuint tex_nbody = 0;
GLuint vao_nbody_instance_ids = 0;
// mouse controls
int mouse_old_x, mouse_old_y;
int mouse_buttons = 0;
float rotate_x = 0.0, rotate_z = 0.0;
float translate_z = -1.0;
// vertex shader handles
GLuint vs_hist_shader = 0;
GLuint vs_nbody_shader = 0;
GLuint vs_hist_program = 0;
GLuint vs_nbody_program = 0;
GLuint vs_hist_instance_index = 0;
GLuint vs_nbody_instance_index = 0;
//render options
bool display_bodies = true;
bool display_denisty = false;
//cuda graphics resources
struct cudaGraphicsResource *cuda_nbody_vbo_resource;
struct cudaGraphicsResource *cuda_hist_vbo_resource;
//timing
float elapsed = 0;
float prev_time = 0;
unsigned int frames;
char title[128];
// function prototypes
void displayLoop(void);
void initHistShader();
void initNBodyShader();
void initHistVertexData();
void initNBodyVertexData();
void initGL();
void destroyViewer();
void render(void);
void checkGLError();
void handleKeyboardDefault(unsigned char key, int x, int y);
void handleMouseDefault(int button, int state, int x, int y);
void handleMouseMotionDefault(int x, int y);
void checkCUDAError(const char *msg);
// Vertex shader source code
const char* hist_vertexShaderSource =
{
"#version 130 \n"
"#extension GL_EXT_gpu_shader4 : enable \n"
"uniform samplerBuffer instance_tex; \n"
"in uint instance_index; \n"
"void main() \n"
"{ \n"
" float instance_data = texelFetchBuffer(instance_tex, int(instance_index)).x; \n"
" vec4 position = vec4(gl_Vertex.x, gl_Vertex.y, 0.0f, 1.0f); \n"
" gl_FrontColor = vec4(instance_data, 0.0f, 0.0f, 0.0f); \n"
" gl_Position = gl_ModelViewProjectionMatrix * position; \n"
"} \n"
};
const char* nbody_vertexShaderSource =
{
"#version 130 \n"
"#extension GL_EXT_gpu_shader4 : enable \n"
"uniform samplerBuffer instance_tex; \n"
"in uint instance_index; \n"
"void main() \n"
"{ \n"
" vec2 instance_data = texelFetchBuffer(instance_tex, int(instance_index)).xy; \n"
" vec4 position = vec4(gl_Vertex.x+instance_data.x, \n"
" gl_Vertex.y+instance_data.y, \n"
" gl_Vertex.z, 1.0f); \n"
" gl_FrontColor = vec4(1.0f, 1.0f, 1.0f, 0.0f); \n"
" gl_Position = gl_ModelViewProjectionMatrix * position; \n"
"} \n"
};
//////////////////////////////// CUDA Kernels ////////////////////////////////
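// Note (inferred from the code): each kernel below uses one thread per body or per
// activity-map cell to scatter the user-supplied device data into the OpenGL texture
// buffer object mapped for CUDA access in displayLoop(); body positions are written as
// interleaved (x, y) float pairs to match the RG32F buffer texture read by the vertex shader.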
__global__ void copyNBodyData2f(float* buffer, const float *x, const float *y, unsigned int N)
{
unsigned int i = threadIdx.x + blockIdx.x*blockDim.x;
if (i < N){
//copy data to mapped buffer
float* ptr = &buffer[i * 2];
ptr[0] = x[i];
ptr[1] = y[i];
}
}
__global__ void copyNBodyData(float* buffer, const nbody* bodies, unsigned int N)
{
unsigned int i = threadIdx.x + blockIdx.x*blockDim.x;
if (i < N){
//copy data to mapped buffer
float* ptr = &buffer[i * 2];
ptr[0] = bodies[i].x;
ptr[1] = bodies[i].y;
}
}
__global__ void copyHistData(float* buffer, const float* densities, unsigned int D)
{
unsigned int i = threadIdx.x + blockIdx.x*blockDim.x;
if (i < D*D){
//copy data to mapped buffer
buffer[i] = densities[i];
}
}
//////////////////////////////// Header declared functions ////////////////////////////////
void initViewer(unsigned int n, unsigned int d, MODE m, void(*simulate)(void))
{
N = n;
D = d;
M = m;
simulate_function = simulate;
//check for UVA (not available in 32 bit host mode)
if (M == CUDA){
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
if (prop.unifiedAddressing != 1){
printf("Error: No UVA found. Are you trying to build you CUDA code in 32bit mode?\n");
}
}
//initialise the OpenGL viewer and context
initGL();
//init our instance rendering and the data
initHistShader();
initNBodyShader();
initHistVertexData();
initNBodyVertexData();
}
void setNBodyPositions2f(const float *positions_x, const float *positions_y)
{
//check that the supplied pointers are device pointers
if (M == CUDA){
cudaPointerAttributes attributes;
//host allocated memory will cause an error
if (cudaPointerGetAttributes(&attributes, positions_x) == cudaErrorInvalidValue){
cudaGetLastError(); // clear out the previous API error
printf("Error: Pointer (positions_x) passed to setNBodyPositions2f must be a device pointer in CUDA mode!\n");
return;
}
//memory not allocated on the device will fail this check; with UVA the reported type may be cudaMemoryTypeHost
if (attributes.type != cudaMemoryTypeDevice){
printf("Error: Pointer (positions_x) passed to setNBodyPositions2f must be a device pointer in CUDA mode!\n");
return;
}
//host allocated memory will cause an error
if (cudaPointerGetAttributes(&attributes, positions_y) == cudaErrorInvalidValue){
cudaGetLastError(); // clear out the previous API error
printf("Error: Pointer (positions_y) passed to setNBodyPositions2f must be a device pointer in CUDA mode!\n");
return;
}
//memory not allocated on the device will fail this check; with UVA the reported type may be cudaMemoryTypeHost
if (attributes.type != cudaMemoryTypeDevice){
printf("Error: Pointer (positions_y) passed to setNBodyPositions2f must be a device pointer in CUDA mode!\n");
return;
}
}
PositionsX = positions_x;
PositionsY = positions_y;
if (Bodies != 0){
printf("Warning: You should use either setNBodyPositions2f or setNBodyPositions\n");
}
}
void setNBodyPositions(const nbody *bodies)
{
//check that the supplied pointer is a device pointer
if (M == CUDA){
cudaPointerAttributes attributes;
//host allocated memory will cause an error
if (cudaPointerGetAttributes(&attributes, bodies) == cudaErrorInvalidValue){
cudaGetLastError(); // clear out the previous API error
printf("Error: Pointer (bodies) passed to setNBodyPositions must be a device pointer in CUDA mode!\n");
return;
}
//memory not allocated on the device will fail this check; with UVA the reported type may be cudaMemoryTypeHost
if (attributes.type != cudaMemoryTypeDevice){
printf("Error: Pointer (bodies) passed to setNBodyPositions must be a device pointer in CUDA mode!\n");
return;
}
}
Bodies = bodies;
if ((PositionsX != 0) || (PositionsY != 0)){
printf("Warning: You should use either setNBodyPositions2f or setNBodyPositions\n");
}
}
void setHistogramData(const float *densities)
{
setActivityMapData(densities);
}
void setActivityMapData(const float *activity)
{
//if CUDA check that the supplied pointer is a device pointer
if (M == CUDA){
cudaPointerAttributes attributes;
//host allocated memory will cause an error
if (cudaPointerGetAttributes(&attributes, activity) == cudaErrorInvalidValue){
cudaGetLastError(); // clear out the previous API error
printf("Error: Pointer passed to setActivityMap (or setHistogramData) must be a device pointer in CUDA mode!\n");
return;
}
//memory not allocated on the device will fail this check; with UVA the reported type may be cudaMemoryTypeHost
if (attributes.type != cudaMemoryTypeDevice){
printf("Error: Pointer passed to setActivityMap (or setHistogramData) must be a device pointer in CUDA mode!\n");
return;
}
}
Densities = activity;
}
void startVisualisationLoop()
{
glutMainLoop();
}
//////////////////////////////// Source module functions ////////////////////////////////
void displayLoop(void)
{
unsigned int i;
float *dptr;
size_t num_bytes;
unsigned int blocks;
float t;
if (simulate_function == 0){
printf("Error: Simulate function has not been defined by calling initViewer(...)\n");
return;
}
//timing
if (M == CUDA)
cudaDeviceSynchronize();
t = (float)clock();
if (prev_time)
elapsed += t - prev_time;
prev_time = t;
frames++;
if (frames == TIMING_FRAME_COUNT){
frames = 0;
elapsed /= TIMING_FRAME_COUNT;
sprintf(title, "Com4521 Assignment - NBody Visualiser (%f FPS)", 1000.0f /elapsed);
glutSetWindowTitle(title);
elapsed = 0;
}
//call the simulation function
simulate_function();
//Map data from user supplied pointers into TBO using CUDA
if (M == CUDA){
//NBODY: map buffer to device pointer so Kernel can populate it
glBindBuffer(GL_TEXTURE_BUFFER_EXT, tbo_nbody);
num_bytes = N * 2 * sizeof(float); // two floats (x, y) per body, matching the TBO allocation
cudaGraphicsMapResources(1, &cuda_nbody_vbo_resource, 0);
cudaGraphicsResourceGetMappedPointer((void **)&dptr, &num_bytes, cuda_nbody_vbo_resource);
//kernel to map data into buffer
blocks = N / 256;
if (N % 256 != 0)
blocks++;
//two possible formats for users to supply body data
if (Bodies != 0){
copyNBodyData << <blocks, 256 >> >(dptr, Bodies, N);
}
else if ((PositionsX != 0) && (PositionsY != 0)){
copyNBodyData2f << <blocks, 256 >> >(dptr, PositionsX, PositionsY, N);
}
cudaGraphicsUnmapResources(1, &cuda_nbody_vbo_resource, 0);
checkCUDAError("Error copying NBody data from supplier device pointer\n");
glBindBuffer(GL_TEXTURE_BUFFER_EXT, 0);
//HIST: map buffer to device pointer so Kernel can populate it
glBindBuffer(GL_TEXTURE_BUFFER_EXT, tbo_hist);
num_bytes = D*D * sizeof(float);
cudaGraphicsMapResources(1, &cuda_hist_vbo_resource, 0);
cudaGraphicsResourceGetMappedPointer((void **)&dptr, &num_bytes, cuda_hist_vbo_resource);
//kernel to map data into buffer
blocks = D*D / 256;
if ((D*D) % 256 != 0)
blocks++;
copyHistData << <blocks, 256 >> >(dptr, Densities, D);
cudaGraphicsUnmapResources(1, &cuda_hist_vbo_resource, 0);
checkCUDAError("Error copying Activity Map data from supplier device pointer\n");
glBindBuffer(GL_TEXTURE_BUFFER_EXT, 0);
}
//Map data from user supplied pointers into TBO using CPU
else{
//map buffer to positions TBO and copy data to it from user supplied pointer
glBindBuffer(GL_TEXTURE_BUFFER_EXT, tbo_nbody);
dptr = (float*)glMapBuffer(GL_TEXTURE_BUFFER_EXT, GL_WRITE_ONLY); //tbo_nbody buffer
if (dptr == 0){
printf("Error: Unable to map nBody Texture Buffer Object\n");
return;
}
if (Bodies != 0){
for (i = 0; i < N; i++){
unsigned int index = i * 2;
dptr[index] = Bodies[i].x;
dptr[index + 1] = Bodies[i].y;
}
}
else if ((PositionsX != 0) && (PositionsY != 0)){
for (i = 0; i < N; i++){
unsigned int index = i * 2;
dptr[index] = PositionsX[i];
dptr[index + 1] = PositionsY[i];
}
}
glUnmapBuffer(GL_TEXTURE_BUFFER_EXT);
glBindBuffer(GL_TEXTURE_BUFFER_EXT, 0);
//map hist buffer to positions TBO and copy data to it from user supplied pointer
glBindBuffer(GL_TEXTURE_BUFFER_EXT, tbo_hist);
dptr = (float*)glMapBuffer(GL_TEXTURE_BUFFER_EXT, GL_WRITE_ONLY); //tbo_hist buffer
if (dptr == 0){
printf("Error: Unable to map Histogram Texture Buffer Object\n");
return;
}
if (Densities != 0){
for (i = 0; i < D*D; i++){
dptr[i] = Densities[i];
}
}
glUnmapBuffer(GL_TEXTURE_BUFFER_EXT);
glBindBuffer(GL_TEXTURE_BUFFER_EXT, 0);
}
//render
render();
checkGLError();
}
void initHistShader()
{
//hist vertex shader
vs_hist_shader = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(vs_hist_shader, 1, &hist_vertexShaderSource, 0);
glCompileShader(vs_hist_shader);
// check for errors
GLint status;
glGetShaderiv(vs_hist_shader, GL_COMPILE_STATUS, &status);
if (status == GL_FALSE){
printf("ERROR: Histogram Shader Compilation Error\n");
char data[1024];
int len;
glGetShaderInfoLog(vs_hist_shader, 1024, &len, data);
printf("%s", data);
}
//program
vs_hist_program = glCreateProgram();
glAttachShader(vs_hist_program, vs_hist_shader);
glLinkProgram(vs_hist_program);
glGetProgramiv(vs_hist_program, GL_LINK_STATUS, &status);
if (status == GL_FALSE){
printf("ERROR: Histogram Shader Program Link Error\n");
}
glUseProgram(vs_hist_program);
// get shader variables
vs_hist_instance_index = glGetAttribLocation(vs_hist_program, "instance_index");
if (vs_hist_instance_index == (GLuint)-1){
printf("Warning: Histogram Shader program missing 'attribute in uint instance_index'\n");
}
glUseProgram(0);
//check for any errors
checkGLError();
}
void initNBodyShader()
{
//nbody vertex shader
vs_nbody_shader = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(vs_nbody_shader, 1,&nbody_vertexShaderSource, 0);
glCompileShader(vs_nbody_shader);
// check for errors
GLint status;
glGetShaderiv(vs_nbody_shader, GL_COMPILE_STATUS, &status);
if (status == GL_FALSE){
printf("ERROR: nbody Program Shader Compilation Error\n");
char data[1024];
int len;
glGetShaderInfoLog(vs_nbody_shader, 1024, &len, data);
printf("%s", data);
}
//program
vs_nbody_program = glCreateProgram();
glAttachShader(vs_nbody_program, vs_nbody_shader);
glLinkProgram(vs_nbody_program);
glGetProgramiv(vs_nbody_program, GL_LINK_STATUS, &status);
if (status == GL_FALSE){
printf("ERROR: NBody Shader Program Link Error\n");
}
glUseProgram(vs_nbody_program);
// get shader variables
vs_nbody_instance_index = glGetAttribLocation(vs_nbody_program, "instance_index");
if (vs_nbody_instance_index == (GLuint)-1){
printf("Warning: nbody Program Shader program missing 'attribute in uint instance_index'\n");
}
glUseProgram(0);
//check for any errors
checkGLError();
}
void initHistVertexData()
{
/* vertex array object */
glGenVertexArrays(1, &vao_hist); // Create our Vertex Array Object
glBindVertexArray(vao_hist); // Bind our Vertex Array Object so we can use it
/* create a vertex buffer */
// create buffer object (all vertex positions normalised between -0.5 and +0.5)
glGenBuffers(1, &vao_hist_vertices);
glBindBuffer(GL_ARRAY_BUFFER, vao_hist_vertices);
glBufferData(GL_ARRAY_BUFFER, D*D * 4 * 3 * sizeof(float), 0, GL_STATIC_DRAW);
float* verts = (float*)glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
float quad_size = 1.0f / (float)(D);
for (unsigned int x = 0; x < D; x++) {
for (unsigned int y = 0; y < D; y++) {
int offset = (x + (y * (D))) * 3 * 4;
float x_min = (float)x / (float)(D);
float y_min = (float)y / (float)(D);
//first vertex
verts[offset + 0] = x_min - 0.5f;
verts[offset + 1] = y_min - 0.5f;
verts[offset + 2] = 0.0f;
//second vertex
verts[offset + 3] = x_min - 0.5f;
verts[offset + 4] = y_min + quad_size - 0.5f;
verts[offset + 5] = 0.0f;
//third vertex
verts[offset + 6] = x_min + quad_size - 0.5f;
verts[offset + 7] = y_min + quad_size - 0.5f;
verts[offset + 8] = 0.0f;
//fourth vertex
verts[offset + 9] = x_min + quad_size - 0.5f;
verts[offset + 10] = y_min - 0.5f;
verts[offset + 11] = 0.0f;
}
}
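	/* Illustrative note (the values follow directly from the loop above): each grid
	   cell (x, y) becomes one quad spanning [x/D - 0.5, (x+1)/D - 0.5] by
	   [y/D - 0.5, (y+1)/D - 0.5], so the D*D quads exactly tile the square
	   [-0.5, 0.5] x [-0.5, 0.5] in the z = 0 plane. */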
glUnmapBuffer(GL_ARRAY_BUFFER);
glVertexAttribPointer((GLuint)0, 3, GL_FLOAT, GL_FALSE, 0, 0); // Set up our vertex attributes pointer
glEnableVertexAttribArray(0);
checkGLError();
// instance index buffer
glGenBuffers(1, &vao_hist_instance_ids);
glBindBuffer(GL_ARRAY_BUFFER, vao_hist_instance_ids);
glBufferData(GL_ARRAY_BUFFER, D*D * 4 * sizeof(unsigned int), 0, GL_STATIC_DRAW);
unsigned int* ids = (unsigned int*)glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
for (unsigned int x = 0; x < D; x++) {
for (unsigned int y = 0; y < D; y++) {
int index = (x + (y * (D)));
int offset = index * 4;
//four vertices (a quad) have the same instance index
ids[offset + 0] = index;
ids[offset + 1] = index;
ids[offset + 2] = index;
ids[offset + 3] = index;
}
}
//map instance
glVertexAttribIPointer((GLuint)vs_hist_instance_index, 1, GL_UNSIGNED_INT, 0, 0); // Set up instance id attributes pointer in shader
glEnableVertexAttribArray(vs_hist_instance_index);
glUnmapBuffer(GL_ARRAY_BUFFER);
//check for errors
checkGLError();
/* texture buffer object */
glGenBuffers(1, &tbo_hist);
glBindBuffer(GL_TEXTURE_BUFFER, tbo_hist);
glBufferData(GL_TEXTURE_BUFFER, D*D * 1 * sizeof(float), 0, GL_DYNAMIC_DRAW); // 1 float element per cell in the texture buffer object for histogram density
/* generate texture */
glGenTextures(1, &tex_hist);
glBindTexture(GL_TEXTURE_BUFFER, tex_hist);
glTexBuffer(GL_TEXTURE_BUFFER, GL_R32F, tbo_hist);
//create cuda gl resource to write cuda data to TBO
if (M == CUDA){
cudaGraphicsGLRegisterBuffer(&cuda_hist_vbo_resource, tbo_hist, cudaGraphicsMapFlagsWriteDiscard);
}
//unbind buffers
glBindBuffer(GL_TEXTURE_BUFFER, 0);
//unbind vao
glBindVertexArray(0); // Unbind our Vertex Array Object
checkGLError();
}
void initNBodyVertexData()
{
/* vertex array object */
glGenVertexArrays(1, &vao_nbody); // Create our Vertex Array Object
glBindVertexArray(vao_nbody); // Bind our Vertex Array Object so we can use it
/* create a vertex buffer */
// create buffer object (all vertex positions normalised between -0.5 and +0.5)
glGenBuffers(1, &vao_nbody_vertices);
glBindBuffer(GL_ARRAY_BUFFER, vao_nbody_vertices);
glBufferData(GL_ARRAY_BUFFER, N * 3 * sizeof(float), 0, GL_STATIC_DRAW);
float* verts = (float*)glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
for (unsigned int i = 0; i < N; i++) {
int offset = i*3;
//vertex point
verts[offset + 0] = -0.5f;
verts[offset + 1] = -0.5f;
verts[offset + 2] = 0.0f;
}
glUnmapBuffer(GL_ARRAY_BUFFER);
glVertexAttribPointer((GLuint)0, 3, GL_FLOAT, GL_FALSE, 0, 0); // Set up our vertex attributes pointer
glEnableVertexAttribArray(0);
checkGLError();
// instance index buffer
glGenBuffers(1, &vao_nbody_instance_ids);
glBindBuffer(GL_ARRAY_BUFFER, vao_nbody_instance_ids);
glBufferData(GL_ARRAY_BUFFER, N * 1 * sizeof(unsigned int), 0, GL_STATIC_DRAW);
unsigned int* ids = (unsigned int*)glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
for (unsigned int i = 0; i < N; i++) {
//single vertex as it is a point
ids[i] = i;
}
//map instance
glVertexAttribIPointer((GLuint)vs_nbody_instance_index, 1, GL_UNSIGNED_INT, 0, 0); // Set up instance id attributes pointer in shader
glEnableVertexAttribArray(vs_nbody_instance_index);
glUnmapBuffer(GL_ARRAY_BUFFER);
//check for errors
checkGLError();
/* texture buffer object */
glGenBuffers(1, &tbo_nbody);
glBindBuffer(GL_TEXTURE_BUFFER, tbo_nbody);
glBufferData(GL_TEXTURE_BUFFER, N * 2 * sizeof(float), 0, GL_DYNAMIC_DRAW); // 2 float elements in a texture buffer object for x and y position
/* generate texture */
glGenTextures(1, &tex_nbody);
glBindTexture(GL_TEXTURE_BUFFER, tex_nbody);
glTexBuffer(GL_TEXTURE_BUFFER, GL_RG32F, tbo_nbody);
//create cuda gl resource to write cuda data to TBO
if (M == CUDA){
cudaGraphicsGLRegisterBuffer(&cuda_nbody_vbo_resource, tbo_nbody, cudaGraphicsMapFlagsWriteDiscard);
}
//unbind buffers
glBindBuffer(GL_TEXTURE_BUFFER, 0);
//unbind vao
glBindVertexArray(0); // Unbind our Vertex Array Object
checkGLError();
}
void destroyViewer()
{
checkGLError();
//cleanup hist vao
glBindVertexArray(vao_hist);
glDeleteBuffers(1, &vao_hist_vertices);
vao_hist_vertices = 0;
glDeleteBuffers(1, &vao_hist_instance_ids);
vao_hist_instance_ids = 0;
glDeleteBuffers(1, &tbo_hist);
tbo_hist = 0;
glDeleteTextures(1, &tex_hist);
tex_hist = 0;
if (M == CUDA){
cudaGraphicsUnregisterResource(cuda_hist_vbo_resource);
}
glDeleteVertexArrays(1, &vao_hist);
vao_hist = 0;
//cleanup nbody vao
glBindVertexArray(vao_nbody);
glDeleteBuffers(1, &vao_nbody_vertices);
vao_nbody_vertices = 0;
glDeleteBuffers(1, &vao_nbody_instance_ids);
vao_nbody_instance_ids = 0;
glDeleteBuffers(1, &tbo_nbody);
tbo_nbody = 0;
glDeleteTextures(1, &tex_nbody);
tex_nbody = 0;
if (M == CUDA){
cudaGraphicsUnregisterResource(cuda_nbody_vbo_resource);
}
glDeleteVertexArrays(1, &vao_nbody);
vao_nbody = 0;
checkGLError();
}
void initGL()
{
int argc = 1;
char * argv[] = { "Com4521 Assignment - NBody Visualiser" };
//glut init
glutInit(&argc, argv);
//init window
glutInitDisplayMode(GLUT_RGB);
glutInitWindowSize(WINDOW_WIDTH, WINDOW_HEIGHT);
glutInitWindowPosition(100, 100);
glutCreateWindow(*argv);
// glew init (must be done after window creation for some odd reason)
glewInit();
if (!glewIsSupported("GL_VERSION_2_0 "))
{
fprintf(stderr, "ERROR: Support for necessary OpenGL extensions missing.");
fflush(stderr);
exit(0);
}
// register default callbacks
glutDisplayFunc(displayLoop);
glutKeyboardFunc(handleKeyboardDefault);
glutMotionFunc(handleMouseMotionDefault);
glutMouseFunc(handleMouseDefault);
glutSetOption(GLUT_ACTION_ON_WINDOW_CLOSE, GLUT_ACTION_CONTINUE_EXECUTION);
// default initialization
glClearColor(0.0, 0.0, 0.0, 1.0);
glDisable(GL_DEPTH_TEST);
// viewport
glViewport(0, 0, WINDOW_WIDTH, WINDOW_HEIGHT);
// projection
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(60.0, (GLfloat)WINDOW_WIDTH / (GLfloat)WINDOW_HEIGHT, 0.001, 10.0);
}
void render(void)
{
// set view matrix and prepare for rendering
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
//transformations
glTranslatef(0.0, 0.0, translate_z);
glRotatef(rotate_x, 1.0, 0.0, 0.0);
glRotatef(rotate_z, 0.0, 0.0, 1.0);
//render the density field
if (display_denisty){
// attach the shader program to rendering pipeline to perform per vertex instance manipulation
glUseProgram(vs_hist_program);
// Bind our Vertex Array Object (contains vertex buffers object and vertex attribute array)
glBindVertexArray(vao_hist);
// Bind and activate texture with instance data (held with the TBO)
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_BUFFER_EXT, tex_hist);
// Draw the vertices with attached vertex attribute pointers
glDrawArrays(GL_QUADS, 0, 4 * D*D);
//unbind the vertex array object
glBindVertexArray(0);
// Disable the shader program and return to the fixed function pipeline
glUseProgram(0);
}
//render the n bodies
if (display_bodies){
// attach the shader program to rendering pipeline to perform per vertex instance manipulation
glUseProgram(vs_nbody_program);
// Bind our Vertex Array Object (contains vertex buffers object and vertex attribute array)
glBindVertexArray(vao_nbody);
// Bind and activate texture with instance data (held with the TBO)
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_BUFFER_EXT, tex_nbody);
// Draw the vertices with attached vertex attribute pointers
glDrawArrays(GL_POINTS, 0, 1 * N);
//unbind the vertex array object
glBindVertexArray(0);
// Disable the shader program and return to the fixed function pipeline
glUseProgram(0);
}
glutSwapBuffers();
glutPostRedisplay();
}
void checkGLError(){
GLenum Error;
if ((Error = glGetError()) != GL_NO_ERROR)
{
const char* Message = (const char*)gluErrorString(Error);
fprintf(stderr, "OpenGL Error : %s\n", Message);
}
}
void handleKeyboardDefault(unsigned char key, int x, int y)
{
switch (key) {
case(27): case('q') : //escape key or q key
//return control to the user's program to allow them to clean up any allocated memory etc.
glutLeaveMainLoop();
break;
case('b') : //b key
display_bodies = !display_bodies;
break;
case('d') : //d key
display_denisty = !display_denisty;
break;
}
}
void handleMouseDefault(int button, int state, int x, int y)
{
if (state == GLUT_DOWN)
{
mouse_buttons |= 1 << button;
}
else if (state == GLUT_UP)
{
mouse_buttons = 0;
}
mouse_old_x = x;
mouse_old_y = y;
}
void handleMouseMotionDefault(int x, int y)
{
float dx, dy;
dx = (float)(x - mouse_old_x);
dy = (float)(y - mouse_old_y);
if (mouse_buttons & 1)
{
rotate_x += dy * 0.2f;
rotate_z += dx * 0.2f;
}
else if (mouse_buttons & 4)
{
translate_z += dy * 0.01f;
}
mouse_old_x = x;
mouse_old_y = y;
}
void checkCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err)
{
fprintf(stderr, "CUDA ERROR: %s: %s.\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
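/* Hedged usage sketch (illustrative assumption, not part of the original
   visualiser): checkCUDAError only inspects cudaGetLastError, so it is meant to
   be called immediately after a CUDA runtime call such as a kernel launch or a
   memset; the helper name below is hypothetical. */
static void exampleCheckAfterAsyncCall(float* d_buffer, size_t bytes)
{
	cudaMemset(d_buffer, 0, bytes);        // any CUDA runtime call that may fail
	checkCUDAError("cudaMemset d_buffer"); // prints the error string and exits on failure
}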
|
f0c9f5005a1eeeae971768f325fc910107c68fc1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void Brightness (uint *dst, int imageW, int imageH, float brightness)
{
const int ix = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const int iy = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if(ix < imageW && iy < imageH){
const float x = (float)ix + 0.5f;
const float y = (float)iy + 0.5f;
float4 fresult = tex2D(texImage, x, y);
float red = fresult.x;
float green = fresult.y;
float blue = fresult.z;
red = red * (1.f - brightness) + brightness;
green = green * (1.f - brightness) + brightness;
blue = blue * (1.f - brightness) + brightness;
dst[imageW * iy + ix] = make_color(red, green, blue, 1.f);
}
}
// brightness in the range 0..1 blends each channel linearly towards white:
// out = in * (1 - brightness) + brightness, so brightness = 0 leaves the image unchanged
extern "C" void brightnessWrapper (uint *dst, int imageW, int imageH, float brightness)
{
dim3 threads(BLOCKDIM_X, BLOCKDIM_Y);
dim3 grid(iDivUp(imageW, BLOCKDIM_X), iDivUp(imageH, BLOCKDIM_Y));
hipLaunchKernelGGL(( Brightness), dim3(grid), dim3(threads), 0, 0, dst, imageW, imageH, brightness);
}
|
f0c9f5005a1eeeae971768f325fc910107c68fc1.cu
|
__global__ void Brightness (uint *dst, int imageW, int imageH, float brightness)
{
const int ix = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const int iy = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if(ix < imageW && iy < imageH){
const float x = (float)ix + 0.5f;
const float y = (float)iy + 0.5f;
float4 fresult = tex2D(texImage, x, y);
float red = fresult.x;
float green = fresult.y;
float blue = fresult.z;
red = red * (1.f - brightness) + brightness;
green = green * (1.f - brightness) + brightness;
blue = blue * (1.f - brightness) + brightness;
dst[imageW * iy + ix] = make_color(red, green, blue, 1.f);
}
}
// brightness in the range 0..1 blends each channel linearly towards white:
// out = in * (1 - brightness) + brightness, so brightness = 0 leaves the image unchanged
extern "C" void brightnessWrapper (uint *dst, int imageW, int imageH, float brightness)
{
dim3 threads(BLOCKDIM_X, BLOCKDIM_Y);
dim3 grid(iDivUp(imageW, BLOCKDIM_X), iDivUp(imageH, BLOCKDIM_Y));
Brightness<<<grid, threads>>>(dst, imageW, imageH, brightness);
}
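/* Hedged host-side reference (illustrative assumption, not part of the original
   filter): the kernel above applies a per-channel linear blend towards white,
   out = in * (1 - brightness) + brightness. For example, a channel value of 0.5
   with brightness = 0.25 becomes 0.625; brightness = 0 is the identity. */
static float brightnessReferenceChannel(float channel, float brightness)
{
    return channel * (1.f - brightness) + brightness;
}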
|
8dbf9cdc444628ec026fce52878817c919ab852b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef USE_CUDNN
#include <vector>
#include "caffe/layers/cudnn_cmp_conv_layer.hpp"
namespace caffe {
__global__ void sync_cmp_conv_groups() { }
template <typename Dtype>
__global__ void mask_weight( int n, const Dtype* weight, const int* mask, Dtype* out
) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = weight[index] * mask[index];
}
}
template <typename Dtype>
__global__ void quantize_weight_forward( int n, const int* mask, const int* indice, const Dtype* centroid, Dtype* out
) {
CUDA_KERNEL_LOOP(index, n) {
if (mask[index])
out[index] = centroid[indice[index]];
}
}
template <typename Dtype>
__global__ void quantize_weight_backward( int n, int class_num, const Dtype* diff, const int* mask, const int* indice, Dtype *tmpDiff, int *freq, Dtype* out
) {
CUDA_KERNEL_LOOP(index, n) {
tmpDiff[index] = 0;
freq[index] = 0;
}
CUDA_KERNEL_LOOP(index, n) {
if (mask[index])
{
tmpDiff[indice[index]] += diff[index];
freq[indice[index]]++;
}
}
CUDA_KERNEL_LOOP(index, n){
if (mask[index])
{
out[index] = tmpDiff[indice[index]]/freq[indice[index]] ;
}
}
}
template <typename Dtype>
void CuDNNCmpConvolutionLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
int count = this->blobs_[0]->count();
hipLaunchKernelGGL(( mask_weight<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, this->blobs_[0]->gpu_data(), this->masks_.gpu_data(), this->blobs_[0]->mutable_gpu_data());
if(this->quantize_term_)
{
hipLaunchKernelGGL(( quantize_weight_forward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, this->masks_.gpu_data(), this->indices_.gpu_data(), this->centroids_.gpu_data(), this->blobs_[0]->mutable_gpu_data());
}
const Dtype* weight = this->blobs_[0]->gpu_data();
for (int i = 0; i < bottom.size(); ++i) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* top_data = top[i]->mutable_gpu_data();
// Forward through cuDNN in parallel over groups.
for (int g = 0; g < this->group_; g++) {
// Filters.
CUDNN_CHECK(cudnnConvolutionForward(handle_[g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i], bottom_data + bottom_offset_ * g,
filter_desc_, weight + this->weight_offset_ * g,
conv_descs_[i],
fwd_algo_[i], workspace[g], workspace_fwd_sizes_[i],
cudnn::dataType<Dtype>::zero,
top_descs_[i], top_data + top_offset_ * g));
// Bias.
if (this->bias_term_) {
const Dtype* bias_data = this->blobs_[1]->gpu_data();
CUDNN_CHECK(cudnnAddTensor(handle_[g],
cudnn::dataType<Dtype>::one,
bias_desc_, bias_data + bias_offset_ * g,
cudnn::dataType<Dtype>::one,
top_descs_[i], top_data + top_offset_ * g));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sync_cmp_conv_groups), dim3(1), dim3(1), 0, 0, );
}
}
template <typename Dtype>
void CuDNNCmpConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* weight = NULL;
Dtype* weight_diff = NULL;
int count = 0 ;
if (this->param_propagate_down_[0]) {
weight = this->blobs_[0]->gpu_data();
weight_diff = this->blobs_[0]->mutable_gpu_diff();
count = this->blobs_[0]->count();
}
Dtype* bias_diff = NULL;
if (this->bias_term_ && this->param_propagate_down_[1]) {
bias_diff = this->blobs_[1]->mutable_gpu_diff();
}
for (int i = 0; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
// Backward through cuDNN in parallel over groups and gradients.
for (int g = 0; g < this->group_; g++) {
// Gradient w.r.t. bias.
if (this->bias_term_ && this->param_propagate_down_[1]) {
CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g],
cudnn::dataType<Dtype>::one,
top_descs_[i], top_diff + top_offset_ * g,
cudnn::dataType<Dtype>::one,
bias_desc_, bias_diff + bias_offset_ * g));
}
// Gradient w.r.t. weights.
if (this->param_propagate_down_[0]) {
const Dtype* bottom_data = bottom[i]->gpu_data();
CUDNN_CHECK(cudnnConvolutionBackwardFilter(
handle_[1*this->group_ + g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i], bottom_data + bottom_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
bwd_filter_algo_[i], workspace[1*this->group_ + g],
workspace_bwd_filter_sizes_[i],
cudnn::dataType<Dtype>::one,
filter_desc_, weight_diff + this->weight_offset_ * g));
hipLaunchKernelGGL(( mask_weight<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, this->blobs_[0]->gpu_diff(),this->masks_.gpu_data() ,this->blobs_[0]->mutable_gpu_diff());
if(this->quantize_term_)
{
hipLaunchKernelGGL(( quantize_weight_backward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, this->class_num_, this->blobs_[0]->gpu_diff(), this->masks_.gpu_data(), this->indices_.gpu_data(), this->tmpDiff_.mutable_gpu_data(), this->freq_.mutable_gpu_data(), this->blobs_[0]->mutable_gpu_diff());
}
}
// Gradient w.r.t. bottom data.
if (propagate_down[i]) {
if (weight == NULL) {
weight = this->blobs_[0]->gpu_data();
}
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
CUDNN_CHECK(cudnnConvolutionBackwardData(
handle_[2*this->group_ + g],
cudnn::dataType<Dtype>::one,
filter_desc_, weight + this->weight_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
bwd_data_algo_[i], workspace[2*this->group_ + g],
workspace_bwd_data_sizes_[i],
cudnn::dataType<Dtype>::zero,
bottom_descs_[i], bottom_diff + bottom_offset_ * g));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sync_cmp_conv_groups), dim3(1), dim3(1), 0, 0, );
}
}
INSTANTIATE_LAYER_GPU_FUNCS(CuDNNCmpConvolutionLayer);
} // namespace caffe
#endif
|
8dbf9cdc444628ec026fce52878817c919ab852b.cu
|
#ifdef USE_CUDNN
#include <vector>
#include "caffe/layers/cudnn_cmp_conv_layer.hpp"
namespace caffe {
__global__ void sync_cmp_conv_groups() { }
template <typename Dtype>
__global__ void mask_weight( int n, const Dtype* weight, const int* mask, Dtype* out
) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = weight[index] * mask[index];
}
}
template <typename Dtype>
__global__ void quantize_weight_forward( int n, const int* mask, const int* indice, const Dtype* centroid, Dtype* out
) {
CUDA_KERNEL_LOOP(index, n) {
if (mask[index])
out[index] = centroid[indice[index]];
}
}
template <typename Dtype>
__global__ void quantize_weight_backward( int n, int class_num, const Dtype* diff, const int* mask, const int* indice, Dtype *tmpDiff, int *freq, Dtype* out
) {
CUDA_KERNEL_LOOP(index, n) {
tmpDiff[index] = 0;
freq[index] = 0;
}
CUDA_KERNEL_LOOP(index, n) {
if (mask[index])
{
tmpDiff[indice[index]] += diff[index];
freq[indice[index]]++;
}
}
CUDA_KERNEL_LOOP(index, n){
if (mask[index])
{
out[index] = tmpDiff[indice[index]]/freq[indice[index]] ;
}
}
}
template <typename Dtype>
void CuDNNCmpConvolutionLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
int count = this->blobs_[0]->count();
mask_weight<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count, this->blobs_[0]->gpu_data(), this->masks_.gpu_data(), this->blobs_[0]->mutable_gpu_data());
if(this->quantize_term_)
{
quantize_weight_forward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count, this->masks_.gpu_data(), this->indices_.gpu_data(), this->centroids_.gpu_data(), this->blobs_[0]->mutable_gpu_data());
}
const Dtype* weight = this->blobs_[0]->gpu_data();
for (int i = 0; i < bottom.size(); ++i) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* top_data = top[i]->mutable_gpu_data();
// Forward through cuDNN in parallel over groups.
for (int g = 0; g < this->group_; g++) {
// Filters.
CUDNN_CHECK(cudnnConvolutionForward(handle_[g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i], bottom_data + bottom_offset_ * g,
filter_desc_, weight + this->weight_offset_ * g,
conv_descs_[i],
fwd_algo_[i], workspace[g], workspace_fwd_sizes_[i],
cudnn::dataType<Dtype>::zero,
top_descs_[i], top_data + top_offset_ * g));
// Bias.
if (this->bias_term_) {
const Dtype* bias_data = this->blobs_[1]->gpu_data();
CUDNN_CHECK(cudnnAddTensor(handle_[g],
cudnn::dataType<Dtype>::one,
bias_desc_, bias_data + bias_offset_ * g,
cudnn::dataType<Dtype>::one,
top_descs_[i], top_data + top_offset_ * g));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
sync_cmp_conv_groups<<<1, 1>>>();
}
}
template <typename Dtype>
void CuDNNCmpConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* weight = NULL;
Dtype* weight_diff = NULL;
int count = 0 ;
if (this->param_propagate_down_[0]) {
weight = this->blobs_[0]->gpu_data();
weight_diff = this->blobs_[0]->mutable_gpu_diff();
count = this->blobs_[0]->count();
}
Dtype* bias_diff = NULL;
if (this->bias_term_ && this->param_propagate_down_[1]) {
bias_diff = this->blobs_[1]->mutable_gpu_diff();
}
for (int i = 0; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
// Backward through cuDNN in parallel over groups and gradients.
for (int g = 0; g < this->group_; g++) {
// Gradient w.r.t. bias.
if (this->bias_term_ && this->param_propagate_down_[1]) {
CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g],
cudnn::dataType<Dtype>::one,
top_descs_[i], top_diff + top_offset_ * g,
cudnn::dataType<Dtype>::one,
bias_desc_, bias_diff + bias_offset_ * g));
}
// Gradient w.r.t. weights.
if (this->param_propagate_down_[0]) {
const Dtype* bottom_data = bottom[i]->gpu_data();
CUDNN_CHECK(cudnnConvolutionBackwardFilter(
handle_[1*this->group_ + g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i], bottom_data + bottom_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
bwd_filter_algo_[i], workspace[1*this->group_ + g],
workspace_bwd_filter_sizes_[i],
cudnn::dataType<Dtype>::one,
filter_desc_, weight_diff + this->weight_offset_ * g));
mask_weight<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count, this->blobs_[0]->gpu_diff(),this->masks_.gpu_data() ,this->blobs_[0]->mutable_gpu_diff());
if(this->quantize_term_)
{
quantize_weight_backward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, this->class_num_, this->blobs_[0]->gpu_diff(), this->masks_.gpu_data(), this->indices_.gpu_data(), this->tmpDiff_.mutable_gpu_data(), this->freq_.mutable_gpu_data(), this->blobs_[0]->mutable_gpu_diff());
}
}
// Gradient w.r.t. bottom data.
if (propagate_down[i]) {
if (weight == NULL) {
weight = this->blobs_[0]->gpu_data();
}
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
CUDNN_CHECK(cudnnConvolutionBackwardData(
handle_[2*this->group_ + g],
cudnn::dataType<Dtype>::one,
filter_desc_, weight + this->weight_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
bwd_data_algo_[i], workspace[2*this->group_ + g],
workspace_bwd_data_sizes_[i],
cudnn::dataType<Dtype>::zero,
bottom_descs_[i], bottom_diff + bottom_offset_ * g));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
sync_cmp_conv_groups<<<1, 1>>>();
}
}
INSTANTIATE_LAYER_GPU_FUNCS(CuDNNCmpConvolutionLayer);
} // namespace caffe
#endif
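// Hedged host-side sketch (illustrative assumption, not part of the Caffe layer):
// combined effect of the forward-pass kernels above when quantize_term_ is enabled --
// mask_weight zeroes pruned weights, and quantize_weight_forward then replaces every
// surviving weight with its cluster centroid.
static void cmp_quantize_forward_reference(int n, const int* mask, const int* indice,
                                           const float* centroid, float* out) {
  for (int i = 0; i < n; ++i) {
    out[i] = mask[i] ? centroid[indice[i]] : 0.f; // pruned entries stay at zero
  }
}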
|
7dbc5bc5632c3b4c7e828c09d3cd0fc9adc51614.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "backend/base/base_reduce_sum.h"
#include "kernels/gpu/operator_on_gpu.h"
#include <core/tensor_builder.h>
#include "backend/name.h"
#include "global/operator_factory.h"
#include <algorithm>
#include <math.h>
#include <numeric>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <device_launch_parameters.h>
#include "kernels/gpu/cuda_context.h"
#include "core/device_context.h"
#include "utils/ctxmgr_lite.h"
#include "kernels/gpu/cudax_fp16_math.h"
#include "global/fp16_operator_factory.h"
#include "kernels/gpu/gpu_kernel.h"
namespace ts {
namespace gpu {
class ReduceSum : public OperatorOnGPU<base::ReduceSum> {
public:
using self = ReduceSum;
using supper = OperatorOnGPU<base::ReduceSum>;
void reduce(const Tensor &x, int dim, Tensor &out) override;
};
}
}
namespace ts {
namespace gpu {
template <typename T>
static __global__ void reduce_sum_kernel(const T*input_data, T*output_data,
int input_count, int output_count,
int number, int channels, int width, int number_step) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
if (index < input_count) {
auto i = index / number_step;
auto w = index % width;
auto local_input_data = input_data + index;
auto local_output_data = output_data + i * width + w;
atomicAdd(local_output_data, *local_input_data);
}
}
template <typename T>
static __global__ void reduce_sum_kernel_no_atomic(const T* input_data, T* output_data,
int channels, int number, int width) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
int size = number * width;
for (; index < size; index += blockDim.x * gridDim.x)
{
int n = index / width;
int s = index % width;
T sum = T(0.f);
for (int k = 0; k < channels; k++)
{
sum += input_data[(n * channels + k) * width + s];
}
output_data[index] = sum;
}
}
template <typename T>
static void local_run_atomic_kernel(const T*input_data, T*output_data,
int input_count, int output_count,
int number, int channels, int width, int number_step, hipStream_t stream) {
dim3 blockSize(CUDA_THREAD_NUM);
dim3 gridSize(CUDA_BLOCK(input_count, blockSize.x));
hipMemsetAsync(output_data, 0, output_count * sizeof(T), stream);
RUN_KERNEL_STREAM(reduce_sum_kernel<T>, gridSize, blockSize, 0, stream,
input_data, output_data,
input_count, output_count, number, channels, width,
channels * width);
}
template <typename T>
static void local_run_kernel(const T*input_data, T*output_data,
int input_count, int output_count,
int number, int channels, int width, int number_step, hipStream_t stream) {
dim3 blockSize(CUDA_THREAD_NUM);
dim3 gridSize(CUDA_BLOCK(output_count, blockSize.x));
RUN_KERNEL_STREAM(reduce_sum_kernel_no_atomic<T>, gridSize, blockSize, 0, stream,
input_data, output_data,
channels, number, width);
}
#ifdef TS_USE_CUDA_FP16
template <>
__global__ void reduce_sum_kernel_no_atomic<half>(const half* input_data, half* output_data,
int channels, int number, int width) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
int size = number * width;
for (; index < size; index += blockDim.x * gridDim.x)
{
int n = index / width;
int s = index % width;
half sum = __float2half(0.f);
for (int k = 0; k < channels; k++)
{
sum = sum + input_data[(n * channels + k) * width + s];
}
output_data[index] = sum;
}
}
#endif
template<typename T>
void gpu_reduce_sum_compute_run(const Tensor &x, int dim, Tensor &out) {
auto &size = x.sizes();
auto number = std::accumulate(size.begin(), size.begin() + dim, 1, std::multiplies<int32_t>());
auto channels = size[dim];
auto width = std::accumulate(size.begin() + dim + 1, size.end(), 1, std::multiplies<int32_t>());
auto input_data = x.data<T>();
auto output_data = out.data<T>();
auto input_count = x.count();
auto output_count = out.count();
auto &context = ctx::ref<DeviceContext>();
CUDAContextHandle *handle = reinterpret_cast<CUDAContextHandle *>(context.handle);
auto cuda_stream = handle->stream();
local_run_kernel<T>(input_data, output_data,
input_count, output_count, number, channels, width,
channels * width, cuda_stream);
}
void ReduceSum::reduce(const Tensor &x, int dim, Tensor &out) {
// Notice: all of the tensors' memory devices are CPU, as given in running_memory_device
DTYPE dtype = out.dtype();
switch (dtype) {
#define DECLARE_COMPUTE_RUN(DTYPE, TYPE) \
case DTYPE: { gpu_reduce_sum_compute_run<TYPE>(x, dim, out); break; }
DECLARE_COMPUTE_RUN(INT8, int8_t);
DECLARE_COMPUTE_RUN(UINT8, uint8_t);
DECLARE_COMPUTE_RUN(INT16, int16_t);
DECLARE_COMPUTE_RUN(UINT16, uint16_t);
DECLARE_COMPUTE_RUN(INT32, int32_t);
DECLARE_COMPUTE_RUN(UINT32, uint32_t);
DECLARE_COMPUTE_RUN(INT64, int64_t);
DECLARE_COMPUTE_RUN(UINT64, uint64_t);
#ifdef TS_USE_CUDA_FP16
DECLARE_COMPUTE_RUN(FLOAT16, half);
#endif
DECLARE_COMPUTE_RUN(FLOAT32, float);
DECLARE_COMPUTE_RUN(FLOAT64, double);
#undef DECLARE_COMPUTE_RUN
default: {
TS_LOG_ERROR << this->op() << " not support data type(" << dtype << "): " << type_str(dtype) << eject;
break;
}
}
}
}
}
using namespace ts;
using namespace gpu;
TS_REGISTER_OPERATOR(ReduceSum, ts::GPU, name::layer::reduce_sum())
#ifdef TS_USE_CUDA_FP16
TS_REGISTER_FP16_OPERATOR(ReduceSum, ts::GPU, name::layer::reduce_sum())
#endif
|
7dbc5bc5632c3b4c7e828c09d3cd0fc9adc51614.cu
|
#include "backend/base/base_reduce_sum.h"
#include "kernels/gpu/operator_on_gpu.h"
#include <core/tensor_builder.h>
#include "backend/name.h"
#include "global/operator_factory.h"
#include <algorithm>
#include <math.h>
#include <numeric>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include "kernels/gpu/cuda_context.h"
#include "core/device_context.h"
#include "utils/ctxmgr_lite.h"
#include "kernels/gpu/cudax_fp16_math.h"
#include "global/fp16_operator_factory.h"
#include "kernels/gpu/gpu_kernel.h"
namespace ts {
namespace gpu {
class ReduceSum : public OperatorOnGPU<base::ReduceSum> {
public:
using self = ReduceSum;
using supper = OperatorOnGPU<base::ReduceSum>;
void reduce(const Tensor &x, int dim, Tensor &out) override;
};
}
}
namespace ts {
namespace gpu {
template <typename T>
static __global__ void reduce_sum_kernel(const T*input_data, T*output_data,
int input_count, int output_count,
int number, int channels, int width, int number_step) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
if (index < input_count) {
auto i = index / number_step;
auto w = index % width;
auto local_input_data = input_data + index;
auto local_output_data = output_data + i * width + w;
atomicAdd(local_output_data, *local_input_data);
}
}
template <typename T>
static __global__ void reduce_sum_kernel_no_atomic(const T* input_data, T* output_data,
int channels, int number, int width) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
int size = number * width;
for (; index < size; index += blockDim.x * gridDim.x)
{
int n = index / width;
int s = index % width;
T sum = T(0.f);
for (int k = 0; k < channels; k++)
{
sum += input_data[(n * channels + k) * width + s];
}
output_data[index] = sum;
}
}
template <typename T>
static void local_run_atomic_kernel(const T*input_data, T*output_data,
int input_count, int output_count,
int number, int channels, int width, int number_step, cudaStream_t stream) {
dim3 blockSize(CUDA_THREAD_NUM);
dim3 gridSize(CUDA_BLOCK(input_count, blockSize.x));
cudaMemsetAsync(output_data, 0, output_count * sizeof(T), stream);
RUN_KERNEL_STREAM(reduce_sum_kernel<T>, gridSize, blockSize, 0, stream,
input_data, output_data,
input_count, output_count, number, channels, width,
channels * width);
}
template <typename T>
static void local_run_kernel(const T*input_data, T*output_data,
int input_count, int output_count,
int number, int channels, int width, int number_step, cudaStream_t stream) {
dim3 blockSize(CUDA_THREAD_NUM);
dim3 gridSize(CUDA_BLOCK(output_count, blockSize.x));
RUN_KERNEL_STREAM(reduce_sum_kernel_no_atomic<T>, gridSize, blockSize, 0, stream,
input_data, output_data,
channels, number, width);
}
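        // Hedged host-side reference (illustrative assumption, not part of the original
        // operator): reduce_sum_kernel_no_atomic collapses the middle `channels` axis of
        // the input viewed as [number, channels, width]; the same computation on the CPU:
        template <typename T>
        static void cpu_reduce_sum_reference(const T* in, T* out,
                                             int number, int channels, int width) {
            for (int n = 0; n < number; ++n) {
                for (int s = 0; s < width; ++s) {
                    T sum = T(0.f);
                    for (int k = 0; k < channels; ++k) {
                        sum += in[(n * channels + k) * width + s];
                    }
                    out[n * width + s] = sum;
                }
            }
        }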
#ifdef TS_USE_CUDA_FP16
template <>
__global__ void reduce_sum_kernel_no_atomic<half>(const half* input_data, half* output_data,
int channels, int number, int width) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
int size = number * width;
for (; index < size; index += blockDim.x * gridDim.x)
{
int n = index / width;
int s = index % width;
half sum = __float2half(0.f);
for (int k = 0; k < channels; k++)
{
sum = sum + input_data[(n * channels + k) * width + s];
}
output_data[index] = sum;
}
}
#endif
template<typename T>
void gpu_reduce_sum_compute_run(const Tensor &x, int dim, Tensor &out) {
auto &size = x.sizes();
auto number = std::accumulate(size.begin(), size.begin() + dim, 1, std::multiplies<int32_t>());
auto channels = size[dim];
auto width = std::accumulate(size.begin() + dim + 1, size.end(), 1, std::multiplies<int32_t>());
auto input_data = x.data<T>();
auto output_data = out.data<T>();
auto input_count = x.count();
auto output_count = out.count();
auto &context = ctx::ref<DeviceContext>();
CUDAContextHandle *handle = reinterpret_cast<CUDAContextHandle *>(context.handle);
auto cuda_stream = handle->stream();
local_run_kernel<T>(input_data, output_data,
input_count, output_count, number, channels, width,
channels * width, cuda_stream);
}
void ReduceSum::reduce(const Tensor &x, int dim, Tensor &out) {
// Notice: all of the tensors' memory devices are CPU, as given in running_memory_device
DTYPE dtype = out.dtype();
switch (dtype) {
#define DECLARE_COMPUTE_RUN(DTYPE, TYPE) \
case DTYPE: { gpu_reduce_sum_compute_run<TYPE>(x, dim, out); break; }
DECLARE_COMPUTE_RUN(INT8, int8_t);
DECLARE_COMPUTE_RUN(UINT8, uint8_t);
DECLARE_COMPUTE_RUN(INT16, int16_t);
DECLARE_COMPUTE_RUN(UINT16, uint16_t);
DECLARE_COMPUTE_RUN(INT32, int32_t);
DECLARE_COMPUTE_RUN(UINT32, uint32_t);
DECLARE_COMPUTE_RUN(INT64, int64_t);
DECLARE_COMPUTE_RUN(UINT64, uint64_t);
#ifdef TS_USE_CUDA_FP16
DECLARE_COMPUTE_RUN(FLOAT16, half);
#endif
DECLARE_COMPUTE_RUN(FLOAT32, float);
DECLARE_COMPUTE_RUN(FLOAT64, double);
#undef DECLARE_COMPUTE_RUN
default: {
TS_LOG_ERROR << this->op() << " not support data type(" << dtype << "): " << type_str(dtype) << eject;
break;
}
}
}
}
}
using namespace ts;
using namespace gpu;
TS_REGISTER_OPERATOR(ReduceSum, ts::GPU, name::layer::reduce_sum())
#ifdef TS_USE_CUDA_FP16
TS_REGISTER_FP16_OPERATOR(ReduceSum, ts::GPU, name::layer::reduce_sum())
#endif
|
d2b6395677e6c3c794075d2d2671dd5e995edef9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "disp_absolute_residual_scalable_GPU.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_abs_res = NULL;
hipMalloc(&d_abs_res, XSIZE*YSIZE);
const float *d_disparity_compact = NULL;
hipMalloc(&d_disparity_compact, XSIZE*YSIZE);
const float4 *d_Zbuffer_normals_compact = NULL;
hipMalloc(&d_Zbuffer_normals_compact, XSIZE*YSIZE);
const int *d_ind_disparity_Zbuffer = NULL;
hipMalloc(&d_ind_disparity_Zbuffer, XSIZE*YSIZE);
const unsigned int *d_valid_disparity_Zbuffer = NULL;
hipMalloc(&d_valid_disparity_Zbuffer, XSIZE*YSIZE);
float fx = 1;
float fy = 1;
float ox = 1;
float oy = 1;
float b = 2;
int n_cols = 1;
int n_valid_disparity_Zbuffer = 1;
const int *d_offset_ind = NULL;
hipMalloc(&d_offset_ind, XSIZE*YSIZE);
const int *d_segment_translation_table = NULL;
hipMalloc(&d_segment_translation_table, XSIZE*YSIZE);
float w_disp = 1;
const float *d_dTR = NULL;
hipMalloc(&d_dTR, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((disp_absolute_residual_scalable_GPU), dim3(gridBlock), dim3(threadBlock), 0, 0, d_abs_res,d_disparity_compact,d_Zbuffer_normals_compact,d_ind_disparity_Zbuffer,d_valid_disparity_Zbuffer,fx,fy,ox,oy,b,n_cols,n_valid_disparity_Zbuffer,d_offset_ind,d_segment_translation_table,w_disp,d_dTR);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((disp_absolute_residual_scalable_GPU), dim3(gridBlock), dim3(threadBlock), 0, 0, d_abs_res,d_disparity_compact,d_Zbuffer_normals_compact,d_ind_disparity_Zbuffer,d_valid_disparity_Zbuffer,fx,fy,ox,oy,b,n_cols,n_valid_disparity_Zbuffer,d_offset_ind,d_segment_translation_table,w_disp,d_dTR);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((disp_absolute_residual_scalable_GPU), dim3(gridBlock), dim3(threadBlock), 0, 0, d_abs_res,d_disparity_compact,d_Zbuffer_normals_compact,d_ind_disparity_Zbuffer,d_valid_disparity_Zbuffer,fx,fy,ox,oy,b,n_cols,n_valid_disparity_Zbuffer,d_offset_ind,d_segment_translation_table,w_disp,d_dTR);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
d2b6395677e6c3c794075d2d2671dd5e995edef9.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "disp_absolute_residual_scalable_GPU.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_abs_res = NULL;
cudaMalloc(&d_abs_res, XSIZE*YSIZE);
const float *d_disparity_compact = NULL;
cudaMalloc(&d_disparity_compact, XSIZE*YSIZE);
const float4 *d_Zbuffer_normals_compact = NULL;
cudaMalloc(&d_Zbuffer_normals_compact, XSIZE*YSIZE);
const int *d_ind_disparity_Zbuffer = NULL;
cudaMalloc(&d_ind_disparity_Zbuffer, XSIZE*YSIZE);
const unsigned int *d_valid_disparity_Zbuffer = NULL;
cudaMalloc(&d_valid_disparity_Zbuffer, XSIZE*YSIZE);
float fx = 1;
float fy = 1;
float ox = 1;
float oy = 1;
float b = 2;
int n_cols = 1;
int n_valid_disparity_Zbuffer = 1;
const int *d_offset_ind = NULL;
cudaMalloc(&d_offset_ind, XSIZE*YSIZE);
const int *d_segment_translation_table = NULL;
cudaMalloc(&d_segment_translation_table, XSIZE*YSIZE);
float w_disp = 1;
const float *d_dTR = NULL;
cudaMalloc(&d_dTR, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
disp_absolute_residual_scalable_GPU<<<gridBlock,threadBlock>>>(d_abs_res,d_disparity_compact,d_Zbuffer_normals_compact,d_ind_disparity_Zbuffer,d_valid_disparity_Zbuffer,fx,fy,ox,oy,b,n_cols,n_valid_disparity_Zbuffer,d_offset_ind,d_segment_translation_table,w_disp,d_dTR);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
disp_absolute_residual_scalable_GPU<<<gridBlock,threadBlock>>>(d_abs_res,d_disparity_compact,d_Zbuffer_normals_compact,d_ind_disparity_Zbuffer,d_valid_disparity_Zbuffer,fx,fy,ox,oy,b,n_cols,n_valid_disparity_Zbuffer,d_offset_ind,d_segment_translation_table,w_disp,d_dTR);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
disp_absolute_residual_scalable_GPU<<<gridBlock,threadBlock>>>(d_abs_res,d_disparity_compact,d_Zbuffer_normals_compact,d_ind_disparity_Zbuffer,d_valid_disparity_Zbuffer,fx,fy,ox,oy,b,n_cols,n_valid_disparity_Zbuffer,d_offset_ind,d_segment_translation_table,w_disp,d_dTR);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
a21d71b23a12c71ddb2e4db6b7f4bf16c3f51fc2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "../benchmark_common.h"
#define BLOCK_SIZE 16
#define STR_SIZE 256
/* maximum power density possible (say 300W for a 10mm x 10mm chip) */
#define MAX_PD (3.0e6)
/* required precision in degrees */
#define PRECISION 0.001
#define SPEC_HEAT_SI 1.75e6
#define K_SI 100
/* capacitance fitting factor */
#define FACTOR_CHIP 0.5
/* chip parameters */
float t_chip = 0.0005;
float chip_height = 0.016;
float chip_width = 0.016;
/* ambient temperature, assuming no package at all */
float amb_temp = 80.0;
void run(hipStream_t stream_app, pthread_mutex_t* mutexapp, bool flag);
/* define timer macros */
#define pin_stats_reset() startCycle()
#define pin_stats_pause(cycles) stopCycle(cycles)
#define pin_stats_dump(cycles) printf("timer: %Lu\n", cycles)
void fatal(char* s) {
fprintf(stderr, "error: %s\n", s);
}
void writeoutput(float* vect, int grid_rows, int grid_cols, char* file) {
int i, j, index = 0;
FILE* fp;
char str[STR_SIZE];
if ((fp = fopen(file, "w")) == 0)
printf("The file was not opened\n");
for (i = 0; i < grid_rows; i++)
for (j = 0; j < grid_cols; j++) {
sprintf(str, "%d\t%g\n", index, vect[i * grid_cols + j]);
fputs(str, fp);
index++;
}
fclose(fp);
}
void readinput(float* vect, int grid_rows, int grid_cols, char* file) {
int i, j;
FILE* fp;
char str[STR_SIZE];
float val;
if ((fp = fopen(file, "r")) == 0)
printf("The file was not opened\n");
for (i = 0; i <= grid_rows - 1; i++)
for (j = 0; j <= grid_cols - 1; j++) {
fgets(str, STR_SIZE, fp);
if (feof(fp))
fatal("not enough lines in file");
// if ((sscanf(str, "%d%f", &index, &val) != 2) || (index !=
// ((i-1)*(grid_cols-2)+j-1)))
if ((sscanf(str, "%f", &val) != 1))
fatal("invalid file format");
vect[i * grid_cols + j] = val;
}
fclose(fp);
}
#define IN_RANGE(x, min, max) ((x) >= (min) && (x) <= (max))
#define CLAMP_RANGE(x, min, max) x = (x < (min)) ? min : ((x > (max)) ? max : x)
#define MIN_hotspot(a, b) ((a) <= (b) ? (a) : (b))
__global__ void calculate_temp(int iteration, // number of iteration
float* power, // power input
float* temp_src, // temperature input/output
float* temp_dst, // temperature input/output
int grid_cols, // Col of grid
int grid_rows, // Row of grid
int border_cols, // border offset
int border_rows, // border offset
float Cap, // Capacitance
float Rx,
float Ry,
float Rz,
float step,
float time_elapsed) {
__shared__ float temp_on_cuda[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float power_on_cuda[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float temp_t[BLOCK_SIZE]
[BLOCK_SIZE]; // saving temporary temperature result
float amb_temp = 80.0;
float step_div_Cap;
float Rx_1, Ry_1, Rz_1;
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
step_div_Cap = step / Cap;
Rx_1 = 1 / Rx;
Ry_1 = 1 / Ry;
Rz_1 = 1 / Rz;
// each block finally computes result for a small block
// after N iterations.
// it is the non-overlapping small blocks that cover
// all the input data
// calculate the small block size
int small_block_rows = BLOCK_SIZE - iteration * 2; // EXPAND_RATE
int small_block_cols = BLOCK_SIZE - iteration * 2; // EXPAND_RATE
// calculate the boundary for the block according to
// the boundary of its small block
int blkY = small_block_rows * by - border_rows;
int blkX = small_block_cols * bx - border_cols;
int blkYmax = blkY + BLOCK_SIZE - 1;
int blkXmax = blkX + BLOCK_SIZE - 1;
// calculate the global thread coordination
int yidx = blkY + ty;
int xidx = blkX + tx;
// load data if it is within the valid input range
int loadYidx = yidx, loadXidx = xidx;
int index = grid_rows * loadYidx + loadXidx;
if (IN_RANGE(loadYidx, 0, grid_rows - 1) &&
IN_RANGE(loadXidx, 0, grid_cols - 1)) {
temp_on_cuda[ty][tx] = temp_src[index]; // Load the temperature data from
// global memory to shared memory
power_on_cuda[ty][tx] = power[index]; // Load the power data from global
// memory to shared memory
}
__syncthreads();
// effective range within this block that falls within
// the valid range of the input data
// used to rule out computation outside the boundary.
int validYmin = (blkY < 0) ? -blkY : 0;
int validYmax = (blkYmax > grid_rows - 1)
? BLOCK_SIZE - 1 - (blkYmax - grid_rows + 1)
: BLOCK_SIZE - 1;
int validXmin = (blkX < 0) ? -blkX : 0;
int validXmax = (blkXmax > grid_cols - 1)
? BLOCK_SIZE - 1 - (blkXmax - grid_cols + 1)
: BLOCK_SIZE - 1;
int N = ty - 1;
int S = ty + 1;
int W = tx - 1;
int E = tx + 1;
N = (N < validYmin) ? validYmin : N;
S = (S > validYmax) ? validYmax : S;
W = (W < validXmin) ? validXmin : W;
E = (E > validXmax) ? validXmax : E;
bool computed;
for (int i = 0; i < iteration; i++) {
computed = false;
if (IN_RANGE(tx, i + 1, BLOCK_SIZE - i - 2) &&
IN_RANGE(ty, i + 1, BLOCK_SIZE - i - 2) &&
IN_RANGE(tx, validXmin, validXmax) &&
IN_RANGE(ty, validYmin, validYmax)) {
computed = true;
temp_t[ty][tx] =
temp_on_cuda[ty][tx] +
step_div_Cap * (power_on_cuda[ty][tx] +
(temp_on_cuda[S][tx] + temp_on_cuda[N][tx] -
2.0 * temp_on_cuda[ty][tx]) *
Ry_1 +
(temp_on_cuda[ty][E] + temp_on_cuda[ty][W] -
2.0 * temp_on_cuda[ty][tx]) *
Rx_1 +
(amb_temp - temp_on_cuda[ty][tx]) * Rz_1);
}
__syncthreads();
if (i == iteration - 1)
break;
if (computed) // Assign the computation range
temp_on_cuda[ty][tx] = temp_t[ty][tx];
__syncthreads();
}
// update the global memory
// after the last iteration, only threads coordinated within the
// small block perform the calculation and switch on ``computed''
if (computed) {
temp_dst[index] = temp_t[ty][tx];
}
}
/*
compute N time steps
*/
int compute_tran_temp(float* MatrixPower,
float* MatrixTemp[2],
int col,
int row,
int total_iterations,
int num_iterations,
int blockCols,
int blockRows,
int borderCols,
int borderRows,
hipStream_t stream_app,
pthread_mutex_t* mutexapp,
bool flag) {
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(blockCols, blockRows);
float grid_height = chip_height / row;
float grid_width = chip_width / col;
float Cap = FACTOR_CHIP * SPEC_HEAT_SI * t_chip * grid_width * grid_height;
float Rx = grid_width / (2.0 * K_SI * t_chip * grid_height);
float Ry = grid_height / (2.0 * K_SI * t_chip * grid_width);
float Rz = t_chip / (K_SI * grid_height * grid_width);
float max_slope = MAX_PD / (FACTOR_CHIP * t_chip * SPEC_HEAT_SI);
float step = PRECISION / max_slope;
float t;
float time_elapsed;
time_elapsed = 0.001;
int src = 1, dst = 0;
printf("I am in for loop \n");
for (t = 0; t < total_iterations; t += num_iterations) {
int temp = src;
src = dst;
dst = temp;
hipLaunchKernelGGL(( calculate_temp), dim3(dimGrid), dim3(dimBlock), 0, stream_app,
MIN_hotspot(num_iterations, total_iterations - t), MatrixPower,
MatrixTemp[src], MatrixTemp[dst], col, row, borderCols, borderRows, Cap,
Rx, Ry, Rz, step, time_elapsed);
printf("hello from loop\n");
pthread_mutex_unlock(mutexapp);
if (flag)
cutilSafeCall(hipStreamSynchronize(stream_app));
pthread_mutex_lock(mutexapp);
}
pthread_mutex_unlock(mutexapp);
if (flag)
cutilSafeCall(hipStreamSynchronize(stream_app));
printf("I am in function\n");
return dst;
}
/*void usage(int argc, char **argv)
{
fprintf(stderr, "Usage: %s <grid_rows/grid_cols> <pyramid_height>
<sim_time> <temp_file> <power_file> <output_file>\n", argv[0]);
fprintf(stderr, "\t<grid_rows/grid_cols> - number of rows/cols in the
grid (positive integer)\n");
fprintf(stderr, "\t<pyramid_height> - pyramid heigh(positive
integer)\n");
fprintf(stderr, "\t<sim_time> - number of iterations\n");
fprintf(stderr, "\t<temp_file> - name of the file containing the
initial temperature values of each cell\n");
fprintf(stderr, "\t<power_file> - name of the file containing the
dissipated power values of each cell\n");
fprintf(stderr, "\t<output_file> - name of the output file\n");
exit(1);
}*/
// int main(int argc, char** argv)
int main_hotspot(hipStream_t stream_app,
pthread_mutex_t* mutexapp,
bool flag) {
run(stream_app, mutexapp, flag);
// return EXIT_SUCCESS;
return 0;
}
void run(hipStream_t stream_app, pthread_mutex_t* mutexapp, bool flag) {
int size;
int grid_rows, grid_cols;
float *FilesavingTemp, *FilesavingPower, *MatrixOut;
char *tfile, *pfile, *ofile;
// int total_iterations = 60;
// int pyramid_height = 1; // number of iterations
grid_rows = 512;
grid_cols = 512;
int total_iterations = 3;
int pyramid_height = 2;
/*if (argc != 7)
usage(argc, argv);
if((grid_rows = atoi(argv[1]))<=0||
(grid_cols = atoi(argv[1]))<=0||
(pyramid_height = atoi(argv[2]))<=0||
(total_iterations = atoi(argv[3]))<=0)
usage(argc, argv);
tfile=argv[4];
pfile=argv[5];
ofile=argv[6];*/
tfile = (char*)"../HS/temp_512";
pfile = (char*)"../HS/power_512";
ofile = (char*)"output.out";
size = grid_rows * grid_cols;
/* --------------- pyramid parameters --------------- */
#define EXPAND_RATE \
2 // adding one iteration extends the pyramid base by 2 cells on each border
int borderCols = (pyramid_height)*EXPAND_RATE / 2;
int borderRows = (pyramid_height)*EXPAND_RATE / 2;
int smallBlockCol = BLOCK_SIZE - (pyramid_height)*EXPAND_RATE;
int smallBlockRow = BLOCK_SIZE - (pyramid_height)*EXPAND_RATE;
int blockCols =
grid_cols / smallBlockCol + ((grid_cols % smallBlockCol == 0) ? 0 : 1);
int blockRows =
grid_rows / smallBlockRow + ((grid_rows % smallBlockRow == 0) ? 0 : 1);
FilesavingTemp = (float*)malloc(size * sizeof(float));
FilesavingPower = (float*)malloc(size * sizeof(float));
MatrixOut = (float*)calloc(size, sizeof(float));
if (!FilesavingPower || !FilesavingTemp || !MatrixOut)
fatal("unable to allocate memory");
printf(
"pyramidHeight: %d\ngridSize: [%d, %d]\nborder:[%d, %d]\nblockGrid:[%d, "
"%d]\ntargetBlock:[%d, %d]\n",
pyramid_height, grid_cols, grid_rows, borderCols, borderRows, blockCols,
blockRows, smallBlockCol, smallBlockRow);
readinput(FilesavingTemp, grid_rows, grid_cols, tfile);
readinput(FilesavingPower, grid_rows, grid_cols, pfile);
float *MatrixTemp[2], *MatrixPower;
hipMalloc((void**)&MatrixTemp[0], sizeof(float) * size);
hipMalloc((void**)&MatrixTemp[1], sizeof(float) * size);
hipMemcpyAsync(MatrixTemp[0], FilesavingTemp, sizeof(float) * size,
hipMemcpyHostToDevice, stream_app);
hipMalloc((void**)&MatrixPower, sizeof(float) * size);
hipMemcpyAsync(MatrixPower, FilesavingPower, sizeof(float) * size,
hipMemcpyHostToDevice, stream_app);
printf("Start computing the transient temperature\n");
int ret =
compute_tran_temp(MatrixPower, MatrixTemp, grid_cols, grid_rows,
total_iterations, pyramid_height, blockCols, blockRows,
borderCols, borderRows, stream_app, mutexapp, flag);
printf("Ending simulation\n");
hipMemcpyAsync(MatrixOut, MatrixTemp[ret], sizeof(float) * size,
hipMemcpyDeviceToHost, stream_app);
if (flag)
cutilSafeCall(hipStreamSynchronize(stream_app));
writeoutput(MatrixOut, grid_rows, grid_cols, ofile);
hipFree(MatrixPower);
hipFree(MatrixTemp[0]);
hipFree(MatrixTemp[1]);
free(MatrixOut);
}
|
a21d71b23a12c71ddb2e4db6b7f4bf16c3f51fc2.cu
|
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "../benchmark_common.h"
#define BLOCK_SIZE 16
#define STR_SIZE 256
/* maximum power density possible (say 300W for a 10mm x 10mm chip) */
#define MAX_PD (3.0e6)
/* required precision in degrees */
#define PRECISION 0.001
#define SPEC_HEAT_SI 1.75e6
#define K_SI 100
/* capacitance fitting factor */
#define FACTOR_CHIP 0.5
/* chip parameters */
float t_chip = 0.0005;
float chip_height = 0.016;
float chip_width = 0.016;
/* ambient temperature, assuming no package at all */
float amb_temp = 80.0;
void run(cudaStream_t stream_app, pthread_mutex_t* mutexapp, bool flag);
/* define timer macros */
#define pin_stats_reset() startCycle()
#define pin_stats_pause(cycles) stopCycle(cycles)
#define pin_stats_dump(cycles) printf("timer: %Lu\n", cycles)
void fatal(char* s) {
fprintf(stderr, "error: %s\n", s);
}
void writeoutput(float* vect, int grid_rows, int grid_cols, char* file) {
int i, j, index = 0;
FILE* fp;
char str[STR_SIZE];
if ((fp = fopen(file, "w")) == 0)
printf("The file was not opened\n");
for (i = 0; i < grid_rows; i++)
for (j = 0; j < grid_cols; j++) {
sprintf(str, "%d\t%g\n", index, vect[i * grid_cols + j]);
fputs(str, fp);
index++;
}
fclose(fp);
}
void readinput(float* vect, int grid_rows, int grid_cols, char* file) {
int i, j;
FILE* fp;
char str[STR_SIZE];
float val;
if ((fp = fopen(file, "r")) == 0)
printf("The file was not opened\n");
for (i = 0; i <= grid_rows - 1; i++)
for (j = 0; j <= grid_cols - 1; j++) {
fgets(str, STR_SIZE, fp);
if (feof(fp))
fatal("not enough lines in file");
// if ((sscanf(str, "%d%f", &index, &val) != 2) || (index !=
// ((i-1)*(grid_cols-2)+j-1)))
if ((sscanf(str, "%f", &val) != 1))
fatal("invalid file format");
vect[i * grid_cols + j] = val;
}
fclose(fp);
}
#define IN_RANGE(x, min, max) ((x) >= (min) && (x) <= (max))
#define CLAMP_RANGE(x, min, max) x = (x < (min)) ? min : ((x > (max)) ? max : x)
#define MIN_hotspot(a, b) ((a) <= (b) ? (a) : (b))
__global__ void calculate_temp(int iteration, // number of iteration
float* power, // power input
float* temp_src, // temperature input/output
float* temp_dst, // temperature input/output
int grid_cols, // Col of grid
int grid_rows, // Row of grid
int border_cols, // border offset
int border_rows, // border offset
float Cap, // Capacitance
float Rx,
float Ry,
float Rz,
float step,
float time_elapsed) {
__shared__ float temp_on_cuda[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float power_on_cuda[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float temp_t[BLOCK_SIZE]
[BLOCK_SIZE]; // saving temporary temperature result
float amb_temp = 80.0;
float step_div_Cap;
float Rx_1, Ry_1, Rz_1;
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
step_div_Cap = step / Cap;
Rx_1 = 1 / Rx;
Ry_1 = 1 / Ry;
Rz_1 = 1 / Rz;
// each block finally computes result for a small block
// after N iterations.
// it is the non-overlapping small blocks that cover
// all the input data
// calculate the small block size
int small_block_rows = BLOCK_SIZE - iteration * 2; // EXPAND_RATE
int small_block_cols = BLOCK_SIZE - iteration * 2; // EXPAND_RATE
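  // Worked example (follows from the two lines above): with BLOCK_SIZE = 16 and
  // iteration = 2, each block produces a valid 12 x 12 interior tile, while the
  // 2-cell halo on every side is loaded and recomputed redundantly by the
  // neighbouring blocks so that no inter-block communication is needed.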
// calculate the boundary for the block according to
// the boundary of its small block
int blkY = small_block_rows * by - border_rows;
int blkX = small_block_cols * bx - border_cols;
int blkYmax = blkY + BLOCK_SIZE - 1;
int blkXmax = blkX + BLOCK_SIZE - 1;
// calculate the global thread coordination
int yidx = blkY + ty;
int xidx = blkX + tx;
// load data if it is within the valid input range
int loadYidx = yidx, loadXidx = xidx;
int index = grid_rows * loadYidx + loadXidx;
if (IN_RANGE(loadYidx, 0, grid_rows - 1) &&
IN_RANGE(loadXidx, 0, grid_cols - 1)) {
temp_on_cuda[ty][tx] = temp_src[index]; // Load the temperature data from
// global memory to shared memory
power_on_cuda[ty][tx] = power[index]; // Load the power data from global
// memory to shared memory
}
__syncthreads();
// effective range within this block that falls within
// the valid range of the input data
// used to rule out computation outside the boundary.
int validYmin = (blkY < 0) ? -blkY : 0;
int validYmax = (blkYmax > grid_rows - 1)
? BLOCK_SIZE - 1 - (blkYmax - grid_rows + 1)
: BLOCK_SIZE - 1;
int validXmin = (blkX < 0) ? -blkX : 0;
int validXmax = (blkXmax > grid_cols - 1)
? BLOCK_SIZE - 1 - (blkXmax - grid_cols + 1)
: BLOCK_SIZE - 1;
int N = ty - 1;
int S = ty + 1;
int W = tx - 1;
int E = tx + 1;
N = (N < validYmin) ? validYmin : N;
S = (S > validYmax) ? validYmax : S;
W = (W < validXmin) ? validXmin : W;
E = (E > validXmax) ? validXmax : E;
bool computed;
for (int i = 0; i < iteration; i++) {
computed = false;
if (IN_RANGE(tx, i + 1, BLOCK_SIZE - i - 2) &&
IN_RANGE(ty, i + 1, BLOCK_SIZE - i - 2) &&
IN_RANGE(tx, validXmin, validXmax) &&
IN_RANGE(ty, validYmin, validYmax)) {
computed = true;
temp_t[ty][tx] =
temp_on_cuda[ty][tx] +
step_div_Cap * (power_on_cuda[ty][tx] +
(temp_on_cuda[S][tx] + temp_on_cuda[N][tx] -
2.0 * temp_on_cuda[ty][tx]) *
Ry_1 +
(temp_on_cuda[ty][E] + temp_on_cuda[ty][W] -
2.0 * temp_on_cuda[ty][tx]) *
Rx_1 +
(amb_temp - temp_on_cuda[ty][tx]) * Rz_1);
}
__syncthreads();
if (i == iteration - 1)
break;
if (computed) // Assign the computation range
temp_on_cuda[ty][tx] = temp_t[ty][tx];
__syncthreads();
}
// update the global memory
// after the last iteration, only threads coordinated within the
// small block perform the calculation and switch on ``computed''
if (computed) {
temp_dst[index] = temp_t[ty][tx];
}
}
/*
compute N time steps
*/
int compute_tran_temp(float* MatrixPower,
float* MatrixTemp[2],
int col,
int row,
int total_iterations,
int num_iterations,
int blockCols,
int blockRows,
int borderCols,
int borderRows,
cudaStream_t stream_app,
pthread_mutex_t* mutexapp,
bool flag) {
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(blockCols, blockRows);
float grid_height = chip_height / row;
float grid_width = chip_width / col;
float Cap = FACTOR_CHIP * SPEC_HEAT_SI * t_chip * grid_width * grid_height;
float Rx = grid_width / (2.0 * K_SI * t_chip * grid_height);
float Ry = grid_height / (2.0 * K_SI * t_chip * grid_width);
float Rz = t_chip / (K_SI * grid_height * grid_width);
float max_slope = MAX_PD / (FACTOR_CHIP * t_chip * SPEC_HEAT_SI);
float step = PRECISION / max_slope;
float t;
float time_elapsed;
time_elapsed = 0.001;
int src = 1, dst = 0;
printf("I am in for loop \n");
for (t = 0; t < total_iterations; t += num_iterations) {
int temp = src;
src = dst;
dst = temp;
calculate_temp<<<dimGrid, dimBlock, 0, stream_app>>>(
MIN_hotspot(num_iterations, total_iterations - t), MatrixPower,
MatrixTemp[src], MatrixTemp[dst], col, row, borderCols, borderRows, Cap,
Rx, Ry, Rz, step, time_elapsed);
printf("hello from loop\n");
pthread_mutex_unlock(mutexapp);
if (flag)
cutilSafeCall(cudaStreamSynchronize(stream_app));
pthread_mutex_lock(mutexapp);
}
pthread_mutex_unlock(mutexapp);
if (flag)
cutilSafeCall(cudaStreamSynchronize(stream_app));
printf("I am in function\n");
return dst;
}
/*void usage(int argc, char **argv)
{
  fprintf(stderr, "Usage: %s <grid_rows/grid_cols> <pyramid_height> <sim_time> <temp_file> <power_file> <output_file>\n", argv[0]);
  fprintf(stderr, "\t<grid_rows/grid_cols> - number of rows/cols in the grid (positive integer)\n");
  fprintf(stderr, "\t<pyramid_height> - pyramid height (positive integer)\n");
  fprintf(stderr, "\t<sim_time> - number of iterations\n");
  fprintf(stderr, "\t<temp_file> - name of the file containing the initial temperature values of each cell\n");
  fprintf(stderr, "\t<power_file> - name of the file containing the dissipated power values of each cell\n");
  fprintf(stderr, "\t<output_file> - name of the output file\n");
  exit(1);
}*/
// int main(int argc, char** argv)
int main_hotspot(cudaStream_t stream_app,
pthread_mutex_t* mutexapp,
bool flag) {
run(stream_app, mutexapp, flag);
// return EXIT_SUCCESS;
return 0;
}
void run(cudaStream_t stream_app, pthread_mutex_t* mutexapp, bool flag) {
int size;
int grid_rows, grid_cols;
float *FilesavingTemp, *FilesavingPower, *MatrixOut;
char *tfile, *pfile, *ofile;
// int total_iterations = 60;
// int pyramid_height = 1; // number of iterations
grid_rows = 512;
grid_cols = 512;
int total_iterations = 3;
int pyramid_height = 2;
/*if (argc != 7)
usage(argc, argv);
if((grid_rows = atoi(argv[1]))<=0||
(grid_cols = atoi(argv[1]))<=0||
(pyramid_height = atoi(argv[2]))<=0||
(total_iterations = atoi(argv[3]))<=0)
usage(argc, argv);
tfile=argv[4];
pfile=argv[5];
ofile=argv[6];*/
tfile = (char*)"../HS/temp_512";
pfile = (char*)"../HS/power_512";
ofile = (char*)"output.out";
size = grid_rows * grid_cols;
/* --------------- pyramid parameters --------------- */
#define EXPAND_RATE \
  2  // adding one iteration extends the pyramid base by 2 cells along each border
int borderCols = (pyramid_height)*EXPAND_RATE / 2;
int borderRows = (pyramid_height)*EXPAND_RATE / 2;
int smallBlockCol = BLOCK_SIZE - (pyramid_height)*EXPAND_RATE;
int smallBlockRow = BLOCK_SIZE - (pyramid_height)*EXPAND_RATE;
int blockCols =
grid_cols / smallBlockCol + ((grid_cols % smallBlockCol == 0) ? 0 : 1);
int blockRows =
grid_rows / smallBlockRow + ((grid_rows % smallBlockRow == 0) ? 0 : 1);
FilesavingTemp = (float*)malloc(size * sizeof(float));
FilesavingPower = (float*)malloc(size * sizeof(float));
MatrixOut = (float*)calloc(size, sizeof(float));
if (!FilesavingPower || !FilesavingTemp || !MatrixOut)
fatal("unable to allocate memory");
printf(
"pyramidHeight: %d\ngridSize: [%d, %d]\nborder:[%d, %d]\nblockGrid:[%d, "
"%d]\ntargetBlock:[%d, %d]\n",
pyramid_height, grid_cols, grid_rows, borderCols, borderRows, blockCols,
blockRows, smallBlockCol, smallBlockRow);
readinput(FilesavingTemp, grid_rows, grid_cols, tfile);
readinput(FilesavingPower, grid_rows, grid_cols, pfile);
float *MatrixTemp[2], *MatrixPower;
cudaMalloc((void**)&MatrixTemp[0], sizeof(float) * size);
cudaMalloc((void**)&MatrixTemp[1], sizeof(float) * size);
cudaMemcpyAsync(MatrixTemp[0], FilesavingTemp, sizeof(float) * size,
cudaMemcpyHostToDevice, stream_app);
cudaMalloc((void**)&MatrixPower, sizeof(float) * size);
cudaMemcpyAsync(MatrixPower, FilesavingPower, sizeof(float) * size,
cudaMemcpyHostToDevice, stream_app);
printf("Start computing the transient temperature\n");
int ret =
compute_tran_temp(MatrixPower, MatrixTemp, grid_cols, grid_rows,
total_iterations, pyramid_height, blockCols, blockRows,
borderCols, borderRows, stream_app, mutexapp, flag);
printf("Ending simulation\n");
cudaMemcpyAsync(MatrixOut, MatrixTemp[ret], sizeof(float) * size,
cudaMemcpyDeviceToHost, stream_app);
if (flag)
cutilSafeCall(cudaStreamSynchronize(stream_app));
writeoutput(MatrixOut, grid_rows, grid_cols, ofile);
cudaFree(MatrixPower);
cudaFree(MatrixTemp[0]);
cudaFree(MatrixTemp[1]);
free(MatrixOut);
}
|
5b7990c8ad582d0fb95e2d5c3540dcc85acbd023.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2021, NAVER Corp. Authored by CLOVA.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/fastertransformer/kernels/decoder_masked_multihead_attention_utils.h"
#include "src/fastertransformer/kernels/reduce_kernel_utils.cuh"
#include "src/fastertransformer/kernels/unfused_attention_kernels.h"
#include "src/fastertransformer/utils/cuda_type_utils.cuh"
#include "src/fastertransformer/utils/cuda_utils.h"
namespace fastertransformer {
__inline__ __device__ int target_index(int id1, int id2, int id3, int id4, int dim_1, int dim_2, int dim_3, int dim_4)
{
return id1 * (dim_2 * dim_3 * dim_4) + id3 * (dim_2 * dim_4) + id2 * dim_4 + id4;
}
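// Illustrative note (not part of the original file): for a tensor laid out as
// [dim_1, dim_2, dim_3, dim_4] with logical indices (id1, id2, id3, id4),
// target_index returns the offset of that element in a [dim_1, dim_3, dim_2, dim_4]
// layout, i.e. the middle two dimensions are swapped. For example, with
// dims (2, 3, 4, 5) and indices (1, 2, 3, 0):
//   offset = 1*(3*4*5) + 3*(3*5) + 2*5 + 0 = 60 + 45 + 10 = 115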
template<typename T>
__global__ void addQKVBiasIA3Transpose(T* q_out,
T* k_out,
T* v_out,
const T* __restrict q_in,
const T* __restrict bias_q,
const T* __restrict k_in,
const T* __restrict bias_k,
const T* __restrict v_in,
const T* __restrict bias_v,
const int* ia3_tasks,
const T* ia3_key_weights,
const T* ia3_value_weights,
const int batch_size,
const int seq_len,
const int head_num,
const int size_per_head)
{
const int n = head_num * size_per_head;
const int batch_id = blockIdx.x;
const int word_id = blockIdx.y;
const int row_id = batch_id * seq_len + word_id;
const bool use_ia3 = ia3_tasks != nullptr;
const int ia3_task = use_ia3 ? ia3_tasks[batch_id] : 0;
const bool use_ia3_key = use_ia3 && (ia3_key_weights != nullptr);
const bool use_ia3_value = use_ia3 && (ia3_value_weights != nullptr);
for (int col_id = threadIdx.x; col_id < n; col_id += blockDim.x) {
const int head_id = col_id / size_per_head;
const int size_id = col_id % size_per_head;
const int target_id = batch_id * (head_num * seq_len * size_per_head) + head_id * seq_len * size_per_head
+ word_id * size_per_head + size_id;
const int src_id = row_id * n + col_id;
T q = ldg(&q_in[src_id]);
q_out[target_id] = add(q, ldg(&bias_q[col_id]));
T k = add(ldg(&k_in[src_id]), ldg(&bias_k[col_id]));
if (use_ia3_key) {
k = k * ia3_key_weights[ia3_task * n + col_id];
}
k_out[target_id] = k;
T v = add(ldg(&v_in[src_id]), ldg(&bias_v[col_id]));
if (use_ia3_value) {
v = v * ia3_value_weights[ia3_task * n + col_id];
}
v_out[target_id] = v;
}
}
template<typename T>
__global__ void QKVIA3Transpose(T* q_out,
T* k_out,
T* v_out,
const T* __restrict q_in,
const T* __restrict k_in,
const T* __restrict v_in,
const int* ia3_tasks,
const T* __restrict ia3_key_weights,
const T* __restrict ia3_value_weights,
const int batch_size,
const int seq_len,
const int head_num,
const int size_per_head)
{
const int n = head_num * size_per_head;
const int batch_id = blockIdx.x;
const int word_id = blockIdx.y;
const int row_id = batch_id * seq_len + word_id;
const bool use_ia3 = ia3_tasks != nullptr;
const int ia3_task = use_ia3 ? ia3_tasks[batch_id] : 0;
const bool use_ia3_key = use_ia3 && (ia3_key_weights != nullptr);
const bool use_ia3_value = use_ia3 && (ia3_value_weights != nullptr);
for (int col_id = threadIdx.x; col_id < n; col_id += blockDim.x) {
const int head_id = col_id / size_per_head;
const int size_id = col_id % size_per_head;
const int target_id = batch_id * (head_num * seq_len * size_per_head) + head_id * seq_len * size_per_head
+ word_id * size_per_head + size_id;
const int src_id = row_id * n + col_id;
q_out[target_id] = ldg(&q_in[src_id]);
T k = ldg(&k_in[src_id]);
if (use_ia3_key) {
k = k * ia3_key_weights[ia3_task * n + col_id];
}
k_out[target_id] = k;
T v = ldg(&v_in[src_id]);
if (use_ia3_value) {
v = v * ia3_value_weights[ia3_task * n + col_id];
}
v_out[target_id] = v;
}
}
template<typename T>
void invokeAddQKVBiasIA3Transpose(T* q_buf,
T* k_buf,
T* v_buf,
T* Q,
const T* bias_Q,
T* K,
const T* bias_K,
T* V,
const T* bias_V,
const int batch_size,
const int seq_len,
const int head_num,
const int size_per_head,
const int* ia3_tasks,
const T* ia3_key_weights,
const T* ia3_value_weights,
hipStream_t stream)
{
const int k = head_num * size_per_head;
dim3 grid(batch_size, seq_len);
bool is_add_bias = bias_Q != nullptr;
if (sizeof(T) == 4 || k % 2 != 0) {
dim3 block(min(k, 512));
if (is_add_bias) {
hipLaunchKernelGGL(( addQKVBiasIA3Transpose<T>), dim3(grid), dim3(block), 0, stream, q_buf,
k_buf,
v_buf,
Q,
bias_Q,
K,
bias_K,
V,
bias_V,
ia3_tasks,
ia3_key_weights,
ia3_value_weights,
batch_size,
seq_len,
head_num,
size_per_head);
}
else {
hipLaunchKernelGGL(( QKVIA3Transpose<T>), dim3(grid), dim3(block), 0, stream, q_buf,
k_buf,
v_buf,
Q,
K,
V,
ia3_tasks,
ia3_key_weights,
ia3_value_weights,
batch_size,
seq_len,
head_num,
size_per_head);
}
sync_check_cuda_error();
}
else {
using T2 = typename TypeConverter<T>::Type; // fp16 to half2, bf16 to bf162
dim3 block(min(k / 2, 512));
if (is_add_bias) {
hipLaunchKernelGGL(( addQKVBiasIA3Transpose<T2>), dim3(grid), dim3(block), 0, stream, (T2*)q_buf,
(T2*)k_buf,
(T2*)v_buf,
(const T2*)Q,
(const T2*)bias_Q,
(const T2*)K,
(const T2*)bias_K,
(const T2*)V,
(const T2*)bias_V,
ia3_tasks,
(const T2*)ia3_key_weights,
(const T2*)ia3_value_weights,
batch_size,
seq_len,
head_num,
size_per_head / 2);
}
else {
hipLaunchKernelGGL(( QKVIA3Transpose<T2>), dim3(grid), dim3(block), 0, stream, (T2*)q_buf,
(T2*)k_buf,
(T2*)v_buf,
(const T2*)Q,
(const T2*)K,
(const T2*)V,
ia3_tasks,
(const T2*)ia3_key_weights,
(const T2*)ia3_value_weights,
batch_size,
seq_len,
head_num,
size_per_head / 2);
}
sync_check_cuda_error();
}
}
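#if 0
// Illustrative usage sketch of invokeAddQKVBiasIA3Transpose (kept disabled; this
// block is not part of the original file). The sizes and the d_* device pointers
// are assumptions made up for the example; each buffer is expected to hold
// batch_size * seq_len * head_num * size_per_head elements of half on the device.
{
    const int   batch_size = 8, seq_len = 128, head_num = 16, size_per_head = 64;
    hipStream_t stream = nullptr;  // default stream, for the sketch only
    invokeAddQKVBiasIA3Transpose<half>(d_q_buf, d_k_buf, d_v_buf,
                                       d_Q, d_bias_Q, d_K, d_bias_K, d_V, d_bias_V,
                                       batch_size, seq_len, head_num, size_per_head,
                                       /*ia3_tasks=*/nullptr,
                                       /*ia3_key_weights=*/nullptr,
                                       /*ia3_value_weights=*/nullptr,
                                       stream);
    // Passing ia3_tasks == nullptr disables the IA3 scaling path; passing a null
    // bias_Q instead selects the bias-free QKVIA3Transpose kernel.
}
#endif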
#define INSTANTIATEADDQKVBIASIA3TRANSPOSE(T) \
template void invokeAddQKVBiasIA3Transpose(T* q_buf, \
T* k_buf, \
T* v_buf, \
T* Q, \
const T* bias_Q, \
T* K, \
const T* bias_K, \
T* V, \
const T* bias_V, \
const int batch_size, \
const int seq_len, \
const int head_num, \
const int size_per_head, \
const int* ia3_tasks, \
const T* ia3_key_weights, \
const T* ia3_value_weights, \
hipStream_t stream)
INSTANTIATEADDQKVBIASIA3TRANSPOSE(float);
INSTANTIATEADDQKVBIASIA3TRANSPOSE(half);
#ifdef ENABLE_BF16
INSTANTIATEADDQKVBIASIA3TRANSPOSE(__nv_bfloat16);
#endif
#undef INSTANTIATEADDQKVBIASIA3TRANSPOSE
template<typename T, typename T_IN, int ITEMS_PER_THREAD>
__global__ void softmax_kernel(T* attn_score,
const T_IN* qk,
const T* attn_mask,
const T* linear_bias_slopes,
const int batch_size,
const int head_num,
const int q_length,
const int k_length,
const float qk_scale)
{
// attn_score, [batch_size, num_heads, q_length, k_length]
// qk, [batch_size, num_heads, q_length, k_length]
// attn_mask, [batch_size, q_length, k_length]
// linear_bias_slopes, [num_heads]
const int64_t bi = blockIdx.y; // Batch index.
const int64_t hi = blockIdx.z; // Head index.
__shared__ float s_mean, s_max;
const float linear_bias_slope = linear_bias_slopes != nullptr ? (float)linear_bias_slopes[hi] : 0.0f;
// Loop along with Q dimension.
for (int64_t qi = blockIdx.x; qi < q_length; qi += gridDim.x) {
float data[ITEMS_PER_THREAD];
int64_t qk_offset;
float local_max = -1e20f;
// Loop along with K dimension.
for (int64_t i = 0; blockDim.x * i + threadIdx.x < k_length; i++) {
int64_t ki = blockDim.x * i + threadIdx.x; // Index of K dimension.
qk_offset = ((bi * head_num + hi) * q_length + qi) * k_length + ki;
float qk_val = static_cast<float>(qk[qk_offset]);
float qk_bias = 0.0f;
if (linear_bias_slopes != nullptr) {
                // We don't handle the upper diagonal (ki > qi) separately; its values
                // are negligible due to the negative-infinity mask, and this matches
                // the HF implementation.
qk_bias += static_cast<float>(linear_bias_slope * (ki - qi));
}
int64_t mask_offset = (bi * q_length + qi) * k_length + ki;
float mask_val = static_cast<float>(ldg(&attn_mask[mask_offset]));
qk_bias += (1.0f - mask_val) * -10000.0f;
data[i] = qk_scale * qk_val + qk_bias;
local_max = fmax(local_max, data[i]);
}
float max_val = blockDim.x <= 32 ? warpReduceMax(local_max) : blockReduceMax<float>(local_max);
if (threadIdx.x == 0) {
s_max = max_val;
}
__syncthreads();
float local_sum = 0;
for (int64_t i = 0; blockDim.x * i + threadIdx.x < k_length; i++) {
data[i] = __expf(data[i] - s_max);
local_sum += data[i];
}
float sum_val = blockDim.x <= 32 ? warpReduceSum(local_sum) : blockReduceSum<float>(local_sum);
if (threadIdx.x == 0) {
s_mean = sum_val + 1e-6f;
s_mean = __fdividef(1.0f, s_mean);
}
__syncthreads();
for (int64_t i = 0; blockDim.x * i + threadIdx.x < k_length; i++) {
qk_offset = ((bi * head_num + hi) * q_length + qi) * k_length + blockDim.x * i + threadIdx.x;
attn_score[qk_offset] = (T)(data[i] * s_mean);
}
}
}
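#if 0
// Host-side reference of the masked softmax computed by softmax_kernel above,
// for a single (batch, head, query) row. This is an illustrative sketch only
// (the function name and the std::vector interface are assumptions, not FT API);
// it mirrors the scale -> ALiBi/mask bias -> max-shifted exp -> normalize steps.
#include <cmath>
#include <vector>
static std::vector<float> masked_softmax_row(const std::vector<float>& qk,
                                             const std::vector<float>& mask,  // 1 = keep, 0 = masked
                                             float qk_scale, float slope, int qi)
{
    const int          k_length = static_cast<int>(qk.size());
    std::vector<float> out(k_length);
    float              max_val = -1e20f;
    for (int ki = 0; ki < k_length; ++ki) {
        const float bias = slope * (ki - qi) + (1.0f - mask[ki]) * -10000.0f;
        out[ki]          = qk_scale * qk[ki] + bias;
        max_val          = std::fmax(max_val, out[ki]);
    }
    float sum = 0.0f;
    for (int ki = 0; ki < k_length; ++ki) {
        out[ki] = std::exp(out[ki] - max_val);  // subtract the row max for numerical stability
        sum += out[ki];
    }
    const float inv_sum = 1.0f / (sum + 1e-6f);
    for (int ki = 0; ki < k_length; ++ki) {
        out[ki] *= inv_sum;
    }
    return out;
}
#endif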
template<typename T, int ITEMS_PER_THREAD>
__global__ void softmax_kernel_h2(T* attn_score,
const T* qk_buf,
const T* attn_mask,
const T* linear_bias_slopes,
const int batch_size,
const int head_num,
const int q_length,
const int k_length,
const T qk_scale)
{
// attn_score, [batch_size, num_heads, q_length, k_length]
// qk, [batch_size, num_heads, q_length, k_length]
// attn_mask, [batch_size, q_length, k_length]
// linear_bias_slopes, [num_heads]
using T2 = typename TypeConverter<T>::Type;
T2* attn_score_h2 = reinterpret_cast<T2*>(attn_score);
const T2* qk_buf_h2 = reinterpret_cast<const T2*>(qk_buf);
const T2* attn_mask_h2 = reinterpret_cast<const T2*>(attn_mask);
const int bi = blockIdx.y; // Batch index
const int hi = blockIdx.z; // Head index.
__shared__ float s_mean, s_max;
    // Constant values that will be used repeatedly in the q/k loop.
const T2 ONE = cuda_cast<T2>(1.0f);
const T2 ZERO = cuda_cast<T2>(0.0f);
const T2 NEG_INFTY = cuda_cast<T2>(-10000.0f);
// The normalization factor of QK.
const T2 qk_scale_h2 = cuda_cast<T2>(qk_scale);
// The slope of a linear position bias of the current attention head.
const T2 linear_bias_slope = linear_bias_slopes != nullptr ? cuda_cast<T2>(linear_bias_slopes[hi]) : ZERO;
// Loop over q dimension.
for (int qi = blockIdx.x; qi < q_length; qi += gridDim.x) {
T2 data[ITEMS_PER_THREAD];
int qk_offset;
float local_max = -1e20f;
// Loop over k dimension.
for (int i = 0; blockDim.x * i + threadIdx.x < (k_length / 2) && i < ITEMS_PER_THREAD; i++) {
// The half of the index of k dimension. We will use the elements at {2 * ki, 2 * ki + 1}.
int ki = blockDim.x * i + threadIdx.x;
qk_offset = ((bi * head_num + hi) * q_length + qi) * (k_length / 2) + ki;
int mask_offset = (bi * q_length + qi) * (k_length / 2) + ki;
// The value of QK^T matrix at (qi, ki).
T2 qk = qk_buf_h2[qk_offset];
// The bias value to the position (qi, ki) including both mask and positional bias.
T2 qk_bias = ZERO;
if (linear_bias_slopes != nullptr) {
// The position bias depends on the distance between qi/ki and is zero if qi >= 2*ki
// or qi >= 2*ki+1. For T2 vectorization, we should handle every two elements along
                // with k-dim simultaneously. To do this, we check qi / 2 > ki at once instead of
                // qi >= 2*ki or 2*ki+1. It works because a diagonal element for an odd qi will be
// zero due to slope * (qi - 2*ki+1) = 0. Thus, we don't handle the upper diagonal
// separately, whose values are negligible due to the negative infinity mask.
T2 dist(2.0f * ki - qi, 2.0f * ki + 1 - qi);
qk_bias = hadd2<T2>(qk_bias, hmul2<T2>(linear_bias_slope, dist));
}
T2 mask_val = ldg(&attn_mask_h2[mask_offset]);
qk_bias = hadd2<T2>(qk_bias, hmul2<T2>(hsub2<T2>(ONE, mask_val), NEG_INFTY));
data[i] = hadd2<T2>(hmul2<T2>(qk, qk_scale_h2), qk_bias);
local_max = fmax(local_max, fmax((float)data[i].x, (float)data[i].y));
}
float max_val = blockDim.x <= 32 ? warpReduceMax(local_max) : blockReduceMax<float>(local_max);
if (threadIdx.x == 0) {
s_max = max_val;
}
__syncthreads();
float local_sum = 0.0f;
for (int i = 0; blockDim.x * i + threadIdx.x < (k_length / 2) && i < ITEMS_PER_THREAD; i++) {
data[i] = hexp2<T2>(hsub2<T2>(data[i], cuda_cast<T2>(s_max)));
local_sum += (float)(data[i].x + data[i].y);
}
float sum_val = blockDim.x <= 32 ? warpReduceSum(local_sum) : blockReduceSum<float>(local_sum);
if (threadIdx.x == 0) {
s_mean = sum_val + 1e-6f;
s_mean = __fdividef(1.0f, s_mean);
}
__syncthreads();
for (int i = 0; blockDim.x * i + threadIdx.x < (k_length / 2) && i < ITEMS_PER_THREAD; i++) {
qk_offset = ((bi * head_num + hi) * q_length + qi) * (k_length / 2) + blockDim.x * i + threadIdx.x;
attn_score_h2[qk_offset] = hmul2<T2>(data[i], cuda_cast<T2>(s_mean));
}
}
}
template<typename T, int K_ITEMS_PER_THREAD, int Q_ITEMS_PER_THREAD>
__global__ void softmax_kernel_h2_v2(T* attn_score,
const T* qk_buf,
const T* attn_mask,
const T* linear_bias_slopes,
const int batch_size,
const int head_num,
const int q_length,
const int k_length,
const T scalar)
{
// attn_score, [batch_size, num_heads, q_length, k_length]
// qk, [batch_size, num_heads, q_length, k_length]
// attn_mask, [batch_size, q_length, k_length]
// linear_bias_slopes, [num_heads]
using T2 = typename TypeConverter<T>::Type;
// QK^T matrix of shape (batch_size, head_num, q_length, k_length / 2)
T2* attn_score_h2 = reinterpret_cast<T2*>(attn_score);
const T2* qk_buf_h2 = reinterpret_cast<const T2*>(qk_buf);
const T2* attn_mask_h2 = reinterpret_cast<const T2*>(attn_mask);
const int bi = blockIdx.y; // Batch index
const int hi = blockIdx.z; // Head index.
    // Constant values that will be used repeatedly in the q/k loop.
const T2 ONE = cuda_cast<T2>(1.0f);
const T2 ZERO = cuda_cast<T2>(0.0f);
const T2 NEG_INFTY = cuda_cast<T2>(-10000.0f);
// The normalization factor of QK.
const T2 qk_scale = cuda_cast<T2>(scalar);
// The slope of a linear position bias of the current attention head.
const T2 linear_bias_slope = linear_bias_slopes != nullptr ? cuda_cast<T2>(linear_bias_slopes[hi]) : ZERO;
__shared__ float s_sum[Q_ITEMS_PER_THREAD], s_max[Q_ITEMS_PER_THREAD];
// Loop over q dimension.
for (int qi = blockIdx.x; qi < q_length; qi += gridDim.x * Q_ITEMS_PER_THREAD) {
T2 data[Q_ITEMS_PER_THREAD][K_ITEMS_PER_THREAD];
int qk_offset[Q_ITEMS_PER_THREAD];
float local_max[Q_ITEMS_PER_THREAD];
#pragma unroll
for (int j = 0; j < Q_ITEMS_PER_THREAD; j++) {
local_max[j] = -1e20f;
}
// Loop over k dimension.
const int Q_ITEMS = min((q_length - qi + gridDim.x - 1) / gridDim.x, Q_ITEMS_PER_THREAD);
for (int i = 0; blockDim.x * i + threadIdx.x < k_length / 2 && i < K_ITEMS_PER_THREAD; ++i) {
// The half of the index of k dimension. We will use the elements at {2 * ki, 2 * ki + 1}.
int ki = blockDim.x * i + threadIdx.x;
int mask_offset[Q_ITEMS_PER_THREAD];
#pragma unroll
for (int j = 0; j < Q_ITEMS; j++) {
qk_offset[j] = ((bi * head_num + hi) * q_length + qi + j * gridDim.x) * (k_length / 2) + ki;
mask_offset[j] = (bi * q_length + qi + j * gridDim.x) * (k_length / 2) + ki;
}
T2 mask_val[Q_ITEMS_PER_THREAD];
#pragma unroll
for (int j = 0; j < Q_ITEMS; j++) {
mask_val[j] = ldg(&attn_mask_h2[mask_offset[j]]);
}
T2 qk[Q_ITEMS_PER_THREAD];
#pragma unroll
for (int j = 0; j < Q_ITEMS; j++) {
qk[j] = qk_buf_h2[qk_offset[j]];
}
T2 pos_bias[Q_ITEMS_PER_THREAD];
if (linear_bias_slopes != nullptr) {
#pragma unroll
for (int j = 0; j < Q_ITEMS; j++) {
// The position bias depends on the distance between qi/ki and is zero if qi >= 2*ki
// or qi >= 2*ki+1. For T2 vectorization, we should handle every two elements along
                    // with k-dim simultaneously. To do this, we check qi / 2 > ki at once instead of
                    // qi >= 2*ki or 2*ki+1. It works because a diagonal element for an odd qi will be
// zero due to slope * (qi - 2*ki+1) = 0. Thus, we don't handle the upper diagonal
// separately, whose values are negligible due to the negative infinity mask.
int qidx = qi + j * gridDim.x;
T2 dist(2.0f * ki - qidx, 2.0f * ki + 1 - qidx);
pos_bias[j] = hmul2<T2>(linear_bias_slope, dist);
}
}
#pragma unroll
for (int j = 0; j < Q_ITEMS; j++) {
mask_val[j] = hmul2<T2>(hsub2<T2>(ONE, mask_val[j]), NEG_INFTY);
}
#pragma unroll
for (int j = 0; j < Q_ITEMS; j++) {
T2 val = hadd2<T2>(hmul2<T2>(qk_scale, qk[j]), mask_val[j]);
if (linear_bias_slopes != nullptr) {
val = hadd2<T2>(val, pos_bias[j]);
}
data[j][i] = val;
local_max[j] = fmax(local_max[j], fmax((float)data[j][i].x, (float)data[j][i].y));
}
}
if (blockDim.x <= 32) {
warpReduceMaxV2<float, Q_ITEMS_PER_THREAD>(local_max);
}
else {
blockReduceMaxV2<float, Q_ITEMS_PER_THREAD>(local_max);
}
if (threadIdx.x == 0) {
#pragma unroll
for (int j = 0; j < Q_ITEMS_PER_THREAD; j++) {
s_max[j] = local_max[j];
}
}
__syncthreads();
float local_sum[Q_ITEMS_PER_THREAD];
#pragma unroll
for (int j = 0; j < Q_ITEMS_PER_THREAD; j++) {
local_sum[j] = {0.f};
}
for (int i = 0; blockDim.x * i + threadIdx.x < k_length / 2 && i < K_ITEMS_PER_THREAD; ++i) {
#pragma unroll
for (int j = 0; j < Q_ITEMS; ++j) {
data[j][i] = hexp2<T2>(hsub2<T2>(data[j][i], cuda_cast<T2>(s_max[j])));
}
#pragma unroll
for (int j = 0; j < Q_ITEMS; j++) {
local_sum[j] += (float)(data[j][i].x + data[j][i].y);
}
}
if (blockDim.x <= 32) {
warpReduceSumV2<float, Q_ITEMS_PER_THREAD>(local_sum);
}
else {
blockReduceSumV2<float, Q_ITEMS_PER_THREAD>(local_sum);
}
if (threadIdx.x == 0) {
#pragma unroll
for (int j = 0; j < Q_ITEMS_PER_THREAD; j++) {
s_sum[j] = __fdividef(1.0f, local_sum[j] + 1e-6f);
}
}
__syncthreads();
for (int i = 0; blockDim.x * i + threadIdx.x < k_length / 2 && i < K_ITEMS_PER_THREAD; ++i) {
#pragma unroll
for (int j = 0; j < Q_ITEMS; j++) {
qk_offset[j] = ((bi * head_num + hi) * q_length + qi + j * gridDim.x) * (k_length / 2) + blockDim.x * i
+ threadIdx.x;
}
#pragma unroll
for (int j = 0; j < Q_ITEMS; j++) {
attn_score_h2[qk_offset[j]] = hmul2<T2>(data[j][i], cuda_cast<T2>(s_sum[j]));
}
}
}
}
#define LAUNCH_MAKSED_SOFTMAX_(T_, ITEMS_PER_THREAD) \
block.x /= ITEMS_PER_THREAD; \
block.x = (block.x + 31) / 32 * 32; \
assert(block.x <= 1024); \
if (is_half2) { \
if (grid.x % 4 == 0) { \
grid.x /= 4; \
hipLaunchKernelGGL(( softmax_kernel_h2_v2<T_, ITEMS_PER_THREAD, 4>) \
, dim3(grid), dim3(block), 0, stream, (T_*)param.attention_score, \
(const T_*)param.qk, \
(const T_*)param.attention_mask, \
(const T_*)param.linear_bias_slopes, \
param.batch_size, \
param.num_heads, \
param.q_length, \
param.k_length, \
(const T_)param.qk_scale); \
} \
else { \
hipLaunchKernelGGL(( softmax_kernel_h2<T_, ITEMS_PER_THREAD>), dim3(grid), dim3(block), 0, stream, (T_*)param.attention_score, \
(const T_*)param.qk, \
(const T_*)param.attention_mask, \
(const T_*)param.linear_bias_slopes, \
param.batch_size, \
param.num_heads, \
param.q_length, \
param.k_length, \
(const T_)param.qk_scale); \
} \
} \
else { \
hipLaunchKernelGGL(( softmax_kernel<T, T_IN, ITEMS_PER_THREAD>), dim3(grid), dim3(block), 0, stream, param.attention_score, \
param.qk, \
param.attention_mask, \
param.linear_bias_slopes, \
param.batch_size, \
param.num_heads, \
param.q_length, \
param.k_length, \
param.qk_scale); \
}
#define LAUNCH_MAKSED_SOFTMAX(ITEMS_PER_THREAD) LAUNCH_MAKSED_SOFTMAX_(half, ITEMS_PER_THREAD)
template<typename T, typename T_IN>
void invokeMaskedSoftmax(MaskedSoftmaxParam<T, T_IN>& param, hipStream_t stream)
{
// attention_score, (batch_size, head_num, q_length, k_length), softmax output.
// qk, (batch_size, head_num, q_length, k_length), QK^T.
// attention_mask, (batch_size, q_length, k_length), attention mask.
// linear_bias_slopes, (head_num,) the slopes of the linear position bias.
dim3 grid(param.q_length, param.batch_size, param.num_heads);
if (param.batch_size * param.num_heads > 360) {
grid.x = ceil(float(param.q_length) / 32.0f);
}
bool is_half2 = sizeof(T) == 2 && sizeof(T_IN) == 2 && param.k_length % 2 == 0;
dim3 block((param.k_length / (is_half2 ? 2 : 1) + 31) / 32 * 32);
if (block.x > 2048 && block.x <= 4096) {
LAUNCH_MAKSED_SOFTMAX(4)
}
else if (block.x > 1024) {
LAUNCH_MAKSED_SOFTMAX(2)
}
else if (block.x > 0) {
LAUNCH_MAKSED_SOFTMAX(1)
}
else {
FT_CHECK(param.k_length <= 4096);
}
}
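// Dispatch example (the numbers are illustrative, not taken from the original
// file): for T == T_IN == half with k_length = 4096, is_half2 is true, so block.x
// starts at (4096 / 2 + 31) / 32 * 32 = 2048 threads. That falls into the
// ITEMS_PER_THREAD = 2 branch, and LAUNCH_MAKSED_SOFTMAX_ then shrinks the block
// to 2048 / 2 = 1024 threads (the upper bound asserted inside the macro), each
// thread covering 2 half2 values, i.e. 4 of the 4096 key positions.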
template void invokeMaskedSoftmax(MaskedSoftmaxParam<float, float>& param, hipStream_t stream);
template void invokeMaskedSoftmax(MaskedSoftmaxParam<half, float>& param, hipStream_t stream);
template void invokeMaskedSoftmax(MaskedSoftmaxParam<half, half>& param, hipStream_t stream);
#ifdef ENABLE_BF16
template<>
void invokeMaskedSoftmax(MaskedSoftmaxParam<__nv_bfloat16, float>& param, hipStream_t stream)
{
// attention_score, (batch_size, head_num, q_length, k_length), softmax output.
// qk, (batch_size, head_num, q_length, k_length), QK^T.
// attention_mask, (batch_size, q_length, k_length), attention mask.
// linear_bias_slopes, (head_num,) the slopes of the linear position bias.
using T = __nv_bfloat16;
using T_IN = float;
dim3 grid(param.q_length, param.batch_size, param.num_heads);
if (param.batch_size * param.num_heads > 360) {
grid.x = ceil(float(param.q_length) / 32.0f);
}
bool is_half2 = sizeof(T) == 2 && sizeof(T_IN) == 2 && param.k_length % 2 == 0;
dim3 block((param.k_length / (is_half2 ? 2 : 1) + 31) / 32 * 32);
if (block.x > 2048 && block.x <= 4096) {
LAUNCH_MAKSED_SOFTMAX_(__nv_bfloat16, 4);
}
else if (block.x > 1024) {
LAUNCH_MAKSED_SOFTMAX_(__nv_bfloat16, 2);
}
else if (block.x > 0) {
LAUNCH_MAKSED_SOFTMAX_(__nv_bfloat16, 1);
}
else {
FT_CHECK(param.k_length <= 4096);
}
}
template<>
void invokeMaskedSoftmax(MaskedSoftmaxParam<__nv_bfloat16, __nv_bfloat16>& param, hipStream_t stream)
{
// attention_score, (batch_size, head_num, q_length, k_length), softmax output.
// qk, (batch_size, head_num, q_length, k_length), QK^T.
// attention_mask, (batch_size, q_length, k_length), attention mask.
// linear_bias_slopes, (head_num,) the slopes of the linear position bias.
using T = __nv_bfloat16;
using T_IN = __nv_bfloat16;
dim3 grid(param.q_length, param.batch_size, param.num_heads);
if (param.batch_size * param.num_heads > 360) {
grid.x = ceil(float(param.q_length) / 32.0f);
}
bool is_half2 = sizeof(T) == 2 && sizeof(T_IN) == 2 && param.k_length % 2 == 0;
dim3 block((param.k_length / (is_half2 ? 2 : 1) + 31) / 32 * 32);
if (block.x > 2048 && block.x <= 4096) {
LAUNCH_MAKSED_SOFTMAX_(__nv_bfloat16, 4);
}
else if (block.x > 1024) {
LAUNCH_MAKSED_SOFTMAX_(__nv_bfloat16, 2);
}
else if (block.x > 0) {
LAUNCH_MAKSED_SOFTMAX_(__nv_bfloat16, 1);
}
else {
FT_CHECK(param.k_length <= 4096);
}
}
#endif
#undef LAUNCH_MAKSED_SOFTMAX
#undef LAUNCH_MAKSED_SOFTMAX_
template<typename T>
__global__ void transpose(const T* src,
T* dst,
const int batch_size,
const int seq_len,
const int head_num,
const int size_per_head,
const float* scale,
int int8_mode)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int batch_id = tid / (head_num * seq_len * size_per_head);
int head_id = (tid % (head_num * seq_len * size_per_head)) / (seq_len * size_per_head);
int seq_id = (tid % (seq_len * size_per_head)) / size_per_head;
int id = tid % size_per_head;
int target_id = target_index(batch_id, head_id, seq_id, id, batch_size, head_num, seq_len, size_per_head);
if (int8_mode == 2) {
using Int8_Packed_T = typename packed_as<int8_t, num_elems<T>::value>::type;
using Float_Packed_T = typename packed_as<float, num_elems<T>::value>::type;
const Float_Packed_T scale_val = cuda_cast<Float_Packed_T>(*scale);
reinterpret_cast<Int8_Packed_T*>(dst)[target_id] =
cuda_cast<Int8_Packed_T>(cuda_cast<Float_Packed_T>(src[tid]) * scale_val);
}
else {
dst[target_id] = src[tid];
}
}
template<>
__global__ void transpose(const float* src,
float* dst,
const int batch_size,
const int seq_len,
const int head_num,
const int size_per_head,
const float* scale,
int int8_mode)
{
int batch_id = blockIdx.x / (head_num * seq_len);
int seq_id = blockIdx.x % seq_len;
int head_id = (blockIdx.x % (head_num * seq_len)) / seq_len;
const int target_id = batch_id * (head_num * seq_len * size_per_head) + seq_id * head_num * size_per_head
+ head_id * size_per_head + threadIdx.x;
const int src_id = blockIdx.x * size_per_head + threadIdx.x;
if (int8_mode == 2) {
const float scale_val = *scale;
reinterpret_cast<int8_t*>(dst)[target_id] = cuda_cast<int8_t>(src[src_id] * scale_val);
}
else {
dst[target_id] = src[src_id];
}
}
template<typename T>
void invokeTransposeQKV(T* dst,
T* src,
const int batch_size,
const int seq_len,
const int head_num,
const int size_per_head,
const float* scale,
const int int8_mode,
hipStream_t stream)
{
dim3 grid, block;
if (sizeof(T) == 2) {
int seq_per_block = 1;
grid.x = batch_size * head_num * seq_len / seq_per_block;
while (seq_per_block < 4 && grid.x % 2 == 0) {
grid.x /= 2;
seq_per_block *= 2;
}
FT_CHECK(grid.x * seq_per_block == (size_t)batch_size * head_num * seq_len);
if (seq_per_block * size_per_head % 2 == 0) {
block.x = seq_per_block * size_per_head / 2;
if (std::is_same<T, half>::value) {
hipLaunchKernelGGL(( transpose<half2>), dim3(grid), dim3(block), 0, stream,
(half2*)src, (half2*)dst, batch_size, seq_len, head_num, size_per_head / 2, scale, int8_mode);
}
#ifdef ENABLE_BF16
else {
hipLaunchKernelGGL(( transpose<__nv_bfloat162>), dim3(grid), dim3(block), 0, stream, (__nv_bfloat162*)src,
(__nv_bfloat162*)dst,
batch_size,
seq_len,
head_num,
size_per_head / 2,
scale,
int8_mode);
}
#endif
}
else {
block.x = seq_per_block * size_per_head;
hipLaunchKernelGGL(( transpose<T>)
, dim3(grid), dim3(block), 0, stream, src, dst, batch_size, seq_len, head_num, size_per_head, scale, int8_mode);
}
}
else {
const int seq_per_block = 1;
grid.x = batch_size * head_num * seq_len / seq_per_block;
block.x = seq_per_block * size_per_head;
hipLaunchKernelGGL(( transpose<T>)
, dim3(grid), dim3(block), 0, stream, src, dst, batch_size, seq_len, head_num, size_per_head, scale, int8_mode);
}
}
#define INSTANTIATETRANSPOSEQKV(T) \
template void invokeTransposeQKV(T* src, \
T* dst, \
const int batch_size, \
const int seq_len, \
const int head_num, \
const int size_per_head, \
const float* scale, \
const int int8_mode, \
hipStream_t stream)
INSTANTIATETRANSPOSEQKV(float);
INSTANTIATETRANSPOSEQKV(half);
#ifdef ENABLE_BF16
INSTANTIATETRANSPOSEQKV(__nv_bfloat16);
#endif
#undef INSTANTIATETRANSPOSEQKV
template<typename T>
__global__ void add_QKV_bias_rebuild_padding_ia3(const T* Q,
const T* bias_Q,
const T* K,
const T* bias_K,
const T* V,
const T* bias_V,
T* q_buf_,
T* k_buf_,
T* v_buf_,
const int* ia3_tasks,
const T* ia3_key_weights,
const T* ia3_value_weights,
const int batch_size,
const int seq_len,
const int head_num,
const int size_per_head,
const int* mask_offset)
{
const int bid = blockIdx.x;
const int tgt_batch_id = (bid + mask_offset[bid]) / seq_len;
const int tgt_seq_id = (bid + mask_offset[bid]) % seq_len;
const int n = head_num * size_per_head;
const bool use_ia3 = ia3_tasks != nullptr;
const int ia3_task = use_ia3 ? ia3_tasks[tgt_batch_id] : 0;
const bool use_ia3_key = use_ia3 && (ia3_key_weights != nullptr);
const bool use_ia3_value = use_ia3 && (ia3_value_weights != nullptr);
for (int idx = threadIdx.x; idx < n; idx += blockDim.x) {
const int tgt_head_id = idx / size_per_head;
const int tgt_hidden_id = idx % size_per_head;
const int src_id = bid * n + idx;
const int tgt_id = tgt_batch_id * head_num * seq_len * size_per_head + tgt_head_id * seq_len * size_per_head
+ tgt_seq_id * size_per_head + tgt_hidden_id;
q_buf_[tgt_id] = add(ldg(&Q[src_id]), ldg(&bias_Q[idx]));
T k = ldg(&K[src_id]);
if (use_ia3_key) {
k = k * ia3_key_weights[ia3_task * n + idx];
}
k_buf_[tgt_id] = add(k, ldg(&bias_K[idx]));
T v = ldg(&V[src_id]);
if (use_ia3_value) {
v = v * ia3_value_weights[ia3_task * n + idx];
}
v_buf_[tgt_id] = add(v, ldg(&bias_V[idx]));
}
}
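// Worked example of the mask_offset bookkeeping used above (the values are made
// up for illustration, not taken from the original file): with seq_len = 4 and
// two sequences of valid lengths {2, 3}, the padded layout holds 8 tokens but
// only 5 are real. mask_offset stores, for every compacted token, how many
// padding slots precede it in the padded layout:
//   compacted bid : 0  1  2  3  4
//   mask_offset   : 0  0  2  2  2
// so bid + mask_offset[bid] recovers the padded position, from which
// tgt_batch_id = (bid + offset) / seq_len and tgt_seq_id = (bid + offset) % seq_len.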
template<typename T>
__global__ void rebuild_padding_ia3(const T* Q,
const T* K,
const T* V,
T* q_buf_,
T* k_buf_,
T* v_buf_,
const int* ia3_tasks,
const T* ia3_key_weights,
const T* ia3_value_weights,
const int batch_size,
const int seq_len,
const int head_num,
const int size_per_head,
const int* mask_offset)
{
const int bid = blockIdx.x;
const int tgt_batch_id = (bid + mask_offset[bid]) / seq_len;
const int tgt_seq_id = (bid + mask_offset[bid]) % seq_len;
const int n = head_num * size_per_head;
const bool use_ia3 = ia3_tasks != nullptr;
const int ia3_task = use_ia3 ? ia3_tasks[tgt_batch_id] : 0;
const bool use_ia3_key = use_ia3 && (ia3_key_weights != nullptr);
const bool use_ia3_value = use_ia3 && (ia3_value_weights != nullptr);
for (int idx = threadIdx.x; idx < n; idx += blockDim.x) {
const int tgt_head_id = idx / size_per_head;
const int tgt_hidden_id = idx % size_per_head;
const int src_id = bid * n + idx;
const int tgt_id = tgt_batch_id * head_num * seq_len * size_per_head + tgt_head_id * seq_len * size_per_head
+ tgt_seq_id * size_per_head + tgt_hidden_id;
q_buf_[tgt_id] = ldg(&Q[src_id]);
T k = ldg(&K[src_id]);
if (use_ia3_key) {
k = k * ia3_key_weights[ia3_task * n + idx];
}
k_buf_[tgt_id] = k;
T v = ldg(&V[src_id]);
if (use_ia3_value) {
v = v * ia3_value_weights[ia3_task * n + idx];
}
v_buf_[tgt_id] = v;
}
}
template<typename T>
void invokeAddQKVBiasIA3RebuildPadding(T* Q,
const T* bias_Q,
T* K,
const T* bias_K,
T* V,
const T* bias_V,
T* q_buf,
T* k_buf,
T* v_buf,
const int batch_size,
const int seq_len,
const int head_num,
const int size_per_head,
const int valid_word_num,
const int* mask_offset,
const int* ia3_tasks,
const T* ia3_key_weights,
const T* ia3_value_weights,
hipStream_t stream)
{
#ifdef ENABLE_BF16
bool is_half2 = (std::is_same<T, half>::value || std::is_same<T, __nv_bfloat16>::value) && (size_per_head % 2 == 0);
#else
bool is_half2 = (std::is_same<T, half>::value) && (size_per_head % 2 == 0);
#endif
using T2 = typename TypeConverter<T>::Type; // fp16 to half2, bf16 to bf162
int block_size = head_num * size_per_head;
if (is_half2) {
while (block_size > 512) {
if (block_size % 2 == 0) {
block_size /= 2;
}
else {
is_half2 = false;
block_size = ::min(block_size, 512);
break;
}
}
}
else {
block_size = ::min(block_size, 512);
}
if (bias_Q == nullptr && bias_K == nullptr && bias_V == nullptr) {
if (is_half2) {
hipLaunchKernelGGL(( rebuild_padding_ia3), dim3(valid_word_num), dim3(block_size), 0, stream, (T2*)Q,
(T2*)K,
(T2*)V,
(T2*)q_buf,
(T2*)k_buf,
(T2*)v_buf,
ia3_tasks,
(const T2*)ia3_key_weights,
(const T2*)ia3_value_weights,
batch_size,
seq_len,
head_num,
size_per_head / 2,
mask_offset);
}
else {
hipLaunchKernelGGL(( rebuild_padding_ia3), dim3(valid_word_num), dim3(block_size), 0, stream, Q,
K,
V,
q_buf,
k_buf,
v_buf,
ia3_tasks,
ia3_key_weights,
ia3_value_weights,
batch_size,
seq_len,
head_num,
size_per_head,
mask_offset);
}
}
else if (bias_Q != nullptr && bias_K != nullptr && bias_V != nullptr) {
if (is_half2) {
hipLaunchKernelGGL(( add_QKV_bias_rebuild_padding_ia3), dim3(valid_word_num), dim3(block_size), 0, stream, (T2*)Q,
(const T2*)bias_Q,
(T2*)K,
(const T2*)bias_K,
(T2*)V,
(const T2*)bias_V,
(T2*)q_buf,
(T2*)k_buf,
(T2*)v_buf,
ia3_tasks,
(const T2*)ia3_key_weights,
(const T2*)ia3_value_weights,
batch_size,
seq_len,
head_num,
size_per_head / 2,
mask_offset);
}
else {
hipLaunchKernelGGL(( add_QKV_bias_rebuild_padding_ia3), dim3(valid_word_num), dim3(block_size), 0, stream, Q,
bias_Q,
K,
bias_K,
V,
bias_V,
q_buf,
k_buf,
v_buf,
ia3_tasks,
ia3_key_weights,
ia3_value_weights,
batch_size,
seq_len,
head_num,
size_per_head,
mask_offset);
}
}
else {
FT_CHECK(false);
}
}
#define INSTANTIATEADDQKVBIASIA3REBUILDPADDING(T) \
template void invokeAddQKVBiasIA3RebuildPadding(T* Q, \
const T* bias_Q, \
T* K, \
const T* bias_K, \
T* V, \
const T* bias_V, \
T* q_buf, \
T* k_buf, \
T* v_buf, \
const int batch_size, \
const int seq_len, \
const int head_num, \
const int size_per_head, \
const int valid_word_num, \
const int* mask_offset, \
const int* ia3_tasks, \
const T* ia3_key_weights, \
const T* ia3_value_weights, \
hipStream_t stream)
INSTANTIATEADDQKVBIASIA3REBUILDPADDING(float);
INSTANTIATEADDQKVBIASIA3REBUILDPADDING(half);
#ifdef ENABLE_BF16
INSTANTIATEADDQKVBIASIA3REBUILDPADDING(__nv_bfloat16);
#endif
#undef INSTANTIATEADDQKVBIASIA3REBUILDPADDING
template<typename T>
__global__ void transpose_remove_padding(const T* src,
T* dst,
const int batch_size,
const int seq_len,
const int head_num,
const int size_per_head,
const int* mask_offset,
const float* scale,
const int int8_mode)
{
// TODO: optimize this kernel?
// do remove_sequence_length_padding
const int bid = blockIdx.x; // batch * seq_len or valid_word_num
const int src_batch_id = (bid + mask_offset[bid]) / seq_len;
const int src_seq_id = (bid + mask_offset[bid]) % seq_len;
const int dst_seq_id = bid;
const int src_offset_base = src_batch_id * seq_len * head_num * size_per_head + src_seq_id * size_per_head;
const int dst_offset_base = dst_seq_id * head_num * size_per_head;
using Int8_Packed_T = typename packed_as<int8_t, num_elems<T>::value>::type;
using Float_Packed_T = typename packed_as<float, num_elems<T>::value>::type;
const Float_Packed_T scale_val =
int8_mode == 2 ? cuda_cast<Float_Packed_T>(*scale) : cuda_cast<Float_Packed_T>(0.0f);
for (int idx = threadIdx.x; idx < head_num * size_per_head; idx += blockDim.x) {
const int head_id = idx / size_per_head;
const int hidden_id = idx % size_per_head;
const T src_elem = ldg(&src[src_offset_base + head_id * seq_len * size_per_head + hidden_id]);
if (int8_mode == 2) {
reinterpret_cast<Int8_Packed_T*>(dst)[dst_offset_base + idx] =
cuda_cast<Int8_Packed_T>(cuda_cast<Float_Packed_T>(src_elem) * scale_val);
}
else {
dst[dst_offset_base + idx] = src_elem;
}
}
}
// clang-format off
template<typename T>
void invokeTransposeAttentionOutRemovePadding(T* src,
T* dst,
const int valid_word_num,
const int batch_size,
const int seq_len,
const int head_num,
const int size_per_head,
const int* mask_offset,
const float* scale,
const int int8_mode,
hipStream_t stream)
{
#ifdef ENABLE_BF16
bool is_half2 = (std::is_same<T, half>::value || std::is_same<T, __nv_bfloat16>::value) && (size_per_head % 2 == 0);
#else
bool is_half2 = (std::is_same<T, half>::value) && (size_per_head % 2 == 0);
#endif
using T2 = typename TypeConverter<T>::Type; // fp16 to half2, bf16 to bf162
int block_size = head_num * size_per_head;
if (is_half2) {
while (block_size > 512) {
if (block_size % 2 == 0) {
block_size /= 2;
}
else {
is_half2 = false;
block_size = ::min(block_size, 1024);
break;
}
}
}
else {
block_size = ::min(block_size, 1024);
}
if (is_half2) {
hipLaunchKernelGGL(( transpose_remove_padding<T2>), dim3(valid_word_num), dim3(block_size), 0, stream,
(T2*)src, (T2*)dst, batch_size, seq_len, head_num, size_per_head / 2, mask_offset, scale, int8_mode);
}
else {
hipLaunchKernelGGL(( transpose_remove_padding), dim3(valid_word_num), dim3(block_size), 0, stream,
src, dst, batch_size, seq_len, head_num, size_per_head, mask_offset, scale, int8_mode);
}
}
// clang-format on
#define INSTANTIATETRANSPOSEATTENTIONOUTREMOVEPADDING(T) \
template void invokeTransposeAttentionOutRemovePadding(T* src, \
T* dst, \
const int valid_word_num, \
const int batch_size, \
const int seq_len, \
const int head_num, \
const int size_per_head, \
const int* mask_offset, \
const float* scale, \
const int int8_mode, \
hipStream_t stream)
INSTANTIATETRANSPOSEATTENTIONOUTREMOVEPADDING(float);
INSTANTIATETRANSPOSEATTENTIONOUTREMOVEPADDING(half);
#ifdef ENABLE_BF16
INSTANTIATETRANSPOSEATTENTIONOUTREMOVEPADDING(__nv_bfloat16);
#endif
#undef INSTANTIATETRANSPOSEATTENTIONOUTREMOVEPADDING
template<typename T>
__global__ void add_fusedQKV_bias_transpose_kernel(T* q_buf,
T* k_buf,
T* v_buf,
T* QKV,
const T* __restrict qkv_bias,
const int* padding_offset,
const int batch_size,
const int seq_len,
const int token_num,
const int head_num,
const int size_per_head,
const float* scale,
const int int8_mode)
{
// QKV: [token_num, 3, n]
// qkv_bias: [3, n]
// q_buf, k_buf, v_buf: [batch, head_num, seq_len, size_per_head]
T* qkv_ptr[3] = {q_buf, k_buf, v_buf};
const int n = head_num * size_per_head;
for (int index = blockDim.x * blockIdx.x + threadIdx.x; index < token_num * 3 * n;
index += gridDim.x * blockDim.x) {
const int bias_id = index % (3 * n);
const int token_idx = index / (3 * n);
const int token_padded_idx = token_idx + (padding_offset == nullptr ? 0 : padding_offset[token_idx]);
const int target_batch_id = token_padded_idx / seq_len;
const int seq_id = token_padded_idx % seq_len;
const int qkv_id = (index % (3 * n)) / n;
const int head_id = (index % n) / size_per_head;
const int size_id = index % size_per_head;
T val;
if (int8_mode == 2) {
val = cuda_cast<T>(cuda_cast<float>(reinterpret_cast<const int8_t*>(QKV)[index]) * scale[qkv_id]);
}
else {
val = ldg(&QKV[index]);
}
val = val + ldg(&qkv_bias[bias_id]);
if (int8_mode == 2) {
// TODO(mseznec): add support for int8 BMM with FusedAtt
}
else {
QKV[index] = val;
}
qkv_ptr[qkv_id][target_batch_id * head_num * seq_len * size_per_head + head_id * seq_len * size_per_head
+ seq_id * size_per_head + size_id] = val;
}
}
template<typename T>
struct Vec_t {
static constexpr int size = 0;
};
template<>
struct Vec_t<float> {
using Type = float2;
static constexpr int size = 2;
};
template<>
struct Vec_t<half> {
using Type = uint32_t;
static constexpr int size = 2;
};
#ifdef ENABLE_BF16
template<>
struct Vec_t<__nv_bfloat16> {
using Type = __nv_bfloat162;
static constexpr int size = 2;
};
#endif
template<typename T, bool PREFIX_PROMPT>
__global__ void add_fusedQKV_bias_transpose_kernel(T* q_buf,
T* k_buf,
T* v_buf,
PrefixPromptBatchWeightsParam<T> param,
T* QKV,
const T* __restrict qkv_bias,
const int* padding_offset,
const int batch_size,
const int seq_len,
const int head_num,
const int size_per_head,
const int rotary_embedding_dim,
const bool neox_rotary_style)
{
// This kernel add bias to QKV, which has shape [batch_size, seq_len, 3, head_num, size_per_head], and
// QKV split to 3 split buffer q, k, v and transpose them to [batch_size, head_num, seq_len, size_per_head].
// For q and k, also apply the rotary embedding.
// When we pass prefix prompt, this kernel also concatenate the prefix prompt and key/value along
// seq_len dimension like [prompt, key/value].
// So, the final shape of q is same ([batch_size, head_num, seq_len, size_per_head]), but
// the shapes of key and values become [batch_size, head_num, max_prefix_prompt_length + seq_len, size_per_head].
// NOTE: QKV src shape (batch_size, seq_len, 3, head_num, size_per_head)
// QKV dst shape (3, batch_size, head_num, seq_len, size_per_head)
extern __shared__ __align__(sizeof(float2)) char smem_[]; // align on largest vector type
constexpr int vec_size = Vec_t<T>::size;
using Vec_t = typename Vec_t<T>::Type;
const int token_idx = blockIdx.x - batch_size * param.max_prefix_prompt_length;
const int token_padding_offset = (padding_offset == nullptr || token_idx < 0) ? 0 : padding_offset[token_idx];
const int tgt_token_idx = token_idx + token_padding_offset;
const int batch_idx = tgt_token_idx / seq_len;
const int seq_idx = tgt_token_idx % seq_len;
const int head_idx = blockIdx.y;
const int tidx = threadIdx.x;
const int total_seq_len = param.max_prefix_prompt_length + seq_len;
const bool is_masked = tidx * vec_size >= size_per_head;
// NOTE: blockIdx.x < batch_size * param.max_prefix_prompt_length really handles prefix prompts
if (PREFIX_PROMPT && token_idx < 0) {
const int prompt_batch_idx = blockIdx.x / param.max_prefix_prompt_length;
const int prompt_seq_idx = blockIdx.x % param.max_prefix_prompt_length;
const int prompt_length = param.d_prefix_prompt_lengths[prompt_batch_idx];
if (prompt_seq_idx < prompt_length) {
const int dest_kv_idx = prompt_batch_idx * size_per_head * total_seq_len * head_num
+ head_idx * size_per_head * total_seq_len + prompt_seq_idx * size_per_head
+ tidx * vec_size;
const int prefix_kv_idx =
size_per_head * prompt_length * head_idx + size_per_head * prompt_seq_idx + tidx * vec_size;
const T* prefix_prompt_k = param.d_prefix_prompt_batch[prompt_batch_idx]
+ param.prefix_prompt_layer_offset_per_seq * prompt_length;
const T* prefix_prompt_v = prefix_prompt_k + prompt_length * head_num * size_per_head;
if (!is_masked) {
*reinterpret_cast<Vec_t*>(&k_buf[dest_kv_idx]) =
*reinterpret_cast<const Vec_t*>(&prefix_prompt_k[prefix_kv_idx]);
*reinterpret_cast<Vec_t*>(&v_buf[dest_kv_idx]) =
*reinterpret_cast<const Vec_t*>(&prefix_prompt_v[prefix_kv_idx]);
}
}
return;
}
const int prefix_prompt_length = PREFIX_PROMPT ? param.d_prefix_prompt_lengths[batch_idx] : 0;
const int hidden_idx = head_idx * size_per_head + tidx * vec_size;
const int n = head_num * size_per_head;
// the [0..seq_len) indices really handle KV [max_pp_len..seq_len+max_pp_len)
// and Q [0..seq_len)
    // Note: if !PREFIX_PROMPT, max_pp_len = 0, so this is a no-op
const int dst_kv_seq_idx = seq_idx + prefix_prompt_length;
// NOTE: q has seq len excluding prefix prompt
// src QKV: [batch, time, 3, head, hidden]
const int src_q_idx = token_idx * 3 * n + hidden_idx;
const int src_k_idx = token_idx * 3 * n + hidden_idx + n;
const int src_v_idx = token_idx * 3 * n + hidden_idx + 2 * n;
Vec_t q, k, v;
Vec_t q_bias, k_bias, v_bias;
if (!is_masked) {
q = *reinterpret_cast<const Vec_t*>(&QKV[src_q_idx]);
k = *reinterpret_cast<const Vec_t*>(&QKV[src_k_idx]);
v = *reinterpret_cast<const Vec_t*>(&QKV[src_v_idx]);
q_bias = *reinterpret_cast<const Vec_t*>(&qkv_bias[hidden_idx]);
k_bias = *reinterpret_cast<const Vec_t*>(&qkv_bias[hidden_idx + n]);
v_bias = *reinterpret_cast<const Vec_t*>(&qkv_bias[hidden_idx + 2 * n]);
}
q = mmha::add(q, q_bias);
k = mmha::add(k, k_bias);
v = mmha::add(v, v_bias);
if (!neox_rotary_style) {
mmha::apply_rotary_embedding(q, k, tidx, rotary_embedding_dim, dst_kv_seq_idx);
}
else {
const bool do_rotary = !is_masked && vec_size * tidx < rotary_embedding_dim;
T* q_smem = reinterpret_cast<T*>(smem_);
T* k_smem = q_smem + rotary_embedding_dim;
const int half_rotary_dim = rotary_embedding_dim / 2;
const int half_idx = (tidx * vec_size) / half_rotary_dim;
const int intra_half_idx = (tidx * vec_size) % half_rotary_dim;
const int smem_pitch = half_rotary_dim; // TODO: adjust for bank conflicts?
if (do_rotary) {
*reinterpret_cast<Vec_t*>(q_smem + half_idx * smem_pitch + intra_half_idx) = q;
*reinterpret_cast<Vec_t*>(k_smem + half_idx * smem_pitch + intra_half_idx) = k;
}
__syncthreads();
const int transpose_idx = half_idx * (half_rotary_dim / 2) + intra_half_idx / 2;
constexpr int tidx_factor = vec_size / 2;
if (do_rotary) {
mmha::vec_from_smem_transpose(q, q_smem, transpose_idx, smem_pitch);
mmha::vec_from_smem_transpose(k, k_smem, transpose_idx, smem_pitch);
mmha::apply_rotary_embedding(q, k, transpose_idx / tidx_factor, rotary_embedding_dim, dst_kv_seq_idx);
mmha::write_smem_transpose(q, q_smem, transpose_idx, smem_pitch);
mmha::write_smem_transpose(k, k_smem, transpose_idx, smem_pitch);
}
__syncthreads();
if (do_rotary) {
q = *reinterpret_cast<Vec_t*>(q_smem + half_idx * smem_pitch + intra_half_idx);
k = *reinterpret_cast<Vec_t*>(k_smem + half_idx * smem_pitch + intra_half_idx);
}
}
if (!is_masked) {
*reinterpret_cast<Vec_t*>(&QKV[src_q_idx]) = q;
*reinterpret_cast<Vec_t*>(&QKV[src_k_idx]) = k;
*reinterpret_cast<Vec_t*>(&QKV[src_v_idx]) = v;
}
const int dest_q_idx = batch_idx * size_per_head * seq_len * head_num + head_idx * size_per_head * seq_len
+ seq_idx * size_per_head + tidx * vec_size;
const int dest_kv_idx = batch_idx * size_per_head * total_seq_len * head_num
+ head_idx * size_per_head * total_seq_len + dst_kv_seq_idx * size_per_head
+ tidx * vec_size;
if (!is_masked) {
*reinterpret_cast<Vec_t*>(&q_buf[dest_q_idx]) = q;
*reinterpret_cast<Vec_t*>(&k_buf[dest_kv_idx]) = k;
*reinterpret_cast<Vec_t*>(&v_buf[dest_kv_idx]) = v;
}
}
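/*
 * Reference sketch of the rotary embedding applied to q and k above (illustrative
 * only; mmha::apply_rotary_embedding is the authoritative implementation, and the
 * base of 10000.0f below is the usual RoPE default, assumed rather than read from
 * this file). For the element pair (x, y) held by thread `tidx` of a token at
 * position t = dst_kv_seq_idx:
 *
 *   zid   = 2 * tidx;                                            // even index of the pair
 *   theta = t / powf(10000.0f, zid / (float)rotary_embedding_dim);
 *   x'    = x * cosf(theta) - y * sinf(theta);
 *   y'    = x * sinf(theta) + y * cosf(theta);
 *
 * The neox path performs the same rotation but pairs element i with element
 * i + rotary_embedding_dim / 2, which is why the code above stages q and k
 * through a shared-memory transpose before and after rotating.
 */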
#define FUSED_QKV_BIAS_TRANSPOSE_LAUNCH(T, PREFIX_PROMPT) \
hipLaunchKernelGGL(( add_fusedQKV_bias_transpose_kernel<T, PREFIX_PROMPT>), dim3(grid), dim3(block), smem_size, stream, q_buf, \
k_buf, \
v_buf, \
param, \
QKV, \
qkv_bias, \
padding_offset, \
batch_size, \
seq_len, \
head_num, \
size_per_head, \
rotary_embedding_dim, \
neox_rotary_style);
template<typename T>
void invokeAddFusedQKVBiasTranspose(T* q_buf,
T* k_buf,
T* v_buf,
PrefixPromptBatchWeightsParam<T> param,
T* QKV,
const T* qkv_bias,
const int* padding_offset,
const int batch_size,
const int seq_len,
const int token_num,
const int head_num,
const int size_per_head,
const int rotary_embedding_dim,
const int neox_rotary_style,
const float* scale,
const int int8_mode,
hipStream_t stream)
{
// [bs, seq_len, 3, head, Dh]
if (rotary_embedding_dim == 0 && param.max_prefix_prompt_length == 0) {
const int m = token_num;
const int n = head_num * size_per_head;
dim3 block(384);
dim3 grid((int)(ceil(1.0 * m * n / 384)));
hipLaunchKernelGGL(( add_fusedQKV_bias_transpose_kernel), dim3(grid), dim3(block), 0, stream, q_buf,
k_buf,
v_buf,
QKV,
qkv_bias,
padding_offset,
batch_size,
seq_len,
token_num,
head_num,
size_per_head,
scale,
int8_mode);
}
else {
FT_CHECK_WITH_INFO(int8_mode != 2, "w8a8 not yet implemented with prefix prompt"); // TODO(mseznec)
// To implement rotary embeddings, each thread processes two QKV elems:
dim3 block((size_per_head / Vec_t<T>::size + 31) / 32 * 32);
dim3 grid(token_num + batch_size * param.max_prefix_prompt_length, head_num);
size_t smem_size = neox_rotary_style ? 2 * rotary_embedding_dim * sizeof(T) : 0;
// NOTE: add offset for rotary embedding
// hipLaunchKernelGGL(( add_fusedQKV_bias_transpose_kernel), dim3(grid), dim3(block), 0, stream,
// q_buf, k_buf, v_buf, param, QKV, qkv_bias, batch_size, seq_len, head_num, size_per_head,
// rotary_embedding_dim);
if (param.max_prefix_prompt_length == 0) {
FUSED_QKV_BIAS_TRANSPOSE_LAUNCH(T, false);
}
else {
FUSED_QKV_BIAS_TRANSPOSE_LAUNCH(T, true);
}
}
}
#define INSTANTIATEADDFUSEDQKVBIASTRANSPOSE(T) \
template void invokeAddFusedQKVBiasTranspose(T* q_buf, \
T* k_buf, \
T* v_buf, \
PrefixPromptBatchWeightsParam<T> param, \
T* QKV, \
const T* qkv_bias, \
const int* padding_offset, \
const int batch_size, \
const int seq_len, \
const int token_num, \
const int head_num, \
const int size_per_head, \
const int rotary_embedding_dim, \
const int neox_rotary_style, \
const float* scale, \
const int int8_mode, \
hipStream_t stream)
INSTANTIATEADDFUSEDQKVBIASTRANSPOSE(float);
INSTANTIATEADDFUSEDQKVBIASTRANSPOSE(half);
#ifdef ENABLE_BF16
INSTANTIATEADDFUSEDQKVBIASTRANSPOSE(__nv_bfloat16);
#endif
#undef INSTANTIATEADDFUSEDQKVBIASTRANSPOSE
template<typename T>
__global__ void transpose_4d(T* dst,
T* src,
const int dim0,
const int dim1,
const int dim2,
const int dim3,
const int dim0_leading_dim,
const int ite)
{
// transpose from [dim0, dim1, dim2, dim3] to [dim2, X, dim1, dim3]
// where the dimension of X is dim0_leading_dim, and offset is ite * dim0
for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < dim0 * dim1 * dim2 * dim3; i += blockDim.x * gridDim.x) {
int index = i;
const int d3 = index % dim3;
index = (index - d3) / dim3;
const int d2 = index % dim2;
index = (index - d2) / dim2;
const int d1 = index % dim1;
index = (index - d1) / dim1;
const int d0 = index % dim0;
index = (index - d0) / dim0;
dst[d2 * dim0_leading_dim * dim1 * dim3 + (d0 + dim0 * ite) * dim1 * dim3 + d1 * dim3 + d3] = src[i];
}
}
template<>
__global__ void transpose_4d(half* dst,
half* src,
const int dim0,
const int dim1,
const int dim2,
const int dim3,
const int dim0_leading_dim,
const int ite)
{
half2* dst_ptr = (half2*)dst;
half2* src_ptr = (half2*)src;
const int half_dim3 = dim3 / 2;
    // transpose from [dim0, dim1, dim2, half_dim3] to [dim2, X, dim1, half_dim3]
    // where the dimension of X is dim0_leading_dim, and the offset is ite * dim0
for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < dim0 * dim1 * dim2 * half_dim3;
i += blockDim.x * gridDim.x) {
int index = i;
const int d3 = index % half_dim3;
index = (index - d3) / half_dim3;
const int d2 = index % dim2;
index = (index - d2) / dim2;
const int d1 = index % dim1;
index = (index - d1) / dim1;
const int d0 = index % dim0;
index = (index - d0) / dim0;
dst_ptr[d2 * dim0_leading_dim * dim1 * half_dim3 + (d0 + dim0 * ite) * dim1 * half_dim3 + d1 * half_dim3 + d3] =
src_ptr[i];
}
}
template<typename T>
void invokeTranspose4d(T* dst,
T* src,
const int local_batch_size,
const int seq_len,
const int size_per_head,
const int local_hidden_units,
const int local_head_num,
const int batch_size,
const int ite,
hipStream_t stream)
{
hipLaunchKernelGGL(( transpose_4d), dim3(local_batch_size * seq_len * local_hidden_units / 512), dim3(512 / (4 / (sizeof(T)))), 0, stream,
dst, src, local_batch_size, local_head_num, seq_len, size_per_head, batch_size, ite);
}
#define INSTANTIATETRANSPOSE4D(T) \
template void invokeTranspose4d(T* dst, \
T* src, \
const int local_batch_size, \
const int seq_len, \
const int size_per_head, \
const int local_hidden_units, \
const int local_head_num, \
const int batch_size, \
const int ite, \
hipStream_t stream)
INSTANTIATETRANSPOSE4D(float);
INSTANTIATETRANSPOSE4D(half);
#undef INSTANTIATETRANSPOSE4D
template<typename T>
__global__ void transpose_4d_batch_major_k_cache(
T* k_dst, const T* k_src, const int head_num, const int size_per_head, const int seq_len, const int max_seq_len)
{
const int batch_id = blockIdx.y;
const int head_id = blockIdx.z;
constexpr int X_ELEMS = (sizeof(T) == 4) ? 4 : 8;
auto key_src = reinterpret_cast<const uint4*>(k_src + batch_id * head_num * size_per_head * seq_len
+ head_id * size_per_head * seq_len);
auto key_dst = reinterpret_cast<uint4*>(k_dst + batch_id * head_num * size_per_head * max_seq_len
+ head_id * size_per_head * max_seq_len);
const int out_idx = blockIdx.x * blockDim.x + threadIdx.x;
int size_per_head_div_x = size_per_head / X_ELEMS;
if (out_idx >= size_per_head_div_x * max_seq_len) {
return;
}
int idx = out_idx;
const int k_seq_len_id = idx % max_seq_len;
idx = (idx - k_seq_len_id) / max_seq_len;
const int k_head_size_id = idx % size_per_head_div_x;
if (k_seq_len_id < seq_len) {
key_dst[out_idx] = key_src[k_seq_len_id * size_per_head_div_x + k_head_size_id];
}
}
template<typename T>
__global__ void transpose_4d_batch_major_v_cache(
T* v_dst, const T* v_src, const int head_num, const int size_per_head, const int seq_len, const int max_seq_len)
{
const int batch_id = blockIdx.y;
const int head_id = blockIdx.z;
// 16 byte loads will handle "x" dimension
auto val_src = reinterpret_cast<const uint4*>(v_src + batch_id * head_num * size_per_head * seq_len
+ head_id * size_per_head * seq_len);
auto val_dst = reinterpret_cast<uint4*>(v_dst + batch_id * head_num * size_per_head * max_seq_len
+ head_id * size_per_head * max_seq_len);
// idx is over output dimension L * size_per_head / x for values
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
constexpr int X_ELEMS = (sizeof(T) == 4) ? 4 : 8;
const int size_per_head_div_x = size_per_head / X_ELEMS;
if (idx >= size_per_head_div_x * seq_len) {
return;
}
val_dst[idx] = val_src[idx];
}
template<typename T>
void invokeTranspose4dBatchMajor(T* k_dst,
T* v_dst,
const T* k_src,
const T* v_src,
const int local_batch_size,
const int seq_len,
const int max_seq_len,
const int size_per_head,
const int local_head_num,
hipStream_t stream)
{
constexpr int block_sz = 128;
constexpr int x = (sizeof(T) == 4) ? 4 : 8;
int size = max_seq_len * size_per_head / x;
dim3 grid((size + block_sz - 1) / block_sz, local_batch_size, local_head_num);
dim3 grid_v((seq_len * size_per_head / x + block_sz - 1) / block_sz, local_batch_size, local_head_num);
hipLaunchKernelGGL(( transpose_4d_batch_major_k_cache), dim3(grid), dim3(block_sz), 0, stream,
k_dst, k_src, local_head_num, size_per_head, seq_len, max_seq_len);
hipLaunchKernelGGL(( transpose_4d_batch_major_v_cache), dim3(grid_v), dim3(block_sz), 0, stream,
v_dst, v_src, local_head_num, size_per_head, seq_len, max_seq_len);
}
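// Layout note with a worked example (the concrete sizes are assumptions for
// illustration only): the key cache written above is stored per (batch, head) as
// [size_per_head / X, max_seq_len, X] elements, where X = 16 bytes / sizeof(T)
// (4 floats or 8 half elements), so each uint4 access moves one whole X-element
// group. For half with size_per_head = 64 and max_seq_len = 1024 that is
// 64 / 8 = 8 rows of 1024 uint4 entries per (batch, head). The value cache keeps
// its original [seq_len, size_per_head] ordering and is merely copied into a
// buffer strided by max_seq_len, leaving the tail beyond seq_len untouched.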
#define INSTANTIATETRANSPOSE4DBATCHMAJOR(T) \
template void invokeTranspose4dBatchMajor(T* k_dst, \
T* v_dst, \
const T* k_src, \
const T* v_src, \
const int local_batch_size, \
const int seq_len, \
const int max_seq_len, \
const int size_per_head, \
const int local_head_num, \
hipStream_t stream)
INSTANTIATETRANSPOSE4DBATCHMAJOR(float);
INSTANTIATETRANSPOSE4DBATCHMAJOR(half);
#ifdef ENABLE_BF16
INSTANTIATETRANSPOSE4DBATCHMAJOR(__nv_bfloat16);
#endif
#undef INSTANTIATETRANSPOSE4DBATCHMAJOR
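// Illustrative usage sketch (hypothetical helper and buffer names): one plausible way to hand
// the [B, H, S, Dh] key/value tensors produced by the context phase to the batch-major KV-cache
// transpose above. Both caches must hold batch * head_num * max_seq_len * size_per_head elements.
inline void exampleWriteKvCacheFloat(float*       k_cache,       // [B, H, Dh/x, max_S, x]
                                     float*       v_cache,       // [B, H, max_S, Dh], first S rows written
                                     const float* k_ctx,         // [B, H, S, Dh]
                                     const float* v_ctx,         // [B, H, S, Dh]
                                     const int    batch,
                                     const int    seq_len,
                                     const int    max_seq_len,
                                     const int    size_per_head,
                                     const int    head_num,
                                     hipStream_t  stream)
{
    // Only the first seq_len steps of each cache slot are filled; the remaining
    // max_seq_len - seq_len steps are left untouched for later decoding steps.
    invokeTranspose4dBatchMajor(
        k_cache, v_cache, k_ctx, v_ctx, batch, seq_len, max_seq_len, size_per_head, head_num, stream);
}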
template<typename T>
__global__ void addRelativeAttentionBias(
T* qk_buf, const T* relative_attention_bias, const int batch_size, const int head_num, const int seq_len)
{
for (int i = threadIdx.x; i < batch_size * seq_len; i += blockDim.x) {
int batch_id = i / seq_len;
int seq_id = i % seq_len;
const int bias_index = blockIdx.x * seq_len + seq_id;
const int qk_index = batch_id * gridDim.x * seq_len + bias_index;
qk_buf[qk_index] = add(qk_buf[qk_index], relative_attention_bias[bias_index]);
}
}
template<typename T>
void invokeAddRelativeAttentionBias(T* qk_buf,
const T* relative_attention_bias,
const int batch_size,
const int head_num,
const int seq_len,
hipStream_t stream)
{
// qk_buf: [batch_size, head_num, seq_len, seq_len]
// relative_attention_bias: [1, head_num, seq_len, seq_len]
dim3 grid(head_num * seq_len);
dim3 block(512);
using T2 = typename TypeConverter<T>::Type;
#ifdef ENABLE_BF16
const bool is_half2 = (std::is_same<T, half>::value || std::is_same<T, __nv_bfloat16>::value) && (seq_len % 2 == 0);
#else
const bool is_half2 = (std::is_same<T, half>::value) && (seq_len % 2 == 0);
#endif
if (is_half2) {
hipLaunchKernelGGL(( addRelativeAttentionBias<T2>), dim3(grid), dim3(block), 0, stream,
(T2*)qk_buf, (const T2*)relative_attention_bias, batch_size, head_num, seq_len / 2);
}
else {
hipLaunchKernelGGL(( addRelativeAttentionBias), dim3(grid), dim3(block), 0, stream,
qk_buf, relative_attention_bias, batch_size, head_num, seq_len);
}
}
#define INSTANTIATEADDRELATIVEATTENTIONBIAS(T) \
template void invokeAddRelativeAttentionBias(T* qk_buf, \
const T* relative_attention_bias, \
const int batch_size, \
const int head_num, \
const int seq_len, \
hipStream_t stream)
INSTANTIATEADDRELATIVEATTENTIONBIAS(float);
INSTANTIATEADDRELATIVEATTENTIONBIAS(half);
#ifdef ENABLE_BF16
INSTANTIATEADDRELATIVEATTENTIONBIAS(__nv_bfloat16);
#endif
#undef INSTANTIATEADDRELATIVEATTENTIONBIAS
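// Illustrative usage sketch (hypothetical helper and pointer names): adding a T5-style relative
// attention bias to the raw attention logits in place. The half2 fast path above is taken
// automatically when seq_len is even.
inline void exampleAddRelativeBias(half*       qk_logits,  // [batch_size, head_num, seq_len, seq_len]
                                   const half* rel_bias,   // [1, head_num, seq_len, seq_len]
                                   const int   batch_size,
                                   const int   head_num,
                                   const int   seq_len,
                                   hipStream_t stream)
{
    invokeAddRelativeAttentionBias(qk_logits, rel_bias, batch_size, head_num, seq_len, stream);
}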
/******************* invokeAddHead3SizeQKVBias ***********************/
// m = batch*window_num*window_len
// mm_qkv is [m, head*3*size_per_head] row-major
// bias_qkv is [head*3*size_per_head]
// q_buf_, k_buf_, v_buf_ is [batch*window_num, num_head, window_len, size_per_head] row-major
// grid(window_len, window_num, 3*batch);
// block(num_head * size_per_head)
template<typename T>
__global__ void add_head3Size_QKV_bias(const T* mm_qkv,
const T* bias_qkv,
T* q_buf_,
T* k_buf_,
T* v_buf_,
const int batch,
const int window_num,
const int window_len,
const int num_head,
const int size_per_head)
{
T* buf_ptr;
int qkv_id = blockIdx.z / batch;
if (qkv_id == 0) {
buf_ptr = q_buf_;
}
else if (qkv_id == 1) {
buf_ptr = k_buf_;
}
else {
buf_ptr = v_buf_;
}
const int batch_id = blockIdx.z % batch;
const int token_id = blockIdx.x;
const int window_id = blockIdx.y;
const int head_id = threadIdx.x / size_per_head;
const int id_in_head = threadIdx.x % size_per_head;
const int bias_idx = (head_id * 3 + qkv_id) * size_per_head + id_in_head;
const T bias = ldg(bias_qkv + bias_idx);
const int input_idx =
((batch_id * window_num + window_id) * window_len + token_id) * num_head * 3 * size_per_head + bias_idx;
T tmp = mm_qkv[input_idx] + bias;
int target_id = (((batch_id * window_num + window_id) * num_head + head_id) * window_len + token_id) * size_per_head
+ id_in_head;
buf_ptr[target_id] = tmp;
}
// for float2, size_per_head /= 2
// m = batch*window_num*window_len
// mm_qkv is [m, head*3*size_per_head] row-major
// bias_qkv is [head*3*size_per_head]
// q_buf_, k_buf_, v_buf_ is [batch*window_num, num_head, window_len, size_per_head] row-major
// grid(window_len, window_num, 3*batch);
// block(num_head * size_per_head)
template<>
__global__ void add_head3Size_QKV_bias(const float2* mm_qkv,
const float2* bias_qkv,
float2* q_buf_,
float2* k_buf_,
float2* v_buf_,
const int batch,
const int window_num,
const int window_len,
const int num_head,
const int size_per_head)
{
float2* buf_ptr;
int qkv_id = blockIdx.z / batch;
if (qkv_id == 0) {
buf_ptr = q_buf_;
}
else if (qkv_id == 1) {
buf_ptr = k_buf_;
}
else {
buf_ptr = v_buf_;
}
const int batch_id = blockIdx.z % batch;
const int token_id = blockIdx.x;
const int window_id = blockIdx.y;
const int head_id = threadIdx.x / size_per_head;
const int id_in_head = threadIdx.x % size_per_head;
const int bias_idx = (head_id * 3 + qkv_id) * size_per_head + id_in_head;
const float2 bias = ldg(bias_qkv + bias_idx);
const int input_idx =
((batch_id * window_num + window_id) * window_len + token_id) * num_head * 3 * size_per_head + bias_idx;
float2 tmp = mm_qkv[input_idx];
tmp.x += bias.x;
tmp.y += bias.y;
int target_id = (((batch_id * window_num + window_id) * num_head + head_id) * window_len + token_id) * size_per_head
+ id_in_head;
buf_ptr[target_id] = tmp;
}
// for half2, size_per_head /= 2
// m = batch*window_num*window_len
// mm_qkv is [m, head*3*size_per_head] row-major
// bias_qkv is [head*3*size_per_head]
// q_buf_, k_buf_, v_buf_ is [batch*window_num, num_head, window_len, size_per_head] row-major
// grid(window_len, window_num, batch);
// block(num_head * size_per_head)
template<>
__global__ void add_head3Size_QKV_bias(const half2* mm_qkv,
const half2* bias_qkv,
half2* q_buf_,
half2* k_buf_,
half2* v_buf_,
const int batch,
const int window_num,
const int window_len,
const int num_head,
const int size_per_head)
{
const int batch_id = blockIdx.z;
const int token_id = blockIdx.x;
const int window_id = blockIdx.y;
const int head_id = threadIdx.x / size_per_head;
const int id_in_head = threadIdx.x % size_per_head;
const int input_offset =
((batch_id * window_num + window_id) * window_len + token_id) * num_head * 3 * size_per_head;
const int target_id =
(((batch_id * window_num + window_id) * num_head + head_id) * window_len + token_id) * size_per_head
+ id_in_head;
int qkv_id = 0;
int bias_idx = (head_id * 3 + qkv_id) * size_per_head + id_in_head;
half2 bias = __ldg(bias_qkv + bias_idx);
int input_idx = input_offset + bias_idx;
half2 tmp = mm_qkv[input_idx];
tmp = __hadd2(tmp, bias);
q_buf_[target_id] = tmp;
qkv_id = 1;
bias_idx = (head_id * 3 + qkv_id) * size_per_head + id_in_head;
bias = __ldg(bias_qkv + bias_idx);
input_idx = input_offset + bias_idx;
tmp = mm_qkv[input_idx];
tmp = __hadd2(tmp, bias);
k_buf_[target_id] = tmp;
qkv_id = 2;
bias_idx = (head_id * 3 + qkv_id) * size_per_head + id_in_head;
bias = __ldg(bias_qkv + bias_idx);
input_idx = input_offset + bias_idx;
tmp = mm_qkv[input_idx];
tmp = __hadd2(tmp, bias);
v_buf_[target_id] = tmp;
}
#ifdef ENABLE_BF16
template<>
__global__ void add_head3Size_QKV_bias(const __nv_bfloat162* mm_qkv,
const __nv_bfloat162* bias_qkv,
__nv_bfloat162* q_buf_,
__nv_bfloat162* k_buf_,
__nv_bfloat162* v_buf_,
const int batch,
const int window_num,
const int window_len,
const int num_head,
const int size_per_head)
{
const int batch_id = blockIdx.z;
const int token_id = blockIdx.x;
const int window_id = blockIdx.y;
const int head_id = threadIdx.x / size_per_head;
const int id_in_head = threadIdx.x % size_per_head;
const int input_offset =
((batch_id * window_num + window_id) * window_len + token_id) * num_head * 3 * size_per_head;
const int target_id =
(((batch_id * window_num + window_id) * num_head + head_id) * window_len + token_id) * size_per_head
+ id_in_head;
int qkv_id = 0;
int bias_idx = (head_id * 3 + qkv_id) * size_per_head + id_in_head;
__nv_bfloat162 bias = ldg(bias_qkv + bias_idx);
int input_idx = input_offset + bias_idx;
__nv_bfloat162 tmp = mm_qkv[input_idx];
tmp = bf16hadd2(tmp, bias);
q_buf_[target_id] = tmp;
qkv_id = 1;
bias_idx = (head_id * 3 + qkv_id) * size_per_head + id_in_head;
bias = ldg(bias_qkv + bias_idx);
input_idx = input_offset + bias_idx;
tmp = mm_qkv[input_idx];
tmp = bf16hadd2(tmp, bias);
k_buf_[target_id] = tmp;
qkv_id = 2;
bias_idx = (head_id * 3 + qkv_id) * size_per_head + id_in_head;
bias = ldg(bias_qkv + bias_idx);
input_idx = input_offset + bias_idx;
tmp = mm_qkv[input_idx];
tmp = bf16hadd2(tmp, bias);
v_buf_[target_id] = tmp;
}
#endif
template<typename T>
void invokeAddHead3SizeQKVBias(const T* mm_qkv,
const T* bias_qkv,
T* q_buf_,
T* k_buf_,
T* v_buf_,
const int batch,
const int window_num,
const int window_len,
const int num_head,
const int size_per_head,
hipStream_t stream)
{
if (std::is_same<T, float>::value) {
dim3 grid(window_len, window_num, 3 * batch);
dim3 block(num_head * size_per_head);
if (block.x < 1024) {
hipLaunchKernelGGL(( add_head3Size_QKV_bias), dim3(grid), dim3(block), 0, stream,
mm_qkv, bias_qkv, q_buf_, k_buf_, v_buf_, batch, window_num, window_len, num_head, size_per_head);
}
else if ((block.x % 2 == 0) && (block.x / 2 < 1024)) {
block.x /= 2;
hipLaunchKernelGGL(( add_head3Size_QKV_bias), dim3(grid), dim3(block), 0, stream, (const float2*)mm_qkv,
(const float2*)bias_qkv,
(float2*)q_buf_,
(float2*)k_buf_,
(float2*)v_buf_,
batch,
window_num,
window_len,
num_head,
size_per_head / 2);
}
else {
printf("[ERROR][invokeAddHead3SizeQKVBias] unsupported block.x!\n");
exit(-1);
}
}
#ifdef ENABLE_BF16
else if (std::is_same<T, half>::value || std::is_same<T, __nv_bfloat16>::value) {
#else
else if (std::is_same<T, half>::value) {
#endif
dim3 grid(window_len, window_num, batch);
dim3 block(num_head * size_per_head / 2);
using T2 = typename TypeConverter<T>::Type; // half2 or bfloat16
if (block.x > 1024) {
printf("[ERROR][invokeAddHead3SizeQKVBias] block.x > 1024!\n");
exit(-1);
}
hipLaunchKernelGGL(( add_head3Size_QKV_bias), dim3(grid), dim3(block), 0, stream, (const T2*)mm_qkv,
(const T2*)bias_qkv,
(T2*)q_buf_,
(T2*)k_buf_,
(T2*)v_buf_,
batch,
window_num,
window_len,
num_head,
size_per_head / 2);
}
}
#define INSTANTIATEADDHEAD3SIZEQKVBIAS(T) \
template void invokeAddHead3SizeQKVBias<T>(const T* mm_qkv, \
const T* bias_qkv, \
T* q_buf_, \
T* k_buf_, \
T* v_buf_, \
const int batch, \
const int window_num, \
const int window_len, \
const int num_head, \
const int size_per_head, \
hipStream_t stream)
INSTANTIATEADDHEAD3SIZEQKVBIAS(float);
INSTANTIATEADDHEAD3SIZEQKVBIAS(half);
#ifdef ENABLE_BF16
INSTANTIATEADDHEAD3SIZEQKVBIAS(__nv_bfloat16);
#endif
#undef INSTANTIATEADDHEAD3SIZEQKVBIAS
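// Illustrative usage sketch (hypothetical helper and buffer names): splitting a fused Swin-style
// QKV GEMM output into per-head q/k/v buffers. Note the block-size limits enforced above:
// num_head * size_per_head must stay below 1024 for float (or be even and below 2048 via the
// float2 path), and num_head * size_per_head / 2 must not exceed 1024 for half/bf16.
inline void exampleSplitWindowQkv(const half* qkv_gemm_out, // [batch*window_num*window_len, num_head*3*size_per_head]
                                  const half* qkv_bias,     // [num_head*3*size_per_head]
                                  half*       q_buf,        // [batch*window_num, num_head, window_len, size_per_head]
                                  half*       k_buf,
                                  half*       v_buf,
                                  const int   batch,
                                  const int   window_num,
                                  const int   window_len,
                                  const int   num_head,
                                  const int   size_per_head,
                                  hipStream_t stream)
{
    invokeAddHead3SizeQKVBias(qkv_gemm_out, qkv_bias, q_buf, k_buf, v_buf,
                              batch, window_num, window_len, num_head, size_per_head, stream);
}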
/******************* invokeMaskedSoftMaxWithRelPosBias ***********************/
// grid = (window_len/word_per_thread, window_num*num_head, batch_size)
// block.x = max(32, (window_len + 31)/32*32)
// qk_buf is [batch, window_num, num_head, window_len, window_len]
// attn_mask is [window_num, window_len, window_len] + row-major
// relative_pos_bias is [num_head, window_len, window_len] + row-major
template<typename T>
__global__ void softmax_withRelPosBias_element1_kernel(T* qk_buf,
const T* attn_mask,
const T* relative_pos_bias,
const int batch_size,
const int num_head,
const int window_num,
const int window_len,
const int window_len_x_window_len,
const float qk_scale)
{
bool qual = threadIdx.x < window_len;
for (int window_id = blockIdx.x; window_id < window_len; window_id += gridDim.x) {
float tmp = -1e20f;
__shared__ float s_mean, s_max;
int qk_offset;
if (qual) {
const int offset_in_window = window_id * window_len + threadIdx.x;
qk_offset = (blockIdx.z * gridDim.y + blockIdx.y) * window_len_x_window_len + offset_in_window;
const int relative_pos_bias_offset = (blockIdx.y % num_head) * window_len_x_window_len + offset_in_window;
float mask_val =
(attn_mask == nullptr) ?
0.0f :
static_cast<float>(
ldg(attn_mask + ((blockIdx.y / num_head) * window_len_x_window_len + offset_in_window)));
tmp = qk_scale * static_cast<float>(qk_buf[qk_offset]) + mask_val
+ static_cast<float>(ldg(relative_pos_bias + relative_pos_bias_offset));
}
float max_val = blockReduceMax<float>(tmp);
if (threadIdx.x == 0) {
s_max = max_val;
}
__syncthreads();
float qk_tmp = qual ? __expf(tmp - s_max) : 0.0f;
float sum_val = blockReduceSum<float>(qk_tmp);
if (threadIdx.x == 0) {
s_mean = sum_val + 1e-6f;
s_mean = __fdividef(1.0f, s_mean);
}
__syncthreads();
if (qual) {
qk_buf[qk_offset] = (T)(qk_tmp * s_mean);
}
}
}
// grid = (window_len/word_per_thread, window_num*num_head, batch_size)
// block.x = max(32, (window_len/2 + 31)/32*32)
// qk_buf is [batch, window_num, num_head, window_len, window_len]
// attn_mask is [window_num, window_len, window_len] + row-major
// relative_pos_bias is [num_head, window_len, window_len] + row-major
template<typename T2, typename T>
__global__ void softmax_withRelPosBias_element2_kernel(T2* qk_buf,
const T2* attn_mask,
const T2* relative_pos_bias,
const int batch_size,
const int num_head,
const int window_num,
const int window_len,
const int window_len_x_window_len,
const float qk_scale)
{
const int window_len_2 = window_len / 2;
const int tidx = threadIdx.x;
bool qual = tidx < window_len_2;
const T2 zero = {T(0.0f), T(0.0f)};
const int bdim = blockDim.x;
for (int window_id = blockIdx.x; window_id < window_len; window_id += gridDim.x) {
float tmp = -1e20f;
__shared__ float s_mean, s_max;
int qk_offset;
float2 local_qk_val;
T2 qk_val;
if (qual) {
const int offset_in_window = window_id * window_len + 2 * tidx;
qk_offset = ((blockIdx.z * gridDim.y + blockIdx.y) * window_len_x_window_len + offset_in_window) / 2;
const int relative_pos_bias_offset =
((blockIdx.y % num_head) * window_len_x_window_len + offset_in_window) / 2;
T2 mask_val =
(attn_mask == nullptr) ?
zero :
ldg(attn_mask + ((blockIdx.y / num_head) * window_len_x_window_len + offset_in_window) / 2);
qk_val = qk_buf[qk_offset];
local_qk_val.x = static_cast<float>(qk_val.x);
local_qk_val.y = static_cast<float>(qk_val.y);
const T2 bias_val = ldg(relative_pos_bias + relative_pos_bias_offset);
local_qk_val.x =
qk_scale * local_qk_val.x + static_cast<float>(mask_val.x) + static_cast<float>(bias_val.x);
local_qk_val.y =
qk_scale * local_qk_val.y + static_cast<float>(mask_val.y) + static_cast<float>(bias_val.y);
tmp = local_qk_val.x > local_qk_val.y ? local_qk_val.x : local_qk_val.y;
}
float max_val = bdim <= 32 ? warpReduceMax<float>(tmp) : blockReduceMax<float>(tmp);
if (tidx == 0) {
s_max = max_val;
}
__syncthreads();
local_qk_val.x = qual ? __expf(local_qk_val.x - s_max) : 0.0f;
local_qk_val.y = qual ? __expf(local_qk_val.y - s_max) : 0.0f;
float sum_val = bdim <= 32 ? warpReduceSum<float>(local_qk_val.x + local_qk_val.y) :
blockReduceSum<float>(local_qk_val.x + local_qk_val.y);
if (tidx == 0) {
s_mean = sum_val + 1e-6f;
s_mean = __fdividef(1.0f, s_mean);
}
__syncthreads();
if (qual) {
local_qk_val.x = local_qk_val.x * s_mean;
local_qk_val.y = local_qk_val.y * s_mean;
qk_val.x = T(local_qk_val.x);
qk_val.y = T(local_qk_val.y);
qk_buf[qk_offset] = qk_val;
}
}
}
// grid = (window_len/word_per_thread, window_num*num_head, batch_size)
// block.x = max(32, (window_len/4 + 31)/32*32)
// qk_buf is [batch, window_num, num_head, window_len, window_len]
// attn_mask is [window_num, window_len, window_len] + row-major
// relative_pos_bias is [num_head, window_len, window_len] + row-major
template<typename T4, typename T>
__global__ void softmax_withRelPosBias_element4_kernel(T4* qk_buf,
const T4* attn_mask,
const T4* relative_pos_bias,
const int batch_size,
const int num_head,
const int window_num,
const int window_len,
const int window_len_x_window_len,
const float qk_scale)
{
const int window_len_4 = window_len / 4;
const int tidx = threadIdx.x;
bool qual = tidx < window_len_4;
const T4 zero = {T(0.0f), T(0.0f), T(0.0f), T(0.0f)};
const int bdim = blockDim.x;
for (int window_id = blockIdx.x; window_id < window_len; window_id += gridDim.x) {
float tmp = -1e20f;
__shared__ float s_mean, s_max;
int qk_offset;
float4 local_qk_val;
T4 qk_val;
if (qual) {
const int offset_in_window = window_id * window_len + 4 * tidx;
qk_offset = ((blockIdx.z * gridDim.y + blockIdx.y) * window_len_x_window_len + offset_in_window) / 4;
const int relative_pos_bias_offset =
((blockIdx.y % num_head) * window_len_x_window_len + offset_in_window) / 4;
T4 mask_val = (attn_mask == nullptr) ?
zero :
attn_mask[((blockIdx.y / num_head) * window_len_x_window_len + offset_in_window) / 4];
qk_val = qk_buf[qk_offset];
local_qk_val.x = static_cast<float>(qk_val.x);
local_qk_val.y = static_cast<float>(qk_val.y);
local_qk_val.z = static_cast<float>(qk_val.z);
local_qk_val.w = static_cast<float>(qk_val.w);
const T4 bias_val = relative_pos_bias[relative_pos_bias_offset];
local_qk_val.x =
qk_scale * local_qk_val.x + static_cast<float>(mask_val.x) + static_cast<float>(bias_val.x);
local_qk_val.y =
qk_scale * local_qk_val.y + static_cast<float>(mask_val.y) + static_cast<float>(bias_val.y);
local_qk_val.z =
qk_scale * local_qk_val.z + static_cast<float>(mask_val.z) + static_cast<float>(bias_val.z);
local_qk_val.w =
qk_scale * local_qk_val.w + static_cast<float>(mask_val.w) + static_cast<float>(bias_val.w);
tmp = local_qk_val.x > local_qk_val.y ? local_qk_val.x : local_qk_val.y;
tmp = tmp > local_qk_val.z ? tmp : local_qk_val.z;
tmp = tmp > local_qk_val.w ? tmp : local_qk_val.w;
}
float max_val = bdim <= 32 ? warpReduceMax<float>(tmp) : blockReduceMax<float>(tmp);
if (tidx == 0) {
s_max = max_val;
}
__syncthreads();
local_qk_val.x = qual ? __expf(local_qk_val.x - s_max) : 0.0f;
local_qk_val.y = qual ? __expf(local_qk_val.y - s_max) : 0.0f;
local_qk_val.z = qual ? __expf(local_qk_val.z - s_max) : 0.0f;
local_qk_val.w = qual ? __expf(local_qk_val.w - s_max) : 0.0f;
float sum_val = bdim <= 32 ?
warpReduceSum<float>(local_qk_val.x + local_qk_val.y + local_qk_val.z + local_qk_val.w) :
blockReduceSum<float>(local_qk_val.x + local_qk_val.y + local_qk_val.z + local_qk_val.w);
if (tidx == 0) {
s_mean = sum_val + 1e-6f;
s_mean = __fdividef(1.0f, s_mean);
}
__syncthreads();
if (qual) {
local_qk_val.x = local_qk_val.x * s_mean;
local_qk_val.y = local_qk_val.y * s_mean;
local_qk_val.z = local_qk_val.z * s_mean;
local_qk_val.w = local_qk_val.w * s_mean;
qk_val.x = T(local_qk_val.x);
qk_val.y = T(local_qk_val.y);
qk_val.z = T(local_qk_val.z);
qk_val.w = T(local_qk_val.w);
qk_buf[qk_offset] = qk_val;
}
}
}
template<typename T>
void invokeMaskedSoftMaxWithRelPosBias(T* qk_buf,
const T* attn_mask,
const T* relative_pos_bias,
const int batch_size,
const int num_head,
const int window_num,
const int window_len,
float qk_scale,
hipStream_t stream)
{
const int word_per_thread = 1;
dim3 grid((window_len + word_per_thread - 1) / word_per_thread, window_num * num_head, batch_size);
if ((window_len % 4 == 0) && window_len / 4 >= 32) {
dim3 block((window_len / 4 + 31) / 32 * 32);
if (std::is_same<T, float>::value) {
hipLaunchKernelGGL(( softmax_withRelPosBias_element4_kernel<float4, float>)
, dim3(grid), dim3(block), 0, stream, (float4*)qk_buf,
(const float4*)attn_mask,
(const float4*)relative_pos_bias,
batch_size,
num_head,
window_num,
window_len,
window_len * window_len,
qk_scale);
}
else if (std::is_same<T, half>::value) {
hipLaunchKernelGGL(( softmax_withRelPosBias_element4_kernel<half4, half>)
, dim3(grid), dim3(block), 0, stream, (half4*)qk_buf,
(const half4*)attn_mask,
(const half4*)relative_pos_bias,
batch_size,
num_head,
window_num,
window_len,
window_len * window_len,
qk_scale);
}
#ifdef ENABLE_BF16
else {
dim3 block((window_len + 31) / 32 * 32);
hipLaunchKernelGGL(( softmax_withRelPosBias_element1_kernel), dim3(grid), dim3(block), 0, stream, qk_buf,
attn_mask,
relative_pos_bias,
batch_size,
num_head,
window_num,
window_len,
window_len * window_len,
qk_scale);
}
#endif
}
else if (window_len % 2 == 0) {
dim3 block((window_len / 2 + 31) / 32 * 32);
if (std::is_same<T, float>::value) {
hipLaunchKernelGGL(( softmax_withRelPosBias_element2_kernel<float2, float>)
, dim3(grid), dim3(block), 0, stream, (float2*)qk_buf,
(const float2*)attn_mask,
(const float2*)relative_pos_bias,
batch_size,
num_head,
window_num,
window_len,
window_len * window_len,
qk_scale);
}
else if (std::is_same<T, half>::value) {
hipLaunchKernelGGL(( softmax_withRelPosBias_element2_kernel<half2, half>)
, dim3(grid), dim3(block), 0, stream, (half2*)qk_buf,
(const half2*)attn_mask,
(const half2*)relative_pos_bias,
batch_size,
num_head,
window_num,
window_len,
window_len * window_len,
qk_scale);
}
#ifdef ENABLE_BF16
else {
dim3 block((window_len + 31) / 32 * 32);
hipLaunchKernelGGL(( softmax_withRelPosBias_element1_kernel), dim3(grid), dim3(block), 0, stream, qk_buf,
attn_mask,
relative_pos_bias,
batch_size,
num_head,
window_num,
window_len,
window_len * window_len,
qk_scale);
}
#endif
}
else {
dim3 block((window_len + 31) / 32 * 32);
hipLaunchKernelGGL(( softmax_withRelPosBias_element1_kernel), dim3(grid), dim3(block), 0, stream, qk_buf,
attn_mask,
relative_pos_bias,
batch_size,
num_head,
window_num,
window_len,
window_len * window_len,
qk_scale);
}
}
#define INSTANTIATEMASKEDSOFTMAXWITHRELPOSBIAS(T) \
template void invokeMaskedSoftMaxWithRelPosBias(T* qk_buf, \
const T* attn_mask, \
const T* relative_pos_bias, \
const int batch_size, \
const int num_head, \
const int window_num, \
const int window_len, \
const float qk_scale, \
hipStream_t stream)
INSTANTIATEMASKEDSOFTMAXWITHRELPOSBIAS(float);
INSTANTIATEMASKEDSOFTMAXWITHRELPOSBIAS(half);
#ifdef ENABLE_BF16
INSTANTIATEMASKEDSOFTMAXWITHRELPOSBIAS(__nv_bfloat16);
#endif
#undef INSTANTIATEMASKEDSOFTMAXWITHRELPOSBIAS
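// Illustrative usage sketch (hypothetical helper and buffer names): launching the windowed
// softmax above; attn_mask may be nullptr when no shifted-window mask is needed.
inline void exampleWindowSoftmax(float*       qk_buf,    // [batch, window_num, num_head, window_len, window_len]
                                 const float* attn_mask, // [window_num, window_len, window_len] or nullptr
                                 const float* rel_bias,  // [num_head, window_len, window_len]
                                 const int    batch,
                                 const int    num_head,
                                 const int    window_num,
                                 const int    window_len,
                                 const float  qk_scale,
                                 hipStream_t  stream)
{
    // qk_scale is typically 1 / sqrt(size_per_head) for Swin-style window attention.
    invokeMaskedSoftMaxWithRelPosBias(
        qk_buf, attn_mask, rel_bias, batch, num_head, window_num, window_len, qk_scale, stream);
}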
template<typename T>
__global__ void transpose_attentions(
T* attentions_out, const T* attentions_in, size_t batch_size, size_t num_layers, size_t num_heads, size_t seq_len)
{
// attentions_in shape [B, H, S, S]
// attentions_out shape [B, L, H, S, S].
// Note that we write the L dimension as if it was index 0.
// In reality, the pointer has already been shifted to point to the correct layer.
const auto batch_idx = blockIdx.x;
const auto head_idx = blockIdx.y;
const auto dst_offset = (batch_idx * num_layers * num_heads + head_idx) * seq_len * seq_len;
const auto src_offset = (batch_idx * num_heads + head_idx) * seq_len * seq_len;
for (auto x = threadIdx.x; x < seq_len * seq_len; x += blockDim.x) {
attentions_out[dst_offset + x] = attentions_in[src_offset + x];
}
}
template<typename T>
void invokeTransposeAttentions(Tensor& attentions_out, const Tensor& attentions_in, hipStream_t stream)
{
const size_t batch_size = attentions_in.shape[0];
const size_t num_heads = attentions_in.shape[1];
const size_t seq_len = attentions_in.shape[2];
const size_t num_layers = attentions_out.shape[1];
const dim3 gridSize(batch_size, num_heads);
const dim3 blockSize(512);
hipLaunchKernelGGL(( transpose_attentions), dim3(gridSize), dim3(blockSize), 0, stream,
attentions_out.getPtr<T>(), attentions_in.getPtr<const T>(), batch_size, num_layers, num_heads, seq_len);
}
#define INSTANTIATETRANSPOSEATTENTIONS(T) \
template void invokeTransposeAttentions<T>( \
Tensor & attentions_out, const Tensor& attentions_in, hipStream_t stream)
INSTANTIATETRANSPOSEATTENTIONS(float);
INSTANTIATETRANSPOSEATTENTIONS(half);
#ifdef ENABLE_BF16
INSTANTIATETRANSPOSEATTENTIONS(__nv_bfloat16);
#endif
#undef INSTANTIATETRANSPOSEATTENTIONS
} // namespace fastertransformer
|
5b7990c8ad582d0fb95e2d5c3540dcc85acbd023.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2021, NAVER Corp. Authored by CLOVA.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/fastertransformer/kernels/decoder_masked_multihead_attention_utils.h"
#include "src/fastertransformer/kernels/reduce_kernel_utils.cuh"
#include "src/fastertransformer/kernels/unfused_attention_kernels.h"
#include "src/fastertransformer/utils/cuda_type_utils.cuh"
#include "src/fastertransformer/utils/cuda_utils.h"
namespace fastertransformer {
__inline__ __device__ int target_index(int id1, int id2, int id3, int id4, int dim_1, int dim_2, int dim_3, int dim_4)
{
return id1 * (dim_2 * dim_3 * dim_4) + id3 * (dim_2 * dim_4) + id2 * dim_4 + id4;
}
template<typename T>
__global__ void addQKVBiasIA3Transpose(T* q_out,
T* k_out,
T* v_out,
const T* __restrict q_in,
const T* __restrict bias_q,
const T* __restrict k_in,
const T* __restrict bias_k,
const T* __restrict v_in,
const T* __restrict bias_v,
const int* ia3_tasks,
const T* ia3_key_weights,
const T* ia3_value_weights,
const int batch_size,
const int seq_len,
const int head_num,
const int size_per_head)
{
const int n = head_num * size_per_head;
const int batch_id = blockIdx.x;
const int word_id = blockIdx.y;
const int row_id = batch_id * seq_len + word_id;
const bool use_ia3 = ia3_tasks != nullptr;
const int ia3_task = use_ia3 ? ia3_tasks[batch_id] : 0;
const bool use_ia3_key = use_ia3 && (ia3_key_weights != nullptr);
const bool use_ia3_value = use_ia3 && (ia3_value_weights != nullptr);
for (int col_id = threadIdx.x; col_id < n; col_id += blockDim.x) {
const int head_id = col_id / size_per_head;
const int size_id = col_id % size_per_head;
const int target_id = batch_id * (head_num * seq_len * size_per_head) + head_id * seq_len * size_per_head
+ word_id * size_per_head + size_id;
const int src_id = row_id * n + col_id;
T q = ldg(&q_in[src_id]);
q_out[target_id] = add(q, ldg(&bias_q[col_id]));
T k = add(ldg(&k_in[src_id]), ldg(&bias_k[col_id]));
if (use_ia3_key) {
k = k * ia3_key_weights[ia3_task * n + col_id];
}
k_out[target_id] = k;
T v = add(ldg(&v_in[src_id]), ldg(&bias_v[col_id]));
if (use_ia3_value) {
v = v * ia3_value_weights[ia3_task * n + col_id];
}
v_out[target_id] = v;
}
}
template<typename T>
__global__ void QKVIA3Transpose(T* q_out,
T* k_out,
T* v_out,
const T* __restrict q_in,
const T* __restrict k_in,
const T* __restrict v_in,
const int* ia3_tasks,
const T* __restrict ia3_key_weights,
const T* __restrict ia3_value_weights,
const int batch_size,
const int seq_len,
const int head_num,
const int size_per_head)
{
const int n = head_num * size_per_head;
const int batch_id = blockIdx.x;
const int word_id = blockIdx.y;
const int row_id = batch_id * seq_len + word_id;
const bool use_ia3 = ia3_tasks != nullptr;
const int ia3_task = use_ia3 ? ia3_tasks[batch_id] : 0;
const bool use_ia3_key = use_ia3 && (ia3_key_weights != nullptr);
const bool use_ia3_value = use_ia3 && (ia3_value_weights != nullptr);
for (int col_id = threadIdx.x; col_id < n; col_id += blockDim.x) {
const int head_id = col_id / size_per_head;
const int size_id = col_id % size_per_head;
const int target_id = batch_id * (head_num * seq_len * size_per_head) + head_id * seq_len * size_per_head
+ word_id * size_per_head + size_id;
const int src_id = row_id * n + col_id;
q_out[target_id] = ldg(&q_in[src_id]);
T k = ldg(&k_in[src_id]);
if (use_ia3_key) {
k = k * ia3_key_weights[ia3_task * n + col_id];
}
k_out[target_id] = k;
T v = ldg(&v_in[src_id]);
if (use_ia3_value) {
v = v * ia3_value_weights[ia3_task * n + col_id];
}
v_out[target_id] = v;
}
}
template<typename T>
void invokeAddQKVBiasIA3Transpose(T* q_buf,
T* k_buf,
T* v_buf,
T* Q,
const T* bias_Q,
T* K,
const T* bias_K,
T* V,
const T* bias_V,
const int batch_size,
const int seq_len,
const int head_num,
const int size_per_head,
const int* ia3_tasks,
const T* ia3_key_weights,
const T* ia3_value_weights,
cudaStream_t stream)
{
const int k = head_num * size_per_head;
dim3 grid(batch_size, seq_len);
bool is_add_bias = bias_Q != nullptr;
if (sizeof(T) == 4 || k % 2 != 0) {
dim3 block(min(k, 512));
if (is_add_bias) {
addQKVBiasIA3Transpose<T><<<grid, block, 0, stream>>>(q_buf,
k_buf,
v_buf,
Q,
bias_Q,
K,
bias_K,
V,
bias_V,
ia3_tasks,
ia3_key_weights,
ia3_value_weights,
batch_size,
seq_len,
head_num,
size_per_head);
}
else {
QKVIA3Transpose<T><<<grid, block, 0, stream>>>(q_buf,
k_buf,
v_buf,
Q,
K,
V,
ia3_tasks,
ia3_key_weights,
ia3_value_weights,
batch_size,
seq_len,
head_num,
size_per_head);
}
sync_check_cuda_error();
}
else {
using T2 = typename TypeConverter<T>::Type; // fp16 to half2, bf16 to bf162
dim3 block(min(k / 2, 512));
if (is_add_bias) {
addQKVBiasIA3Transpose<T2><<<grid, block, 0, stream>>>((T2*)q_buf,
(T2*)k_buf,
(T2*)v_buf,
(const T2*)Q,
(const T2*)bias_Q,
(const T2*)K,
(const T2*)bias_K,
(const T2*)V,
(const T2*)bias_V,
ia3_tasks,
(const T2*)ia3_key_weights,
(const T2*)ia3_value_weights,
batch_size,
seq_len,
head_num,
size_per_head / 2);
}
else {
QKVIA3Transpose<T2><<<grid, block, 0, stream>>>((T2*)q_buf,
(T2*)k_buf,
(T2*)v_buf,
(const T2*)Q,
(const T2*)K,
(const T2*)V,
ia3_tasks,
(const T2*)ia3_key_weights,
(const T2*)ia3_value_weights,
batch_size,
seq_len,
head_num,
size_per_head / 2);
}
sync_check_cuda_error();
}
}
#define INSTANTIATEADDQKVBIASIA3TRANSPOSE(T) \
template void invokeAddQKVBiasIA3Transpose(T* q_buf, \
T* k_buf, \
T* v_buf, \
T* Q, \
const T* bias_Q, \
T* K, \
const T* bias_K, \
T* V, \
const T* bias_V, \
const int batch_size, \
const int seq_len, \
const int head_num, \
const int size_per_head, \
const int* ia3_tasks, \
const T* ia3_key_weights, \
const T* ia3_value_weights, \
cudaStream_t stream)
INSTANTIATEADDQKVBIASIA3TRANSPOSE(float);
INSTANTIATEADDQKVBIASIA3TRANSPOSE(half);
#ifdef ENABLE_BF16
INSTANTIATEADDQKVBIASIA3TRANSPOSE(__nv_bfloat16);
#endif
#undef INSTANTIATEADDQKVBIASIA3TRANSPOSE
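// Illustrative usage sketch (hypothetical helper and buffer names): splitting separate Q/K/V GEMM
// outputs into [batch, head, seq, size_per_head] buffers without IA3 adaptation. Passing a null
// ia3_tasks pointer disables the IA3 key/value scaling entirely.
inline void exampleSplitQkvNoIa3(float*       q_buf,
                                 float*       k_buf,
                                 float*       v_buf,
                                 float*       Q,       // [batch*seq_len, head_num*size_per_head]
                                 const float* bias_Q,
                                 float*       K,
                                 const float* bias_K,
                                 float*       V,
                                 const float* bias_V,
                                 const int    batch_size,
                                 const int    seq_len,
                                 const int    head_num,
                                 const int    size_per_head,
                                 cudaStream_t stream)
{
    invokeAddQKVBiasIA3Transpose(q_buf, k_buf, v_buf, Q, bias_Q, K, bias_K, V, bias_V,
                                 batch_size, seq_len, head_num, size_per_head,
                                 /*ia3_tasks=*/nullptr, /*ia3_key_weights=*/nullptr,
                                 /*ia3_value_weights=*/nullptr, stream);
}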
template<typename T, typename T_IN, int ITEMS_PER_THREAD>
__global__ void softmax_kernel(T* attn_score,
const T_IN* qk,
const T* attn_mask,
const T* linear_bias_slopes,
const int batch_size,
const int head_num,
const int q_length,
const int k_length,
const float qk_scale)
{
// attn_score, [batch_size, num_heads, q_length, k_length]
// qk, [batch_size, num_heads, q_length, k_length]
// attn_mask, [batch_size, q_length, k_length]
// linear_bias_slopes, [num_heads]
const int64_t bi = blockIdx.y; // Batch index.
const int64_t hi = blockIdx.z; // Head index.
__shared__ float s_mean, s_max;
const float linear_bias_slope = linear_bias_slopes != nullptr ? (float)linear_bias_slopes[hi] : 0.0f;
// Loop along the Q dimension.
for (int64_t qi = blockIdx.x; qi < q_length; qi += gridDim.x) {
float data[ITEMS_PER_THREAD];
int64_t qk_offset;
float local_max = -1e20f;
// Loop along the K dimension.
for (int64_t i = 0; blockDim.x * i + threadIdx.x < k_length; i++) {
int64_t ki = blockDim.x * i + threadIdx.x; // Index of K dimension.
qk_offset = ((bi * head_num + hi) * q_length + qi) * k_length + ki;
float qk_val = static_cast<float>(qk[qk_offset]);
float qk_bias = 0.0f;
if (linear_bias_slopes != nullptr) {
// We don't handle the upper diagonal (ki > qi) separately, whose values
// are negligible due to the negative infinity mask. This matches the
// HF implementation.
qk_bias += static_cast<float>(linear_bias_slope * (ki - qi));
}
int64_t mask_offset = (bi * q_length + qi) * k_length + ki;
float mask_val = static_cast<float>(ldg(&attn_mask[mask_offset]));
qk_bias += (1.0f - mask_val) * -10000.0f;
data[i] = qk_scale * qk_val + qk_bias;
local_max = fmax(local_max, data[i]);
}
float max_val = blockDim.x <= 32 ? warpReduceMax(local_max) : blockReduceMax<float>(local_max);
if (threadIdx.x == 0) {
s_max = max_val;
}
__syncthreads();
float local_sum = 0;
for (int64_t i = 0; blockDim.x * i + threadIdx.x < k_length; i++) {
data[i] = __expf(data[i] - s_max);
local_sum += data[i];
}
float sum_val = blockDim.x <= 32 ? warpReduceSum(local_sum) : blockReduceSum<float>(local_sum);
if (threadIdx.x == 0) {
s_mean = sum_val + 1e-6f;
s_mean = __fdividef(1.0f, s_mean);
}
__syncthreads();
for (int64_t i = 0; blockDim.x * i + threadIdx.x < k_length; i++) {
qk_offset = ((bi * head_num + hi) * q_length + qi) * k_length + blockDim.x * i + threadIdx.x;
attn_score[qk_offset] = (T)(data[i] * s_mean);
}
}
}
template<typename T, int ITEMS_PER_THREAD>
__global__ void softmax_kernel_h2(T* attn_score,
const T* qk_buf,
const T* attn_mask,
const T* linear_bias_slopes,
const int batch_size,
const int head_num,
const int q_length,
const int k_length,
const T qk_scale)
{
// attn_score, [batch_size, num_heads, q_length, k_length]
// qk, [batch_size, num_heads, q_length, k_length]
// attn_mask, [batch_size, q_length, k_length]
// linear_bias_slopes, [num_heads]
using T2 = typename TypeConverter<T>::Type;
T2* attn_score_h2 = reinterpret_cast<T2*>(attn_score);
const T2* qk_buf_h2 = reinterpret_cast<const T2*>(qk_buf);
const T2* attn_mask_h2 = reinterpret_cast<const T2*>(attn_mask);
const int bi = blockIdx.y; // Batch index
const int hi = blockIdx.z; // Head index.
__shared__ float s_mean, s_max;
// Constant values that will be used repeatedly in the q/k loop.
const T2 ONE = cuda_cast<T2>(1.0f);
const T2 ZERO = cuda_cast<T2>(0.0f);
const T2 NEG_INFTY = cuda_cast<T2>(-10000.0f);
// The normalization factor of QK.
const T2 qk_scale_h2 = cuda_cast<T2>(qk_scale);
// The slope of a linear position bias of the current attention head.
const T2 linear_bias_slope = linear_bias_slopes != nullptr ? cuda_cast<T2>(linear_bias_slopes[hi]) : ZERO;
// Loop over q dimension.
for (int qi = blockIdx.x; qi < q_length; qi += gridDim.x) {
T2 data[ITEMS_PER_THREAD];
int qk_offset;
float local_max = -1e20f;
// Loop over k dimension.
for (int i = 0; blockDim.x * i + threadIdx.x < (k_length / 2) && i < ITEMS_PER_THREAD; i++) {
// Half of the index along the k dimension. We will use the elements at {2 * ki, 2 * ki + 1}.
int ki = blockDim.x * i + threadIdx.x;
qk_offset = ((bi * head_num + hi) * q_length + qi) * (k_length / 2) + ki;
int mask_offset = (bi * q_length + qi) * (k_length / 2) + ki;
// The value of QK^T matrix at (qi, ki).
T2 qk = qk_buf_h2[qk_offset];
// The bias value to the position (qi, ki) including both mask and positional bias.
T2 qk_bias = ZERO;
if (linear_bias_slopes != nullptr) {
// The position bias depends on the distance between qi/ki and is zero if qi >= 2*ki
// or qi >= 2*ki+1. For T2 vectorization, we handle every two elements along the
// k-dim simultaneously. To do this, we check qi / 2 > ki at once instead of
// qi >= 2*ki or 2*ki+1. It works because a diagonal element for an odd qi will be
// zero due to slope * (qi - 2*ki+1) = 0. Thus, we don't handle the upper diagonal
// separately, whose values are negligible due to the negative infinity mask.
T2 dist(2.0f * ki - qi, 2.0f * ki + 1 - qi);
qk_bias = hadd2<T2>(qk_bias, hmul2<T2>(linear_bias_slope, dist));
}
T2 mask_val = ldg(&attn_mask_h2[mask_offset]);
qk_bias = hadd2<T2>(qk_bias, hmul2<T2>(hsub2<T2>(ONE, mask_val), NEG_INFTY));
data[i] = hadd2<T2>(hmul2<T2>(qk, qk_scale_h2), qk_bias);
local_max = fmax(local_max, fmax((float)data[i].x, (float)data[i].y));
}
float max_val = blockDim.x <= 32 ? warpReduceMax(local_max) : blockReduceMax<float>(local_max);
if (threadIdx.x == 0) {
s_max = max_val;
}
__syncthreads();
float local_sum = 0.0f;
for (int i = 0; blockDim.x * i + threadIdx.x < (k_length / 2) && i < ITEMS_PER_THREAD; i++) {
data[i] = hexp2<T2>(hsub2<T2>(data[i], cuda_cast<T2>(s_max)));
local_sum += (float)(data[i].x + data[i].y);
}
float sum_val = blockDim.x <= 32 ? warpReduceSum(local_sum) : blockReduceSum<float>(local_sum);
if (threadIdx.x == 0) {
s_mean = sum_val + 1e-6f;
s_mean = __fdividef(1.0f, s_mean);
}
__syncthreads();
for (int i = 0; blockDim.x * i + threadIdx.x < (k_length / 2) && i < ITEMS_PER_THREAD; i++) {
qk_offset = ((bi * head_num + hi) * q_length + qi) * (k_length / 2) + blockDim.x * i + threadIdx.x;
attn_score_h2[qk_offset] = hmul2<T2>(data[i], cuda_cast<T2>(s_mean));
}
}
}
template<typename T, int K_ITEMS_PER_THREAD, int Q_ITEMS_PER_THREAD>
__global__ void softmax_kernel_h2_v2(T* attn_score,
const T* qk_buf,
const T* attn_mask,
const T* linear_bias_slopes,
const int batch_size,
const int head_num,
const int q_length,
const int k_length,
const T scalar)
{
// attn_score, [batch_size, num_heads, q_length, k_length]
// qk, [batch_size, num_heads, q_length, k_length]
// attn_mask, [batch_size, q_length, k_length]
// linear_bias_slopes, [num_heads]
using T2 = typename TypeConverter<T>::Type;
// QK^T matrix of shape (batch_size, head_num, q_length, k_length / 2)
T2* attn_score_h2 = reinterpret_cast<T2*>(attn_score);
const T2* qk_buf_h2 = reinterpret_cast<const T2*>(qk_buf);
const T2* attn_mask_h2 = reinterpret_cast<const T2*>(attn_mask);
const int bi = blockIdx.y; // Batch index
const int hi = blockIdx.z; // Head index.
// Constant values that will be used repeatedly in the q/k loop.
const T2 ONE = cuda_cast<T2>(1.0f);
const T2 ZERO = cuda_cast<T2>(0.0f);
const T2 NEG_INFTY = cuda_cast<T2>(-10000.0f);
// The normalization factor of QK.
const T2 qk_scale = cuda_cast<T2>(scalar);
// The slope of a linear position bias of the current attention head.
const T2 linear_bias_slope = linear_bias_slopes != nullptr ? cuda_cast<T2>(linear_bias_slopes[hi]) : ZERO;
__shared__ float s_sum[Q_ITEMS_PER_THREAD], s_max[Q_ITEMS_PER_THREAD];
// Loop over q dimension.
for (int qi = blockIdx.x; qi < q_length; qi += gridDim.x * Q_ITEMS_PER_THREAD) {
T2 data[Q_ITEMS_PER_THREAD][K_ITEMS_PER_THREAD];
int qk_offset[Q_ITEMS_PER_THREAD];
float local_max[Q_ITEMS_PER_THREAD];
#pragma unroll
for (int j = 0; j < Q_ITEMS_PER_THREAD; j++) {
local_max[j] = -1e20f;
}
// Loop over k dimension.
const int Q_ITEMS = min((q_length - qi + gridDim.x - 1) / gridDim.x, Q_ITEMS_PER_THREAD);
for (int i = 0; blockDim.x * i + threadIdx.x < k_length / 2 && i < K_ITEMS_PER_THREAD; ++i) {
// Half of the index along the k dimension. We will use the elements at {2 * ki, 2 * ki + 1}.
int ki = blockDim.x * i + threadIdx.x;
int mask_offset[Q_ITEMS_PER_THREAD];
#pragma unroll
for (int j = 0; j < Q_ITEMS; j++) {
qk_offset[j] = ((bi * head_num + hi) * q_length + qi + j * gridDim.x) * (k_length / 2) + ki;
mask_offset[j] = (bi * q_length + qi + j * gridDim.x) * (k_length / 2) + ki;
}
T2 mask_val[Q_ITEMS_PER_THREAD];
#pragma unroll
for (int j = 0; j < Q_ITEMS; j++) {
mask_val[j] = ldg(&attn_mask_h2[mask_offset[j]]);
}
T2 qk[Q_ITEMS_PER_THREAD];
#pragma unroll
for (int j = 0; j < Q_ITEMS; j++) {
qk[j] = qk_buf_h2[qk_offset[j]];
}
T2 pos_bias[Q_ITEMS_PER_THREAD];
if (linear_bias_slopes != nullptr) {
#pragma unroll
for (int j = 0; j < Q_ITEMS; j++) {
// The position bias depends on the distance between qi/ki and is zero if qi >= 2*ki
// or qi >= 2*ki+1. For T2 vectorization, we handle every two elements along the
// k-dim simultaneously. To do this, we check qi / 2 > ki at once instead of
// qi >= 2*ki or 2*ki+1. It works because a diagonal element for an odd qi will be
// zero due to slope * (qi - 2*ki+1) = 0. Thus, we don't handle the upper diagonal
// separately, whose values are negligible due to the negative infinity mask.
int qidx = qi + j * gridDim.x;
T2 dist(2.0f * ki - qidx, 2.0f * ki + 1 - qidx);
pos_bias[j] = hmul2<T2>(linear_bias_slope, dist);
}
}
#pragma unroll
for (int j = 0; j < Q_ITEMS; j++) {
mask_val[j] = hmul2<T2>(hsub2<T2>(ONE, mask_val[j]), NEG_INFTY);
}
#pragma unroll
for (int j = 0; j < Q_ITEMS; j++) {
T2 val = hadd2<T2>(hmul2<T2>(qk_scale, qk[j]), mask_val[j]);
if (linear_bias_slopes != nullptr) {
val = hadd2<T2>(val, pos_bias[j]);
}
data[j][i] = val;
local_max[j] = fmax(local_max[j], fmax((float)data[j][i].x, (float)data[j][i].y));
}
}
if (blockDim.x <= 32) {
warpReduceMaxV2<float, Q_ITEMS_PER_THREAD>(local_max);
}
else {
blockReduceMaxV2<float, Q_ITEMS_PER_THREAD>(local_max);
}
if (threadIdx.x == 0) {
#pragma unroll
for (int j = 0; j < Q_ITEMS_PER_THREAD; j++) {
s_max[j] = local_max[j];
}
}
__syncthreads();
float local_sum[Q_ITEMS_PER_THREAD];
#pragma unroll
for (int j = 0; j < Q_ITEMS_PER_THREAD; j++) {
local_sum[j] = {0.f};
}
for (int i = 0; blockDim.x * i + threadIdx.x < k_length / 2 && i < K_ITEMS_PER_THREAD; ++i) {
#pragma unroll
for (int j = 0; j < Q_ITEMS; ++j) {
data[j][i] = hexp2<T2>(hsub2<T2>(data[j][i], cuda_cast<T2>(s_max[j])));
}
#pragma unroll
for (int j = 0; j < Q_ITEMS; j++) {
local_sum[j] += (float)(data[j][i].x + data[j][i].y);
}
}
if (blockDim.x <= 32) {
warpReduceSumV2<float, Q_ITEMS_PER_THREAD>(local_sum);
}
else {
blockReduceSumV2<float, Q_ITEMS_PER_THREAD>(local_sum);
}
if (threadIdx.x == 0) {
#pragma unroll
for (int j = 0; j < Q_ITEMS_PER_THREAD; j++) {
s_sum[j] = __fdividef(1.0f, local_sum[j] + 1e-6f);
}
}
__syncthreads();
for (int i = 0; blockDim.x * i + threadIdx.x < k_length / 2 && i < K_ITEMS_PER_THREAD; ++i) {
#pragma unroll
for (int j = 0; j < Q_ITEMS; j++) {
qk_offset[j] = ((bi * head_num + hi) * q_length + qi + j * gridDim.x) * (k_length / 2) + blockDim.x * i
+ threadIdx.x;
}
#pragma unroll
for (int j = 0; j < Q_ITEMS; j++) {
attn_score_h2[qk_offset[j]] = hmul2<T2>(data[j][i], cuda_cast<T2>(s_sum[j]));
}
}
}
}
#define LAUNCH_MAKSED_SOFTMAX_(T_, ITEMS_PER_THREAD) \
block.x /= ITEMS_PER_THREAD; \
block.x = (block.x + 31) / 32 * 32; \
assert(block.x <= 1024); \
if (is_half2) { \
if (grid.x % 4 == 0) { \
grid.x /= 4; \
softmax_kernel_h2_v2<T_, ITEMS_PER_THREAD, 4> \
<<<grid, block, 0, stream>>>((T_*)param.attention_score, \
(const T_*)param.qk, \
(const T_*)param.attention_mask, \
(const T_*)param.linear_bias_slopes, \
param.batch_size, \
param.num_heads, \
param.q_length, \
param.k_length, \
(const T_)param.qk_scale); \
} \
else { \
softmax_kernel_h2<T_, ITEMS_PER_THREAD><<<grid, block, 0, stream>>>((T_*)param.attention_score, \
(const T_*)param.qk, \
(const T_*)param.attention_mask, \
(const T_*)param.linear_bias_slopes, \
param.batch_size, \
param.num_heads, \
param.q_length, \
param.k_length, \
(const T_)param.qk_scale); \
} \
} \
else { \
softmax_kernel<T, T_IN, ITEMS_PER_THREAD><<<grid, block, 0, stream>>>(param.attention_score, \
param.qk, \
param.attention_mask, \
param.linear_bias_slopes, \
param.batch_size, \
param.num_heads, \
param.q_length, \
param.k_length, \
param.qk_scale); \
}
#define LAUNCH_MAKSED_SOFTMAX(ITEMS_PER_THREAD) LAUNCH_MAKSED_SOFTMAX_(half, ITEMS_PER_THREAD)
template<typename T, typename T_IN>
void invokeMaskedSoftmax(MaskedSoftmaxParam<T, T_IN>& param, cudaStream_t stream)
{
// attention_score, (batch_size, head_num, q_length, k_length), softmax output.
// qk, (batch_size, head_num, q_length, k_length), QK^T.
// attention_mask, (batch_size, q_length, k_length), attention mask.
// linear_bias_slopes, (head_num,) the slopes of the linear position bias.
dim3 grid(param.q_length, param.batch_size, param.num_heads);
if (param.batch_size * param.num_heads > 360) {
grid.x = ceil(float(param.q_length) / 32.0f);
}
bool is_half2 = sizeof(T) == 2 && sizeof(T_IN) == 2 && param.k_length % 2 == 0;
dim3 block((param.k_length / (is_half2 ? 2 : 1) + 31) / 32 * 32);
if (block.x > 2048 && block.x <= 4096) {
LAUNCH_MAKSED_SOFTMAX(4)
}
else if (block.x > 1024) {
LAUNCH_MAKSED_SOFTMAX(2)
}
else if (block.x > 0) {
LAUNCH_MAKSED_SOFTMAX(1)
}
else {
FT_CHECK(param.k_length <= 4096);
}
}
template void invokeMaskedSoftmax(MaskedSoftmaxParam<float, float>& param, cudaStream_t stream);
template void invokeMaskedSoftmax(MaskedSoftmaxParam<half, float>& param, cudaStream_t stream);
template void invokeMaskedSoftmax(MaskedSoftmaxParam<half, half>& param, cudaStream_t stream);
#ifdef ENABLE_BF16
template<>
void invokeMaskedSoftmax(MaskedSoftmaxParam<__nv_bfloat16, float>& param, cudaStream_t stream)
{
// attention_score, (batch_size, head_num, q_length, k_length), softmax output.
// qk, (batch_size, head_num, q_length, k_length), QK^T.
// attention_mask, (batch_size, q_length, k_length), attention mask.
// linear_bias_slopes, (head_num,) the slopes of the linear position bias.
using T = __nv_bfloat16;
using T_IN = float;
dim3 grid(param.q_length, param.batch_size, param.num_heads);
if (param.batch_size * param.num_heads > 360) {
grid.x = ceil(float(param.q_length) / 32.0f);
}
bool is_half2 = sizeof(T) == 2 && sizeof(T_IN) == 2 && param.k_length % 2 == 0;
dim3 block((param.k_length / (is_half2 ? 2 : 1) + 31) / 32 * 32);
if (block.x > 2048 && block.x <= 4096) {
LAUNCH_MAKSED_SOFTMAX_(__nv_bfloat16, 4);
}
else if (block.x > 1024) {
LAUNCH_MAKSED_SOFTMAX_(__nv_bfloat16, 2);
}
else if (block.x > 0) {
LAUNCH_MAKSED_SOFTMAX_(__nv_bfloat16, 1);
}
else {
FT_CHECK(param.k_length <= 4096);
}
}
template<>
void invokeMaskedSoftmax(MaskedSoftmaxParam<__nv_bfloat16, __nv_bfloat16>& param, cudaStream_t stream)
{
// attention_score, (batch_size, head_num, q_length, k_length), softmax output.
// qk, (batch_size, head_num, q_length, k_length), QK^T.
// attention_mask, (batch_size, q_length, k_length), attention mask.
// linear_bias_slopes, (head_num,) the slopes of the linear position bias.
using T = __nv_bfloat16;
using T_IN = __nv_bfloat16;
dim3 grid(param.q_length, param.batch_size, param.num_heads);
if (param.batch_size * param.num_heads > 360) {
grid.x = ceil(float(param.q_length) / 32.0f);
}
bool is_half2 = sizeof(T) == 2 && sizeof(T_IN) == 2 && param.k_length % 2 == 0;
dim3 block((param.k_length / (is_half2 ? 2 : 1) + 31) / 32 * 32);
if (block.x > 2048 && block.x <= 4096) {
LAUNCH_MAKSED_SOFTMAX_(__nv_bfloat16, 4);
}
else if (block.x > 1024) {
LAUNCH_MAKSED_SOFTMAX_(__nv_bfloat16, 2);
}
else if (block.x > 0) {
LAUNCH_MAKSED_SOFTMAX_(__nv_bfloat16, 1);
}
else {
FT_CHECK(param.k_length <= 4096);
}
}
#endif
#undef LAUNCH_MAKSED_SOFTMAX
#undef LAUNCH_MAKSED_SOFTMAX_
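// Illustrative usage sketch (hypothetical helper and pointer names): filling MaskedSoftmaxParam
// for the unfused softmax above. The member names follow the accesses in the launch macro; the
// scale field is assumed to be a plain float, and linear_bias_slopes may stay null when no
// ALiBi-style bias is used.
inline void exampleMaskedSoftmaxHalf(half*        attn_score, // (batch_size, head_num, q_length, k_length)
                                     const float* qk,         // (batch_size, head_num, q_length, k_length)
                                     const half*  attn_mask,  // (batch_size, q_length, k_length)
                                     const int    batch_size,
                                     const int    head_num,
                                     const int    q_length,
                                     const int    k_length,
                                     const float  qk_scale,
                                     cudaStream_t stream)
{
    MaskedSoftmaxParam<half, float> param;
    param.attention_score    = attn_score;
    param.qk                 = qk;
    param.attention_mask     = attn_mask;
    param.linear_bias_slopes = nullptr;
    param.batch_size         = batch_size;
    param.num_heads          = head_num;
    param.q_length           = q_length;
    param.k_length           = k_length;
    param.qk_scale           = qk_scale;
    invokeMaskedSoftmax(param, stream);
}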
template<typename T>
__global__ void transpose(const T* src,
T* dst,
const int batch_size,
const int seq_len,
const int head_num,
const int size_per_head,
const float* scale,
int int8_mode)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int batch_id = tid / (head_num * seq_len * size_per_head);
int head_id = (tid % (head_num * seq_len * size_per_head)) / (seq_len * size_per_head);
int seq_id = (tid % (seq_len * size_per_head)) / size_per_head;
int id = tid % size_per_head;
int target_id = target_index(batch_id, head_id, seq_id, id, batch_size, head_num, seq_len, size_per_head);
if (int8_mode == 2) {
using Int8_Packed_T = typename packed_as<int8_t, num_elems<T>::value>::type;
using Float_Packed_T = typename packed_as<float, num_elems<T>::value>::type;
const Float_Packed_T scale_val = cuda_cast<Float_Packed_T>(*scale);
reinterpret_cast<Int8_Packed_T*>(dst)[target_id] =
cuda_cast<Int8_Packed_T>(cuda_cast<Float_Packed_T>(src[tid]) * scale_val);
}
else {
dst[target_id] = src[tid];
}
}
template<>
__global__ void transpose(const float* src,
float* dst,
const int batch_size,
const int seq_len,
const int head_num,
const int size_per_head,
const float* scale,
int int8_mode)
{
int batch_id = blockIdx.x / (head_num * seq_len);
int seq_id = blockIdx.x % seq_len;
int head_id = (blockIdx.x % (head_num * seq_len)) / seq_len;
const int target_id = batch_id * (head_num * seq_len * size_per_head) + seq_id * head_num * size_per_head
+ head_id * size_per_head + threadIdx.x;
const int src_id = blockIdx.x * size_per_head + threadIdx.x;
if (int8_mode == 2) {
const float scale_val = *scale;
reinterpret_cast<int8_t*>(dst)[target_id] = cuda_cast<int8_t>(src[src_id] * scale_val);
}
else {
dst[target_id] = src[src_id];
}
}
template<typename T>
void invokeTransposeQKV(T* dst,
T* src,
const int batch_size,
const int seq_len,
const int head_num,
const int size_per_head,
const float* scale,
const int int8_mode,
cudaStream_t stream)
{
dim3 grid, block;
if (sizeof(T) == 2) {
int seq_per_block = 1;
grid.x = batch_size * head_num * seq_len / seq_per_block;
while (seq_per_block < 4 && grid.x % 2 == 0) {
grid.x /= 2;
seq_per_block *= 2;
}
FT_CHECK(grid.x * seq_per_block == (size_t)batch_size * head_num * seq_len);
if (seq_per_block * size_per_head % 2 == 0) {
block.x = seq_per_block * size_per_head / 2;
if (std::is_same<T, half>::value) {
transpose<half2><<<grid, block, 0, stream>>>(
(half2*)src, (half2*)dst, batch_size, seq_len, head_num, size_per_head / 2, scale, int8_mode);
}
#ifdef ENABLE_BF16
else {
transpose<__nv_bfloat162><<<grid, block, 0, stream>>>((__nv_bfloat162*)src,
(__nv_bfloat162*)dst,
batch_size,
seq_len,
head_num,
size_per_head / 2,
scale,
int8_mode);
}
#endif
}
else {
block.x = seq_per_block * size_per_head;
transpose<T>
<<<grid, block, 0, stream>>>(src, dst, batch_size, seq_len, head_num, size_per_head, scale, int8_mode);
}
}
else {
const int seq_per_block = 1;
grid.x = batch_size * head_num * seq_len / seq_per_block;
block.x = seq_per_block * size_per_head;
transpose<T>
<<<grid, block, 0, stream>>>(src, dst, batch_size, seq_len, head_num, size_per_head, scale, int8_mode);
}
}
#define INSTANTIATETRANSPOSEQKV(T) \
template void invokeTransposeQKV(T* src, \
T* dst, \
const int batch_size, \
const int seq_len, \
const int head_num, \
const int size_per_head, \
const float* scale, \
const int int8_mode, \
cudaStream_t stream)
INSTANTIATETRANSPOSEQKV(float);
INSTANTIATETRANSPOSEQKV(half);
#ifdef ENABLE_BF16
INSTANTIATETRANSPOSEQKV(__nv_bfloat16);
#endif
#undef INSTANTIATETRANSPOSEQKV
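// Illustrative usage sketch (hypothetical helper and buffer names): transposing the attention
// output from [batch, head, seq, size_per_head] back to [batch, seq, head, size_per_head]
// without int8 quantization (scale is only read when int8_mode == 2).
inline void exampleTransposeAttnOut(float*       dst, // [batch, seq_len, head_num*size_per_head]
                                    float*       src, // [batch, head_num, seq_len, size_per_head]
                                    const int    batch_size,
                                    const int    seq_len,
                                    const int    head_num,
                                    const int    size_per_head,
                                    cudaStream_t stream)
{
    invokeTransposeQKV(dst, src, batch_size, seq_len, head_num, size_per_head,
                       /*scale=*/nullptr, /*int8_mode=*/0, stream);
}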
template<typename T>
__global__ void add_QKV_bias_rebuild_padding_ia3(const T* Q,
const T* bias_Q,
const T* K,
const T* bias_K,
const T* V,
const T* bias_V,
T* q_buf_,
T* k_buf_,
T* v_buf_,
const int* ia3_tasks,
const T* ia3_key_weights,
const T* ia3_value_weights,
const int batch_size,
const int seq_len,
const int head_num,
const int size_per_head,
const int* mask_offset)
{
const int bid = blockIdx.x;
const int tgt_batch_id = (bid + mask_offset[bid]) / seq_len;
const int tgt_seq_id = (bid + mask_offset[bid]) % seq_len;
const int n = head_num * size_per_head;
const bool use_ia3 = ia3_tasks != nullptr;
const int ia3_task = use_ia3 ? ia3_tasks[tgt_batch_id] : 0;
const bool use_ia3_key = use_ia3 && (ia3_key_weights != nullptr);
const bool use_ia3_value = use_ia3 && (ia3_value_weights != nullptr);
for (int idx = threadIdx.x; idx < n; idx += blockDim.x) {
const int tgt_head_id = idx / size_per_head;
const int tgt_hidden_id = idx % size_per_head;
const int src_id = bid * n + idx;
const int tgt_id = tgt_batch_id * head_num * seq_len * size_per_head + tgt_head_id * seq_len * size_per_head
+ tgt_seq_id * size_per_head + tgt_hidden_id;
q_buf_[tgt_id] = add(ldg(&Q[src_id]), ldg(&bias_Q[idx]));
T k = ldg(&K[src_id]);
if (use_ia3_key) {
k = k * ia3_key_weights[ia3_task * n + idx];
}
k_buf_[tgt_id] = add(k, ldg(&bias_K[idx]));
T v = ldg(&V[src_id]);
if (use_ia3_value) {
v = v * ia3_value_weights[ia3_task * n + idx];
}
v_buf_[tgt_id] = add(v, ldg(&bias_V[idx]));
}
}
template<typename T>
__global__ void rebuild_padding_ia3(const T* Q,
const T* K,
const T* V,
T* q_buf_,
T* k_buf_,
T* v_buf_,
const int* ia3_tasks,
const T* ia3_key_weights,
const T* ia3_value_weights,
const int batch_size,
const int seq_len,
const int head_num,
const int size_per_head,
const int* mask_offset)
{
const int bid = blockIdx.x;
const int tgt_batch_id = (bid + mask_offset[bid]) / seq_len;
const int tgt_seq_id = (bid + mask_offset[bid]) % seq_len;
const int n = head_num * size_per_head;
const bool use_ia3 = ia3_tasks != nullptr;
const int ia3_task = use_ia3 ? ia3_tasks[tgt_batch_id] : 0;
const bool use_ia3_key = use_ia3 && (ia3_key_weights != nullptr);
const bool use_ia3_value = use_ia3 && (ia3_value_weights != nullptr);
for (int idx = threadIdx.x; idx < n; idx += blockDim.x) {
const int tgt_head_id = idx / size_per_head;
const int tgt_hidden_id = idx % size_per_head;
const int src_id = bid * n + idx;
const int tgt_id = tgt_batch_id * head_num * seq_len * size_per_head + tgt_head_id * seq_len * size_per_head
+ tgt_seq_id * size_per_head + tgt_hidden_id;
q_buf_[tgt_id] = ldg(&Q[src_id]);
T k = ldg(&K[src_id]);
if (use_ia3_key) {
k = k * ia3_key_weights[ia3_task * n + idx];
}
k_buf_[tgt_id] = k;
T v = ldg(&V[src_id]);
if (use_ia3_value) {
v = v * ia3_value_weights[ia3_task * n + idx];
}
v_buf_[tgt_id] = v;
}
}
template<typename T>
void invokeAddQKVBiasIA3RebuildPadding(T* Q,
const T* bias_Q,
T* K,
const T* bias_K,
T* V,
const T* bias_V,
T* q_buf,
T* k_buf,
T* v_buf,
const int batch_size,
const int seq_len,
const int head_num,
const int size_per_head,
const int valid_word_num,
const int* mask_offset,
const int* ia3_tasks,
const T* ia3_key_weights,
const T* ia3_value_weights,
cudaStream_t stream)
{
#ifdef ENABLE_BF16
bool is_half2 = (std::is_same<T, half>::value || std::is_same<T, __nv_bfloat16>::value) && (size_per_head % 2 == 0);
#else
bool is_half2 = (std::is_same<T, half>::value) && (size_per_head % 2 == 0);
#endif
using T2 = typename TypeConverter<T>::Type; // fp16 to half2, bf16 to bf162
int block_size = head_num * size_per_head;
if (is_half2) {
while (block_size > 512) {
if (block_size % 2 == 0) {
block_size /= 2;
}
else {
is_half2 = false;
block_size = std::min(block_size, 512);
break;
}
}
}
else {
block_size = std::min(block_size, 512);
}
if (bias_Q == nullptr && bias_K == nullptr && bias_V == nullptr) {
if (is_half2) {
rebuild_padding_ia3<<<valid_word_num, block_size, 0, stream>>>((T2*)Q,
(T2*)K,
(T2*)V,
(T2*)q_buf,
(T2*)k_buf,
(T2*)v_buf,
ia3_tasks,
(const T2*)ia3_key_weights,
(const T2*)ia3_value_weights,
batch_size,
seq_len,
head_num,
size_per_head / 2,
mask_offset);
}
else {
rebuild_padding_ia3<<<valid_word_num, block_size, 0, stream>>>(Q,
K,
V,
q_buf,
k_buf,
v_buf,
ia3_tasks,
ia3_key_weights,
ia3_value_weights,
batch_size,
seq_len,
head_num,
size_per_head,
mask_offset);
}
}
else if (bias_Q != nullptr && bias_K != nullptr && bias_V != nullptr) {
if (is_half2) {
add_QKV_bias_rebuild_padding_ia3<<<valid_word_num, block_size, 0, stream>>>((T2*)Q,
(const T2*)bias_Q,
(T2*)K,
(const T2*)bias_K,
(T2*)V,
(const T2*)bias_V,
(T2*)q_buf,
(T2*)k_buf,
(T2*)v_buf,
ia3_tasks,
(const T2*)ia3_key_weights,
(const T2*)ia3_value_weights,
batch_size,
seq_len,
head_num,
size_per_head / 2,
mask_offset);
}
else {
add_QKV_bias_rebuild_padding_ia3<<<valid_word_num, block_size, 0, stream>>>(Q,
bias_Q,
K,
bias_K,
V,
bias_V,
q_buf,
k_buf,
v_buf,
ia3_tasks,
ia3_key_weights,
ia3_value_weights,
batch_size,
seq_len,
head_num,
size_per_head,
mask_offset);
}
}
else {
FT_CHECK(false);
}
}
#define INSTANTIATEADDQKVBIASIA3REBUILDPADDING(T) \
template void invokeAddQKVBiasIA3RebuildPadding(T* Q, \
const T* bias_Q, \
T* K, \
const T* bias_K, \
T* V, \
const T* bias_V, \
T* q_buf, \
T* k_buf, \
T* v_buf, \
const int batch_size, \
const int seq_len, \
const int head_num, \
const int size_per_head, \
const int valid_word_num, \
const int* mask_offset, \
const int* ia3_tasks, \
const T* ia3_key_weights, \
const T* ia3_value_weights, \
cudaStream_t stream)
INSTANTIATEADDQKVBIASIA3REBUILDPADDING(float);
INSTANTIATEADDQKVBIASIA3REBUILDPADDING(half);
#ifdef ENABLE_BF16
INSTANTIATEADDQKVBIASIA3REBUILDPADDING(__nv_bfloat16);
#endif
#undef INSTANTIATEADDQKVBIASIA3REBUILDPADDING
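// Illustrative usage sketch (hypothetical helper and buffer names): adding the QKV biases while
// scattering the padding-free token rows (valid_word_num of them) back to their padded
// [batch, head, seq, size_per_head] positions, with IA3 disabled.
inline void exampleRebuildPaddedQkv(float*       Q,  // [valid_word_num, head_num*size_per_head]
                                    const float* bias_Q,
                                    float*       K,
                                    const float* bias_K,
                                    float*       V,
                                    const float* bias_V,
                                    float*       q_buf, // [batch, head_num, seq_len, size_per_head]
                                    float*       k_buf,
                                    float*       v_buf,
                                    const int    batch_size,
                                    const int    seq_len,
                                    const int    head_num,
                                    const int    size_per_head,
                                    const int    valid_word_num,
                                    const int*   mask_offset, // [valid_word_num] padding offsets
                                    cudaStream_t stream)
{
    invokeAddQKVBiasIA3RebuildPadding(Q, bias_Q, K, bias_K, V, bias_V, q_buf, k_buf, v_buf,
                                      batch_size, seq_len, head_num, size_per_head,
                                      valid_word_num, mask_offset,
                                      /*ia3_tasks=*/nullptr, /*ia3_key_weights=*/nullptr,
                                      /*ia3_value_weights=*/nullptr, stream);
}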
template<typename T>
__global__ void transpose_remove_padding(const T* src,
T* dst,
const int batch_size,
const int seq_len,
const int head_num,
const int size_per_head,
const int* mask_offset,
const float* scale,
const int int8_mode)
{
// TODO: optimize this kernel?
// do remove_sequence_length_padding
const int bid = blockIdx.x; // batch * seq_len or valid_word_num
const int src_batch_id = (bid + mask_offset[bid]) / seq_len;
const int src_seq_id = (bid + mask_offset[bid]) % seq_len;
const int dst_seq_id = bid;
const int src_offset_base = src_batch_id * seq_len * head_num * size_per_head + src_seq_id * size_per_head;
const int dst_offset_base = dst_seq_id * head_num * size_per_head;
using Int8_Packed_T = typename packed_as<int8_t, num_elems<T>::value>::type;
using Float_Packed_T = typename packed_as<float, num_elems<T>::value>::type;
const Float_Packed_T scale_val =
int8_mode == 2 ? cuda_cast<Float_Packed_T>(*scale) : cuda_cast<Float_Packed_T>(0.0f);
for (int idx = threadIdx.x; idx < head_num * size_per_head; idx += blockDim.x) {
const int head_id = idx / size_per_head;
const int hidden_id = idx % size_per_head;
const T src_elem = ldg(&src[src_offset_base + head_id * seq_len * size_per_head + hidden_id]);
if (int8_mode == 2) {
reinterpret_cast<Int8_Packed_T*>(dst)[dst_offset_base + idx] =
cuda_cast<Int8_Packed_T>(cuda_cast<Float_Packed_T>(src_elem) * scale_val);
}
else {
dst[dst_offset_base + idx] = src_elem;
}
}
}
// clang-format off
template<typename T>
void invokeTransposeAttentionOutRemovePadding(T* src,
T* dst,
const int valid_word_num,
const int batch_size,
const int seq_len,
const int head_num,
const int size_per_head,
const int* mask_offset,
const float* scale,
const int int8_mode,
cudaStream_t stream)
{
#ifdef ENABLE_BF16
bool is_half2 = (std::is_same<T, half>::value || std::is_same<T, __nv_bfloat16>::value) && (size_per_head % 2 == 0);
#else
bool is_half2 = (std::is_same<T, half>::value) && (size_per_head % 2 == 0);
#endif
using T2 = typename TypeConverter<T>::Type; // fp16 to half2, bf16 to bf162
int block_size = head_num * size_per_head;
if (is_half2) {
while (block_size > 512) {
if (block_size % 2 == 0) {
block_size /= 2;
}
else {
is_half2 = false;
block_size = std::min(block_size, 1024);
break;
}
}
}
else {
block_size = std::min(block_size, 1024);
}
if (is_half2) {
transpose_remove_padding<T2><<<valid_word_num, block_size, 0, stream>>>(
(T2*)src, (T2*)dst, batch_size, seq_len, head_num, size_per_head / 2, mask_offset, scale, int8_mode);
}
else {
transpose_remove_padding<<<valid_word_num, block_size, 0, stream>>>(
src, dst, batch_size, seq_len, head_num, size_per_head, mask_offset, scale, int8_mode);
}
}
// clang-format on
#define INSTANTIATETRANSPOSEATTENTIONOUTREMOVEPADDING(T) \
template void invokeTransposeAttentionOutRemovePadding(T* src, \
T* dst, \
const int valid_word_num, \
const int batch_size, \
const int seq_len, \
const int head_num, \
const int size_per_head, \
const int* mask_offset, \
const float* scale, \
const int int8_mode, \
cudaStream_t stream)
INSTANTIATETRANSPOSEATTENTIONOUTREMOVEPADDING(float);
INSTANTIATETRANSPOSEATTENTIONOUTREMOVEPADDING(half);
#ifdef ENABLE_BF16
INSTANTIATETRANSPOSEATTENTIONOUTREMOVEPADDING(__nv_bfloat16);
#endif
#undef INSTANTIATETRANSPOSEATTENTIONOUTREMOVEPADDING
template<typename T>
__global__ void add_fusedQKV_bias_transpose_kernel(T* q_buf,
T* k_buf,
T* v_buf,
T* QKV,
const T* __restrict qkv_bias,
const int* padding_offset,
const int batch_size,
const int seq_len,
const int token_num,
const int head_num,
const int size_per_head,
const float* scale,
const int int8_mode)
{
// QKV: [token_num, 3, n]
// qkv_bias: [3, n]
// q_buf, k_buf, v_buf: [batch, head_num, seq_len, size_per_head]
T* qkv_ptr[3] = {q_buf, k_buf, v_buf};
const int n = head_num * size_per_head;
for (int index = blockDim.x * blockIdx.x + threadIdx.x; index < token_num * 3 * n;
index += gridDim.x * blockDim.x) {
const int bias_id = index % (3 * n);
const int token_idx = index / (3 * n);
const int token_padded_idx = token_idx + (padding_offset == nullptr ? 0 : padding_offset[token_idx]);
const int target_batch_id = token_padded_idx / seq_len;
const int seq_id = token_padded_idx % seq_len;
const int qkv_id = (index % (3 * n)) / n;
const int head_id = (index % n) / size_per_head;
const int size_id = index % size_per_head;
T val;
if (int8_mode == 2) {
val = cuda_cast<T>(cuda_cast<float>(reinterpret_cast<const int8_t*>(QKV)[index]) * scale[qkv_id]);
}
else {
val = ldg(&QKV[index]);
}
val = val + ldg(&qkv_bias[bias_id]);
if (int8_mode == 2) {
// TODO(mseznec): add support for int8 BMM with FusedAtt
}
else {
QKV[index] = val;
}
qkv_ptr[qkv_id][target_batch_id * head_num * seq_len * size_per_head + head_id * seq_len * size_per_head
+ seq_id * size_per_head + size_id] = val;
}
}
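// Host-side sketch (ours) of the index decomposition performed by the kernel above: a flat
// index into the packed QKV buffer [token_num, 3, n] (n = head_num * size_per_head) is split
// into (token, qkv, head, element), and the element's destination inside q_buf/k_buf/v_buf
// of shape [batch, head_num, seq_len, size_per_head] is returned.
inline int refFusedQKVDstIndex(const int  index,           // flat index into QKV
                               const int  n,               // head_num * size_per_head
                               const int  seq_len,
                               const int  head_num,
                               const int  size_per_head,
                               const int* padding_offset,  // may be nullptr
                               int*       qkv_id_out)      // 0 = Q, 1 = K, 2 = V
{
    const int token_idx  = index / (3 * n);
    const int padded_idx = token_idx + (padding_offset == nullptr ? 0 : padding_offset[token_idx]);
    const int batch_id   = padded_idx / seq_len;
    const int seq_id     = padded_idx % seq_len;
    *qkv_id_out          = (index % (3 * n)) / n;
    const int head_id    = (index % n) / size_per_head;
    const int size_id    = index % size_per_head;
    return batch_id * head_num * seq_len * size_per_head + head_id * seq_len * size_per_head
           + seq_id * size_per_head + size_id;
}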
template<typename T>
struct Vec_t {
static constexpr int size = 0;
};
template<>
struct Vec_t<float> {
using Type = float2;
static constexpr int size = 2;
};
template<>
struct Vec_t<half> {
using Type = uint32_t;
static constexpr int size = 2;
};
#ifdef ENABLE_BF16
template<>
struct Vec_t<__nv_bfloat16> {
using Type = __nv_bfloat162;
static constexpr int size = 2;
};
#endif
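// Small illustrative checks (ours): every Vec_t specialization above packs two scalars into
// one register-sized vector, which is why the vectorized kernel below assumes an even
// size_per_head and masks out threads with tidx * vec_size >= size_per_head.
static_assert(Vec_t<float>::size == 2, "float is vectorized as float2");
static_assert(Vec_t<half>::size == 2, "half is vectorized as a packed half2 (uint32_t)");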
template<typename T, bool PREFIX_PROMPT>
__global__ void add_fusedQKV_bias_transpose_kernel(T* q_buf,
T* k_buf,
T* v_buf,
PrefixPromptBatchWeightsParam<T> param,
T* QKV,
const T* __restrict qkv_bias,
const int* padding_offset,
const int batch_size,
const int seq_len,
const int head_num,
const int size_per_head,
const int rotary_embedding_dim,
const bool neox_rotary_style)
{
    // This kernel adds bias to QKV, which has shape [batch_size, seq_len, 3, head_num, size_per_head],
    // splits QKV into the three buffers q, k, v, and transposes them to [batch_size, head_num, seq_len, size_per_head].
    // For q and k, it also applies the rotary embedding.
    // When a prefix prompt is passed, this kernel also concatenates the prefix prompt and key/value along the
    // seq_len dimension like [prompt, key/value].
    // So the final shape of q is unchanged ([batch_size, head_num, seq_len, size_per_head]), but
    // the shapes of key and value become [batch_size, head_num, max_prefix_prompt_length + seq_len, size_per_head].
// NOTE: QKV src shape (batch_size, seq_len, 3, head_num, size_per_head)
// QKV dst shape (3, batch_size, head_num, seq_len, size_per_head)
extern __shared__ __align__(sizeof(float2)) char smem_[]; // align on largest vector type
constexpr int vec_size = Vec_t<T>::size;
using Vec_t = typename Vec_t<T>::Type;
const int token_idx = blockIdx.x - batch_size * param.max_prefix_prompt_length;
const int token_padding_offset = (padding_offset == nullptr || token_idx < 0) ? 0 : padding_offset[token_idx];
const int tgt_token_idx = token_idx + token_padding_offset;
const int batch_idx = tgt_token_idx / seq_len;
const int seq_idx = tgt_token_idx % seq_len;
const int head_idx = blockIdx.y;
const int tidx = threadIdx.x;
const int total_seq_len = param.max_prefix_prompt_length + seq_len;
const bool is_masked = tidx * vec_size >= size_per_head;
// NOTE: blockIdx.x < batch_size * param.max_prefix_prompt_length really handles prefix prompts
if (PREFIX_PROMPT && token_idx < 0) {
const int prompt_batch_idx = blockIdx.x / param.max_prefix_prompt_length;
const int prompt_seq_idx = blockIdx.x % param.max_prefix_prompt_length;
const int prompt_length = param.d_prefix_prompt_lengths[prompt_batch_idx];
if (prompt_seq_idx < prompt_length) {
const int dest_kv_idx = prompt_batch_idx * size_per_head * total_seq_len * head_num
+ head_idx * size_per_head * total_seq_len + prompt_seq_idx * size_per_head
+ tidx * vec_size;
const int prefix_kv_idx =
size_per_head * prompt_length * head_idx + size_per_head * prompt_seq_idx + tidx * vec_size;
const T* prefix_prompt_k = param.d_prefix_prompt_batch[prompt_batch_idx]
+ param.prefix_prompt_layer_offset_per_seq * prompt_length;
const T* prefix_prompt_v = prefix_prompt_k + prompt_length * head_num * size_per_head;
if (!is_masked) {
*reinterpret_cast<Vec_t*>(&k_buf[dest_kv_idx]) =
*reinterpret_cast<const Vec_t*>(&prefix_prompt_k[prefix_kv_idx]);
*reinterpret_cast<Vec_t*>(&v_buf[dest_kv_idx]) =
*reinterpret_cast<const Vec_t*>(&prefix_prompt_v[prefix_kv_idx]);
}
}
return;
}
const int prefix_prompt_length = PREFIX_PROMPT ? param.d_prefix_prompt_lengths[batch_idx] : 0;
const int hidden_idx = head_idx * size_per_head + tidx * vec_size;
const int n = head_num * size_per_head;
// the [0..seq_len) indices really handle KV [max_pp_len..seq_len+max_pp_len)
// and Q [0..seq_len)
    // Note: if !PREFIX_PROMPT, max_pp_len = 0, so it's a no-op
const int dst_kv_seq_idx = seq_idx + prefix_prompt_length;
// NOTE: q has seq len excluding prefix prompt
// src QKV: [batch, time, 3, head, hidden]
const int src_q_idx = token_idx * 3 * n + hidden_idx;
const int src_k_idx = token_idx * 3 * n + hidden_idx + n;
const int src_v_idx = token_idx * 3 * n + hidden_idx + 2 * n;
Vec_t q, k, v;
Vec_t q_bias, k_bias, v_bias;
if (!is_masked) {
q = *reinterpret_cast<const Vec_t*>(&QKV[src_q_idx]);
k = *reinterpret_cast<const Vec_t*>(&QKV[src_k_idx]);
v = *reinterpret_cast<const Vec_t*>(&QKV[src_v_idx]);
q_bias = *reinterpret_cast<const Vec_t*>(&qkv_bias[hidden_idx]);
k_bias = *reinterpret_cast<const Vec_t*>(&qkv_bias[hidden_idx + n]);
v_bias = *reinterpret_cast<const Vec_t*>(&qkv_bias[hidden_idx + 2 * n]);
}
q = mmha::add(q, q_bias);
k = mmha::add(k, k_bias);
v = mmha::add(v, v_bias);
if (!neox_rotary_style) {
mmha::apply_rotary_embedding(q, k, tidx, rotary_embedding_dim, dst_kv_seq_idx);
}
else {
const bool do_rotary = !is_masked && vec_size * tidx < rotary_embedding_dim;
T* q_smem = reinterpret_cast<T*>(smem_);
T* k_smem = q_smem + rotary_embedding_dim;
const int half_rotary_dim = rotary_embedding_dim / 2;
const int half_idx = (tidx * vec_size) / half_rotary_dim;
const int intra_half_idx = (tidx * vec_size) % half_rotary_dim;
const int smem_pitch = half_rotary_dim; // TODO: adjust for bank conflicts?
if (do_rotary) {
*reinterpret_cast<Vec_t*>(q_smem + half_idx * smem_pitch + intra_half_idx) = q;
*reinterpret_cast<Vec_t*>(k_smem + half_idx * smem_pitch + intra_half_idx) = k;
}
__syncthreads();
const int transpose_idx = half_idx * (half_rotary_dim / 2) + intra_half_idx / 2;
constexpr int tidx_factor = vec_size / 2;
if (do_rotary) {
mmha::vec_from_smem_transpose(q, q_smem, transpose_idx, smem_pitch);
mmha::vec_from_smem_transpose(k, k_smem, transpose_idx, smem_pitch);
mmha::apply_rotary_embedding(q, k, transpose_idx / tidx_factor, rotary_embedding_dim, dst_kv_seq_idx);
mmha::write_smem_transpose(q, q_smem, transpose_idx, smem_pitch);
mmha::write_smem_transpose(k, k_smem, transpose_idx, smem_pitch);
}
__syncthreads();
if (do_rotary) {
q = *reinterpret_cast<Vec_t*>(q_smem + half_idx * smem_pitch + intra_half_idx);
k = *reinterpret_cast<Vec_t*>(k_smem + half_idx * smem_pitch + intra_half_idx);
}
}
if (!is_masked) {
*reinterpret_cast<Vec_t*>(&QKV[src_q_idx]) = q;
*reinterpret_cast<Vec_t*>(&QKV[src_k_idx]) = k;
*reinterpret_cast<Vec_t*>(&QKV[src_v_idx]) = v;
}
const int dest_q_idx = batch_idx * size_per_head * seq_len * head_num + head_idx * size_per_head * seq_len
+ seq_idx * size_per_head + tidx * vec_size;
const int dest_kv_idx = batch_idx * size_per_head * total_seq_len * head_num
+ head_idx * size_per_head * total_seq_len + dst_kv_seq_idx * size_per_head
+ tidx * vec_size;
if (!is_masked) {
*reinterpret_cast<Vec_t*>(&q_buf[dest_q_idx]) = q;
*reinterpret_cast<Vec_t*>(&k_buf[dest_kv_idx]) = k;
*reinterpret_cast<Vec_t*>(&v_buf[dest_kv_idx]) = v;
}
}
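// Host-side sketch (ours) of the destination indexing used above when a prefix prompt is
// present: q keeps the [batch, head_num, seq_len, size_per_head] layout, while each k/v row
// is shifted by the per-batch prompt length inside a buffer whose time dimension is
// total_seq_len = max_prefix_prompt_length + seq_len.
inline int refPrefixedKVDstIndex(const int batch_idx,
                                 const int head_idx,
                                 const int seq_idx,
                                 const int prefix_prompt_length,
                                 const int total_seq_len,
                                 const int head_num,
                                 const int size_per_head,
                                 const int elem_idx)  // tidx * vec_size in the kernel
{
    const int dst_kv_seq_idx = seq_idx + prefix_prompt_length;
    return batch_idx * size_per_head * total_seq_len * head_num + head_idx * size_per_head * total_seq_len
           + dst_kv_seq_idx * size_per_head + elem_idx;
}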
#define FUSED_QKV_BIAS_TRANSPOSE_LAUNCH(T, PREFIX_PROMPT) \
add_fusedQKV_bias_transpose_kernel<T, PREFIX_PROMPT><<<grid, block, smem_size, stream>>>(q_buf, \
k_buf, \
v_buf, \
param, \
QKV, \
qkv_bias, \
padding_offset, \
batch_size, \
seq_len, \
head_num, \
size_per_head, \
rotary_embedding_dim, \
neox_rotary_style);
template<typename T>
void invokeAddFusedQKVBiasTranspose(T* q_buf,
T* k_buf,
T* v_buf,
PrefixPromptBatchWeightsParam<T> param,
T* QKV,
const T* qkv_bias,
const int* padding_offset,
const int batch_size,
const int seq_len,
const int token_num,
const int head_num,
const int size_per_head,
const int rotary_embedding_dim,
const int neox_rotary_style,
const float* scale,
const int int8_mode,
cudaStream_t stream)
{
// [bs, seq_len, 3, head, Dh]
if (rotary_embedding_dim == 0 && param.max_prefix_prompt_length == 0) {
const int m = token_num;
const int n = head_num * size_per_head;
dim3 block(384);
dim3 grid((int)(ceil(1.0 * m * n / 384)));
add_fusedQKV_bias_transpose_kernel<<<grid, block, 0, stream>>>(q_buf,
k_buf,
v_buf,
QKV,
qkv_bias,
padding_offset,
batch_size,
seq_len,
token_num,
head_num,
size_per_head,
scale,
int8_mode);
}
else {
FT_CHECK_WITH_INFO(int8_mode != 2, "w8a8 not yet implemented with prefix prompt"); // TODO(mseznec)
// To implement rotary embeddings, each thread processes two QKV elems:
dim3 block((size_per_head / Vec_t<T>::size + 31) / 32 * 32);
dim3 grid(token_num + batch_size * param.max_prefix_prompt_length, head_num);
size_t smem_size = neox_rotary_style ? 2 * rotary_embedding_dim * sizeof(T) : 0;
// NOTE: add offset for rotary embedding
// add_fusedQKV_bias_transpose_kernel<<<grid, block, 0, stream>>>(
// q_buf, k_buf, v_buf, param, QKV, qkv_bias, batch_size, seq_len, head_num, size_per_head,
// rotary_embedding_dim);
if (param.max_prefix_prompt_length == 0) {
FUSED_QKV_BIAS_TRANSPOSE_LAUNCH(T, false);
}
else {
FUSED_QKV_BIAS_TRANSPOSE_LAUNCH(T, true);
}
}
}
#define INSTANTIATEADDFUSEDQKVBIASTRANSPOSE(T) \
template void invokeAddFusedQKVBiasTranspose(T* q_buf, \
T* k_buf, \
T* v_buf, \
PrefixPromptBatchWeightsParam<T> param, \
T* QKV, \
const T* qkv_bias, \
const int* padding_offset, \
const int batch_size, \
const int seq_len, \
const int token_num, \
const int head_num, \
const int size_per_head, \
const int rotary_embedding_dim, \
const int neox_rotary_style, \
const float* scale, \
const int int8_mode, \
cudaStream_t stream)
INSTANTIATEADDFUSEDQKVBIASTRANSPOSE(float);
INSTANTIATEADDFUSEDQKVBIASTRANSPOSE(half);
#ifdef ENABLE_BF16
INSTANTIATEADDFUSEDQKVBIASTRANSPOSE(__nv_bfloat16);
#endif
#undef INSTANTIATEADDFUSEDQKVBIASTRANSPOSE
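// Launch-configuration sketch (ours) mirroring the rotary/prefix branch above: one thread per
// two-element vector of the head dimension rounded up to a full warp, one block per
// (token or prompt slot, head), and shared memory only when the NeoX-style rotary path needs
// its transpose scratchpad.
inline void refFusedQKVLaunchConfig(const int    size_per_head,
                                    const int    token_num,
                                    const int    batch_size,
                                    const int    max_prefix_prompt_length,
                                    const int    head_num,
                                    const int    rotary_embedding_dim,
                                    const bool   neox_rotary_style,
                                    const size_t elem_size,  // sizeof(T)
                                    int*         block_x,
                                    int*         grid_x,
                                    int*         grid_y,
                                    size_t*      smem_size)
{
    const int vec_size = 2;  // all Vec_t specializations pack two scalars
    *block_x   = (size_per_head / vec_size + 31) / 32 * 32;
    *grid_x    = token_num + batch_size * max_prefix_prompt_length;
    *grid_y    = head_num;
    *smem_size = neox_rotary_style ? 2 * rotary_embedding_dim * elem_size : 0;
}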
template<typename T>
__global__ void transpose_4d(T* dst,
T* src,
const int dim0,
const int dim1,
const int dim2,
const int dim3,
const int dim0_leading_dim,
const int ite)
{
// transpose from [dim0, dim1, dim2, dim3] to [dim2, X, dim1, dim3]
// where the dimension of X is dim0_leading_dim, and offset is ite * dim0
for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < dim0 * dim1 * dim2 * dim3; i += blockDim.x * gridDim.x) {
int index = i;
const int d3 = index % dim3;
index = (index - d3) / dim3;
const int d2 = index % dim2;
index = (index - d2) / dim2;
const int d1 = index % dim1;
index = (index - d1) / dim1;
const int d0 = index % dim0;
index = (index - d0) / dim0;
dst[d2 * dim0_leading_dim * dim1 * dim3 + (d0 + dim0 * ite) * dim1 * dim3 + d1 * dim3 + d3] = src[i];
}
}
template<>
__global__ void transpose_4d(half* dst,
half* src,
const int dim0,
const int dim1,
const int dim2,
const int dim3,
const int dim0_leading_dim,
const int ite)
{
half2* dst_ptr = (half2*)dst;
half2* src_ptr = (half2*)src;
const int half_dim3 = dim3 / 2;
    // transpose from [dim0, dim1, dim2, half_dim3] to [dim2, X, dim1, half_dim3]
    // where the dimension of X is dim0_leading_dim, and the offset is ite * dim0
for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < dim0 * dim1 * dim2 * half_dim3;
i += blockDim.x * gridDim.x) {
int index = i;
const int d3 = index % half_dim3;
index = (index - d3) / half_dim3;
const int d2 = index % dim2;
index = (index - d2) / dim2;
const int d1 = index % dim1;
index = (index - d1) / dim1;
const int d0 = index % dim0;
index = (index - d0) / dim0;
dst_ptr[d2 * dim0_leading_dim * dim1 * half_dim3 + (d0 + dim0 * ite) * dim1 * half_dim3 + d1 * half_dim3 + d3] =
src_ptr[i];
}
}
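// Host-side reference sketch (ours) of the permutation implemented by transpose_4d above:
// element (d0, d1, d2, d3) of src, laid out as [dim0, dim1, dim2, dim3], lands in dst at
// [d2, ite * dim0 + d0, d1, d3], where the second dimension of dst has extent dim0_leading_dim.
template<typename T>
void refTranspose4d(T*        dst,
                    const T*  src,
                    const int dim0,
                    const int dim1,
                    const int dim2,
                    const int dim3,
                    const int dim0_leading_dim,
                    const int ite)
{
    for (int d0 = 0; d0 < dim0; ++d0) {
        for (int d1 = 0; d1 < dim1; ++d1) {
            for (int d2 = 0; d2 < dim2; ++d2) {
                for (int d3 = 0; d3 < dim3; ++d3) {
                    const int src_idx = ((d0 * dim1 + d1) * dim2 + d2) * dim3 + d3;
                    const int dst_idx = d2 * dim0_leading_dim * dim1 * dim3 + (d0 + dim0 * ite) * dim1 * dim3
                                        + d1 * dim3 + d3;
                    dst[dst_idx] = src[src_idx];
                }
            }
        }
    }
}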
template<typename T>
void invokeTranspose4d(T* dst,
T* src,
const int local_batch_size,
const int seq_len,
const int size_per_head,
const int local_hidden_units,
const int local_head_num,
const int batch_size,
const int ite,
cudaStream_t stream)
{
transpose_4d<<<local_batch_size * seq_len * local_hidden_units / 512, 512 / (4 / (sizeof(T))), 0, stream>>>(
dst, src, local_batch_size, local_head_num, seq_len, size_per_head, batch_size, ite);
}
#define INSTANTIATETRANSPOSE4D(T) \
template void invokeTranspose4d(T* dst, \
T* src, \
const int local_batch_size, \
const int seq_len, \
const int size_per_head, \
const int local_hidden_units, \
const int local_head_num, \
const int batch_size, \
const int ite, \
cudaStream_t stream)
INSTANTIATETRANSPOSE4D(float);
INSTANTIATETRANSPOSE4D(half);
#undef INSTANTIATETRANSPOSE4D
template<typename T>
__global__ void transpose_4d_batch_major_k_cache(
T* k_dst, const T* k_src, const int head_num, const int size_per_head, const int seq_len, const int max_seq_len)
{
const int batch_id = blockIdx.y;
const int head_id = blockIdx.z;
constexpr int X_ELEMS = (sizeof(T) == 4) ? 4 : 8;
auto key_src = reinterpret_cast<const uint4*>(k_src + batch_id * head_num * size_per_head * seq_len
+ head_id * size_per_head * seq_len);
auto key_dst = reinterpret_cast<uint4*>(k_dst + batch_id * head_num * size_per_head * max_seq_len
+ head_id * size_per_head * max_seq_len);
const int out_idx = blockIdx.x * blockDim.x + threadIdx.x;
int size_per_head_div_x = size_per_head / X_ELEMS;
if (out_idx >= size_per_head_div_x * max_seq_len) {
return;
}
int idx = out_idx;
const int k_seq_len_id = idx % max_seq_len;
idx = (idx - k_seq_len_id) / max_seq_len;
const int k_head_size_id = idx % size_per_head_div_x;
if (k_seq_len_id < seq_len) {
key_dst[out_idx] = key_src[k_seq_len_id * size_per_head_div_x + k_head_size_id];
}
}
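// Host-side sketch (ours) of the per-(batch, head) k-cache remapping above, in units of
// 16-byte uint4 vectors: the source is laid out [seq_len, size_per_head / X_ELEMS] while the
// destination cache is [size_per_head / X_ELEMS, max_seq_len], so consecutive timesteps of a
// given x-chunk become contiguous. The returned source index is only meaningful when the
// decoded k_seq_len_id is below seq_len, exactly as guarded in the kernel.
inline int refKCacheSrcVecIndex(const int dst_vec_idx, const int max_seq_len, const int size_per_head_div_x)
{
    const int k_seq_len_id   = dst_vec_idx % max_seq_len;
    const int k_head_size_id = (dst_vec_idx / max_seq_len) % size_per_head_div_x;
    return k_seq_len_id * size_per_head_div_x + k_head_size_id;
}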
template<typename T>
__global__ void transpose_4d_batch_major_v_cache(
T* v_dst, const T* v_src, const int head_num, const int size_per_head, const int seq_len, const int max_seq_len)
{
const int batch_id = blockIdx.y;
const int head_id = blockIdx.z;
// 16 byte loads will handle "x" dimension
auto val_src = reinterpret_cast<const uint4*>(v_src + batch_id * head_num * size_per_head * seq_len
+ head_id * size_per_head * seq_len);
auto val_dst = reinterpret_cast<uint4*>(v_dst + batch_id * head_num * size_per_head * max_seq_len
+ head_id * size_per_head * max_seq_len);
// idx is over output dimension L * size_per_head / x for values
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
constexpr int X_ELEMS = (sizeof(T) == 4) ? 4 : 8;
const int size_per_head_div_x = size_per_head / X_ELEMS;
if (idx >= size_per_head_div_x * seq_len) {
return;
}
val_dst[idx] = val_src[idx];
}
template<typename T>
void invokeTranspose4dBatchMajor(T* k_dst,
T* v_dst,
const T* k_src,
const T* v_src,
const int local_batch_size,
const int seq_len,
const int max_seq_len,
const int size_per_head,
const int local_head_num,
cudaStream_t stream)
{
constexpr int block_sz = 128;
constexpr int x = (sizeof(T) == 4) ? 4 : 8;
int size = max_seq_len * size_per_head / x;
dim3 grid((size + block_sz - 1) / block_sz, local_batch_size, local_head_num);
dim3 grid_v((seq_len * size_per_head / x + block_sz - 1) / block_sz, local_batch_size, local_head_num);
transpose_4d_batch_major_k_cache<<<grid, block_sz, 0, stream>>>(
k_dst, k_src, local_head_num, size_per_head, seq_len, max_seq_len);
transpose_4d_batch_major_v_cache<<<grid_v, block_sz, 0, stream>>>(
v_dst, v_src, local_head_num, size_per_head, seq_len, max_seq_len);
}
#define INSTANTIATETRANSPOSE4DBATCHMAJOR(T) \
template void invokeTranspose4dBatchMajor(T* k_dst, \
T* v_dst, \
const T* k_src, \
const T* v_src, \
const int local_batch_size, \
const int seq_len, \
const int max_seq_len, \
const int size_per_head, \
const int local_head_num, \
cudaStream_t stream)
INSTANTIATETRANSPOSE4DBATCHMAJOR(float);
INSTANTIATETRANSPOSE4DBATCHMAJOR(half);
#ifdef ENABLE_BF16
INSTANTIATETRANSPOSE4DBATCHMAJOR(__nv_bfloat16);
#endif
#undef INSTANTIATETRANSPOSE4DBATCHMAJOR
template<typename T>
__global__ void addRelativeAttentionBias(
T* qk_buf, const T* relative_attention_bias, const int batch_size, const int head_num, const int seq_len)
{
for (int i = threadIdx.x; i < batch_size * seq_len; i += blockDim.x) {
int batch_id = i / seq_len;
int seq_id = i % seq_len;
const int bias_index = blockIdx.x * seq_len + seq_id;
const int qk_index = batch_id * gridDim.x * seq_len + bias_index;
qk_buf[qk_index] = add(qk_buf[qk_index], relative_attention_bias[bias_index]);
}
}
template<typename T>
void invokeAddRelativeAttentionBias(T* qk_buf,
const T* relative_attention_bias,
const int batch_size,
const int head_num,
const int seq_len,
cudaStream_t stream)
{
// qk_buf: [batch_size, head_num, seq_len, seq_len]
// relative_attention_bias: [1, head_num, seq_len, seq_len]
dim3 grid(head_num * seq_len);
dim3 block(512);
using T2 = typename TypeConverter<T>::Type;
#ifdef ENABLE_BF16
const bool is_half2 = (std::is_same<T, half>::value || std::is_same<T, __nv_bfloat16>::value) && (seq_len % 2 == 0);
#else
const bool is_half2 = (std::is_same<T, half>::value) && (seq_len % 2 == 0);
#endif
if (is_half2) {
addRelativeAttentionBias<T2><<<grid, block, 0, stream>>>(
(T2*)qk_buf, (const T2*)relative_attention_bias, batch_size, head_num, seq_len / 2);
}
else {
addRelativeAttentionBias<<<grid, block, 0, stream>>>(
qk_buf, relative_attention_bias, batch_size, head_num, seq_len);
}
}
#define INSTANTIATEADDRELATIVEATTENTIONBIAS(T) \
template void invokeAddRelativeAttentionBias(T* qk_buf, \
const T* relative_attention_bias, \
const int batch_size, \
const int head_num, \
const int seq_len, \
cudaStream_t stream)
INSTANTIATEADDRELATIVEATTENTIONBIAS(float);
INSTANTIATEADDRELATIVEATTENTIONBIAS(half);
#ifdef ENABLE_BF16
INSTANTIATEADDRELATIVEATTENTIONBIAS(__nv_bfloat16);
#endif
#undef INSTANTIATEADDRELATIVEATTENTIONBIAS
/******************* invokeAddHead3SizeQKVBias ***********************/
// m = batch*window_num*window_len
// mm_qkv is [m, head*3*size_per_head] row-major
// bias_qkv is [head*3*size_per_head]
// q_buf_, k_buf_, v_buf_ is [batch*window_num, num_head, window_len, size_per_head] row-major
// grid(window_len, window_num, 3*batch);
// block(num_head * size_per_head)
template<typename T>
__global__ void add_head3Size_QKV_bias(const T* mm_qkv,
const T* bias_qkv,
T* q_buf_,
T* k_buf_,
T* v_buf_,
const int batch,
const int window_num,
const int window_len,
const int num_head,
const int size_per_head)
{
T* buf_ptr;
int qkv_id = blockIdx.z / batch;
if (qkv_id == 0) {
buf_ptr = q_buf_;
}
else if (qkv_id == 1) {
buf_ptr = k_buf_;
}
else {
buf_ptr = v_buf_;
}
const int batch_id = blockIdx.z % batch;
const int token_id = blockIdx.x;
const int window_id = blockIdx.y;
const int head_id = threadIdx.x / size_per_head;
const int id_in_head = threadIdx.x % size_per_head;
const int bias_idx = (head_id * 3 + qkv_id) * size_per_head + id_in_head;
const T bias = ldg(bias_qkv + bias_idx);
const int input_idx =
((batch_id * window_num + window_id) * window_len + token_id) * num_head * 3 * size_per_head + bias_idx;
T tmp = mm_qkv[input_idx] + bias;
int target_id = (((batch_id * window_num + window_id) * num_head + head_id) * window_len + token_id) * size_per_head
+ id_in_head;
buf_ptr[target_id] = tmp;
}
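// Host-side sketch (ours) of the index arithmetic above for the windowed (Swin-style) layout:
// the fused GEMM output has shape [batch * window_num * window_len, num_head * 3 * size_per_head],
// and each biased element is scattered into q/k/v buffers of shape
// [batch * window_num, num_head, window_len, size_per_head].
inline void refHead3SizeQKVIndices(const int batch_id,
                                   const int window_id,
                                   const int token_id,
                                   const int head_id,
                                   const int id_in_head,
                                   const int qkv_id,  // 0 = Q, 1 = K, 2 = V
                                   const int window_num,
                                   const int window_len,
                                   const int num_head,
                                   const int size_per_head,
                                   int*      input_idx,
                                   int*      target_idx)
{
    const int bias_idx = (head_id * 3 + qkv_id) * size_per_head + id_in_head;
    *input_idx = ((batch_id * window_num + window_id) * window_len + token_id) * num_head * 3 * size_per_head
                 + bias_idx;
    *target_idx =
        (((batch_id * window_num + window_id) * num_head + head_id) * window_len + token_id) * size_per_head
        + id_in_head;
}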
// for float2, size_per_head /= 2
// m = batch*window_num*window_len
// mm_qkv is [m, head*3*size_per_head] row-major
// bias_qkv is [head*3*size_per_head]
// q_buf_, k_buf_, v_buf_ is [batch*window_num, num_head, window_len, size_per_head] row-major
// grid(window_len, window_num, 3*batch);
// block(num_head * size_per_head)
template<>
__global__ void add_head3Size_QKV_bias(const float2* mm_qkv,
const float2* bias_qkv,
float2* q_buf_,
float2* k_buf_,
float2* v_buf_,
const int batch,
const int window_num,
const int window_len,
const int num_head,
const int size_per_head)
{
float2* buf_ptr;
int qkv_id = blockIdx.z / batch;
if (qkv_id == 0) {
buf_ptr = q_buf_;
}
else if (qkv_id == 1) {
buf_ptr = k_buf_;
}
else {
buf_ptr = v_buf_;
}
const int batch_id = blockIdx.z % batch;
const int token_id = blockIdx.x;
const int window_id = blockIdx.y;
const int head_id = threadIdx.x / size_per_head;
const int id_in_head = threadIdx.x % size_per_head;
const int bias_idx = (head_id * 3 + qkv_id) * size_per_head + id_in_head;
const float2 bias = ldg(bias_qkv + bias_idx);
const int input_idx =
((batch_id * window_num + window_id) * window_len + token_id) * num_head * 3 * size_per_head + bias_idx;
float2 tmp = mm_qkv[input_idx];
tmp.x += bias.x;
tmp.y += bias.y;
int target_id = (((batch_id * window_num + window_id) * num_head + head_id) * window_len + token_id) * size_per_head
+ id_in_head;
buf_ptr[target_id] = tmp;
}
// for half2, size_per_head /= 2
// m = batch*window_num*window_len
// mm_qkv is [m, head*3*size_per_head] row-major
// bias_qkv is [head*3*size_per_head]
// q_buf_, k_buf_, v_buf_ is [batch*window_num, num_head, window_len, size_per_head] row-major
// grid(window_len, window_num, batch);
// block(num_head * size_per_head)
template<>
__global__ void add_head3Size_QKV_bias(const half2* mm_qkv,
const half2* bias_qkv,
half2* q_buf_,
half2* k_buf_,
half2* v_buf_,
const int batch,
const int window_num,
const int window_len,
const int num_head,
const int size_per_head)
{
const int batch_id = blockIdx.z;
const int token_id = blockIdx.x;
const int window_id = blockIdx.y;
const int head_id = threadIdx.x / size_per_head;
const int id_in_head = threadIdx.x % size_per_head;
const int input_offset =
((batch_id * window_num + window_id) * window_len + token_id) * num_head * 3 * size_per_head;
const int target_id =
(((batch_id * window_num + window_id) * num_head + head_id) * window_len + token_id) * size_per_head
+ id_in_head;
int qkv_id = 0;
int bias_idx = (head_id * 3 + qkv_id) * size_per_head + id_in_head;
half2 bias = __ldg(bias_qkv + bias_idx);
int input_idx = input_offset + bias_idx;
half2 tmp = mm_qkv[input_idx];
tmp = __hadd2(tmp, bias);
q_buf_[target_id] = tmp;
qkv_id = 1;
bias_idx = (head_id * 3 + qkv_id) * size_per_head + id_in_head;
bias = __ldg(bias_qkv + bias_idx);
input_idx = input_offset + bias_idx;
tmp = mm_qkv[input_idx];
tmp = __hadd2(tmp, bias);
k_buf_[target_id] = tmp;
qkv_id = 2;
bias_idx = (head_id * 3 + qkv_id) * size_per_head + id_in_head;
bias = __ldg(bias_qkv + bias_idx);
input_idx = input_offset + bias_idx;
tmp = mm_qkv[input_idx];
tmp = __hadd2(tmp, bias);
v_buf_[target_id] = tmp;
}
#ifdef ENABLE_BF16
template<>
__global__ void add_head3Size_QKV_bias(const __nv_bfloat162* mm_qkv,
const __nv_bfloat162* bias_qkv,
__nv_bfloat162* q_buf_,
__nv_bfloat162* k_buf_,
__nv_bfloat162* v_buf_,
const int batch,
const int window_num,
const int window_len,
const int num_head,
const int size_per_head)
{
const int batch_id = blockIdx.z;
const int token_id = blockIdx.x;
const int window_id = blockIdx.y;
const int head_id = threadIdx.x / size_per_head;
const int id_in_head = threadIdx.x % size_per_head;
const int input_offset =
((batch_id * window_num + window_id) * window_len + token_id) * num_head * 3 * size_per_head;
const int target_id =
(((batch_id * window_num + window_id) * num_head + head_id) * window_len + token_id) * size_per_head
+ id_in_head;
int qkv_id = 0;
int bias_idx = (head_id * 3 + qkv_id) * size_per_head + id_in_head;
__nv_bfloat162 bias = ldg(bias_qkv + bias_idx);
int input_idx = input_offset + bias_idx;
__nv_bfloat162 tmp = mm_qkv[input_idx];
tmp = bf16hadd2(tmp, bias);
q_buf_[target_id] = tmp;
qkv_id = 1;
bias_idx = (head_id * 3 + qkv_id) * size_per_head + id_in_head;
bias = ldg(bias_qkv + bias_idx);
input_idx = input_offset + bias_idx;
tmp = mm_qkv[input_idx];
tmp = bf16hadd2(tmp, bias);
k_buf_[target_id] = tmp;
qkv_id = 2;
bias_idx = (head_id * 3 + qkv_id) * size_per_head + id_in_head;
bias = ldg(bias_qkv + bias_idx);
input_idx = input_offset + bias_idx;
tmp = mm_qkv[input_idx];
tmp = bf16hadd2(tmp, bias);
v_buf_[target_id] = tmp;
}
#endif
template<typename T>
void invokeAddHead3SizeQKVBias(const T* mm_qkv,
const T* bias_qkv,
T* q_buf_,
T* k_buf_,
T* v_buf_,
const int batch,
const int window_num,
const int window_len,
const int num_head,
const int size_per_head,
cudaStream_t stream)
{
if (std::is_same<T, float>::value) {
dim3 grid(window_len, window_num, 3 * batch);
dim3 block(num_head * size_per_head);
if (block.x < 1024) {
add_head3Size_QKV_bias<<<grid, block, 0, stream>>>(
mm_qkv, bias_qkv, q_buf_, k_buf_, v_buf_, batch, window_num, window_len, num_head, size_per_head);
}
else if ((block.x % 2 == 0) && (block.x / 2 < 1024)) {
block.x /= 2;
add_head3Size_QKV_bias<<<grid, block, 0, stream>>>((const float2*)mm_qkv,
(const float2*)bias_qkv,
(float2*)q_buf_,
(float2*)k_buf_,
(float2*)v_buf_,
batch,
window_num,
window_len,
num_head,
size_per_head / 2);
}
else {
printf("[ERROR][invokeAddHead3SizeQKVBias] unsupported block.x!\n");
exit(-1);
}
}
#ifdef ENABLE_BF16
else if (std::is_same<T, half>::value || std::is_same<T, __nv_bfloat16>::value) {
#else
else if (std::is_same<T, half>::value) {
#endif
dim3 grid(window_len, window_num, batch);
dim3 block(num_head * size_per_head / 2);
using T2 = typename TypeConverter<T>::Type;  // half2 or __nv_bfloat162
if (block.x > 1024) {
printf("[ERROR][invokeAddHead3SizeQKVBias] block.x > 1024!\n");
exit(-1);
}
add_head3Size_QKV_bias<<<grid, block, 0, stream>>>((const T2*)mm_qkv,
(const T2*)bias_qkv,
(T2*)q_buf_,
(T2*)k_buf_,
(T2*)v_buf_,
batch,
window_num,
window_len,
num_head,
size_per_head / 2);
}
}
#define INSTANTIATEADDHEAD3SIZEQKVBIAS(T) \
template void invokeAddHead3SizeQKVBias<T>(const T* mm_qkv, \
const T* bias_qkv, \
T* q_buf_, \
T* k_buf_, \
T* v_buf_, \
const int batch, \
const int window_num, \
const int window_len, \
const int num_head, \
const int size_per_head, \
cudaStream_t stream)
INSTANTIATEADDHEAD3SIZEQKVBIAS(float);
INSTANTIATEADDHEAD3SIZEQKVBIAS(half);
#ifdef ENABLE_BF16
INSTANTIATEADDHEAD3SIZEQKVBIAS(__nv_bfloat16);
#endif
#undef INSTANTIATEADDHEAD3SIZEQKVBIAS
/******************* invokeMaskedSoftMaxWithRelPosBias ***********************/
// grid = (window_len/word_per_thread, window_num*num_head, batch_size)
// block.x = max(32, (window_len + 31)/32*32)
// qk_buf is [batch, window_num, num_head, window_len, window_len]
// attn_mask is [window_num, window_len, window_len] + row-major
// relative_pos_bias is [num_head, window_len, window_len] + row-major
template<typename T>
__global__ void softmax_withRelPosBias_element1_kernel(T* qk_buf,
const T* attn_mask,
const T* relative_pos_bias,
const int batch_size,
const int num_head,
const int window_num,
const int window_len,
const int window_len_x_window_len,
const float qk_scale)
{
bool qual = threadIdx.x < window_len;
for (int window_id = blockIdx.x; window_id < window_len; window_id += gridDim.x) {
float tmp = -1e20f;
__shared__ float s_mean, s_max;
int qk_offset;
if (qual) {
const int offset_in_window = window_id * window_len + threadIdx.x;
qk_offset = (blockIdx.z * gridDim.y + blockIdx.y) * window_len_x_window_len + offset_in_window;
const int relative_pos_bias_offset = (blockIdx.y % num_head) * window_len_x_window_len + offset_in_window;
float mask_val =
(attn_mask == nullptr) ?
0.0f :
static_cast<float>(
ldg(attn_mask + ((blockIdx.y / num_head) * window_len_x_window_len + offset_in_window)));
tmp = qk_scale * static_cast<float>(qk_buf[qk_offset]) + mask_val
+ static_cast<float>(ldg(relative_pos_bias + relative_pos_bias_offset));
}
float max_val = blockReduceMax<float>(tmp);
if (threadIdx.x == 0) {
s_max = max_val;
}
__syncthreads();
float qk_tmp = qual ? __expf(tmp - s_max) : 0.0f;
float sum_val = blockReduceSum<float>(qk_tmp);
if (threadIdx.x == 0) {
s_mean = sum_val + 1e-6f;
s_mean = __fdividef(1.0f, s_mean);
}
__syncthreads();
if (qual) {
qk_buf[qk_offset] = (T)(qk_tmp * s_mean);
}
}
}
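// Host-side reference (ours) of the per-row computation above: scale the logits, add the
// optional attention mask and the relative position bias, then apply a numerically stable
// softmax over one window row; the 1e-6f term matches the kernel's denominator guard.
inline void refSoftmaxWithRelPosBiasRow(
    float* row, const float* mask, const float* bias, const int window_len, const float qk_scale)
{
    float max_val = -1e20f;
    for (int i = 0; i < window_len; ++i) {
        row[i]  = qk_scale * row[i] + (mask == nullptr ? 0.0f : mask[i]) + bias[i];
        max_val = row[i] > max_val ? row[i] : max_val;
    }
    float sum = 0.0f;
    for (int i = 0; i < window_len; ++i) {
        row[i] = expf(row[i] - max_val);
        sum += row[i];
    }
    const float inv_sum = 1.0f / (sum + 1e-6f);
    for (int i = 0; i < window_len; ++i) {
        row[i] *= inv_sum;
    }
}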
// grid = (window_len/word_per_thread, window_num*num_head, batch_size)
// block.x = max(32, (window_len/2 + 31)/32*32)
// qk_buf is [batch, window_num, num_head, window_len, window_len]
// attn_mask is [window_num, window_len, window_len] + row-major
// relative_pos_bias is [num_head, window_len, window_len] + row-major
template<typename T2, typename T>
__global__ void softmax_withRelPosBias_element2_kernel(T2* qk_buf,
const T2* attn_mask,
const T2* relative_pos_bias,
const int batch_size,
const int num_head,
const int window_num,
const int window_len,
const int window_len_x_window_len,
const float qk_scale)
{
const int window_len_2 = window_len / 2;
const int tidx = threadIdx.x;
bool qual = tidx < window_len_2;
const T2 zero = {T(0.0f), T(0.0f)};
const int bdim = blockDim.x;
for (int window_id = blockIdx.x; window_id < window_len; window_id += gridDim.x) {
float tmp = -1e20f;
__shared__ float s_mean, s_max;
int qk_offset;
float2 local_qk_val;
T2 qk_val;
if (qual) {
const int offset_in_window = window_id * window_len + 2 * tidx;
qk_offset = ((blockIdx.z * gridDim.y + blockIdx.y) * window_len_x_window_len + offset_in_window) / 2;
const int relative_pos_bias_offset =
((blockIdx.y % num_head) * window_len_x_window_len + offset_in_window) / 2;
T2 mask_val =
(attn_mask == nullptr) ?
zero :
ldg(attn_mask + ((blockIdx.y / num_head) * window_len_x_window_len + offset_in_window) / 2);
qk_val = qk_buf[qk_offset];
local_qk_val.x = static_cast<float>(qk_val.x);
local_qk_val.y = static_cast<float>(qk_val.y);
const T2 bias_val = ldg(relative_pos_bias + relative_pos_bias_offset);
local_qk_val.x =
qk_scale * local_qk_val.x + static_cast<float>(mask_val.x) + static_cast<float>(bias_val.x);
local_qk_val.y =
qk_scale * local_qk_val.y + static_cast<float>(mask_val.y) + static_cast<float>(bias_val.y);
tmp = local_qk_val.x > local_qk_val.y ? local_qk_val.x : local_qk_val.y;
}
float max_val = bdim <= 32 ? warpReduceMax<float>(tmp) : blockReduceMax<float>(tmp);
if (tidx == 0) {
s_max = max_val;
}
__syncthreads();
local_qk_val.x = qual ? __expf(local_qk_val.x - s_max) : 0.0f;
local_qk_val.y = qual ? __expf(local_qk_val.y - s_max) : 0.0f;
float sum_val = bdim <= 32 ? warpReduceSum<float>(local_qk_val.x + local_qk_val.y) :
blockReduceSum<float>(local_qk_val.x + local_qk_val.y);
if (tidx == 0) {
s_mean = sum_val + 1e-6f;
s_mean = __fdividef(1.0f, s_mean);
}
__syncthreads();
if (qual) {
local_qk_val.x = local_qk_val.x * s_mean;
local_qk_val.y = local_qk_val.y * s_mean;
qk_val.x = T(local_qk_val.x);
qk_val.y = T(local_qk_val.y);
qk_buf[qk_offset] = qk_val;
}
}
}
// grid = (window_len/word_per_thread, window_num*num_head, batch_size)
// block.x = max(32, (window_len/4 + 31)/32*32)
// qk_buf is [batch, window_num, num_head, window_len, window_len]
// attn_mask is [window_num, window_len, window_len] + row-major
// relative_pos_bias is [num_head, window_len, window_len] + row-major
template<typename T4, typename T>
__global__ void softmax_withRelPosBias_element4_kernel(T4* qk_buf,
const T4* attn_mask,
const T4* relative_pos_bias,
const int batch_size,
const int num_head,
const int window_num,
const int window_len,
const int window_len_x_window_len,
const float qk_scale)
{
const int window_len_4 = window_len / 4;
const int tidx = threadIdx.x;
bool qual = tidx < window_len_4;
const T4 zero = {T(0.0f), T(0.0f), T(0.0f), T(0.0f)};
const int bdim = blockDim.x;
for (int window_id = blockIdx.x; window_id < window_len; window_id += gridDim.x) {
float tmp = -1e20f;
__shared__ float s_mean, s_max;
int qk_offset;
float4 local_qk_val;
T4 qk_val;
if (qual) {
const int offset_in_window = window_id * window_len + 4 * tidx;
qk_offset = ((blockIdx.z * gridDim.y + blockIdx.y) * window_len_x_window_len + offset_in_window) / 4;
const int relative_pos_bias_offset =
((blockIdx.y % num_head) * window_len_x_window_len + offset_in_window) / 4;
T4 mask_val = (attn_mask == nullptr) ?
zero :
attn_mask[((blockIdx.y / num_head) * window_len_x_window_len + offset_in_window) / 4];
qk_val = qk_buf[qk_offset];
local_qk_val.x = static_cast<float>(qk_val.x);
local_qk_val.y = static_cast<float>(qk_val.y);
local_qk_val.z = static_cast<float>(qk_val.z);
local_qk_val.w = static_cast<float>(qk_val.w);
const T4 bias_val = relative_pos_bias[relative_pos_bias_offset];
local_qk_val.x =
qk_scale * local_qk_val.x + static_cast<float>(mask_val.x) + static_cast<float>(bias_val.x);
local_qk_val.y =
qk_scale * local_qk_val.y + static_cast<float>(mask_val.y) + static_cast<float>(bias_val.y);
local_qk_val.z =
qk_scale * local_qk_val.z + static_cast<float>(mask_val.z) + static_cast<float>(bias_val.z);
local_qk_val.w =
qk_scale * local_qk_val.w + static_cast<float>(mask_val.w) + static_cast<float>(bias_val.w);
tmp = local_qk_val.x > local_qk_val.y ? local_qk_val.x : local_qk_val.y;
tmp = tmp > local_qk_val.z ? tmp : local_qk_val.z;
tmp = tmp > local_qk_val.w ? tmp : local_qk_val.w;
}
float max_val = bdim <= 32 ? warpReduceMax<float>(tmp) : blockReduceMax<float>(tmp);
if (tidx == 0) {
s_max = max_val;
}
__syncthreads();
local_qk_val.x = qual ? __expf(local_qk_val.x - s_max) : 0.0f;
local_qk_val.y = qual ? __expf(local_qk_val.y - s_max) : 0.0f;
local_qk_val.z = qual ? __expf(local_qk_val.z - s_max) : 0.0f;
local_qk_val.w = qual ? __expf(local_qk_val.w - s_max) : 0.0f;
float sum_val = bdim <= 32 ?
warpReduceSum<float>(local_qk_val.x + local_qk_val.y + local_qk_val.z + local_qk_val.w) :
blockReduceSum<float>(local_qk_val.x + local_qk_val.y + local_qk_val.z + local_qk_val.w);
if (tidx == 0) {
s_mean = sum_val + 1e-6f;
s_mean = __fdividef(1.0f, s_mean);
}
__syncthreads();
if (qual) {
local_qk_val.x = local_qk_val.x * s_mean;
local_qk_val.y = local_qk_val.y * s_mean;
local_qk_val.z = local_qk_val.z * s_mean;
local_qk_val.w = local_qk_val.w * s_mean;
qk_val.x = T(local_qk_val.x);
qk_val.y = T(local_qk_val.y);
qk_val.z = T(local_qk_val.z);
qk_val.w = T(local_qk_val.w);
qk_buf[qk_offset] = qk_val;
}
}
}
template<typename T>
void invokeMaskedSoftMaxWithRelPosBias(T* qk_buf,
const T* attn_mask,
const T* relative_pos_bias,
const int batch_size,
const int num_head,
const int window_num,
const int window_len,
float qk_scale,
cudaStream_t stream)
{
const int word_per_thread = 1;
dim3 grid((window_len + word_per_thread - 1) / word_per_thread, window_num * num_head, batch_size);
if ((window_len % 4 == 0) && window_len / 4 >= 32) {
dim3 block((window_len / 4 + 31) / 32 * 32);
if (std::is_same<T, float>::value) {
softmax_withRelPosBias_element4_kernel<float4, float>
<<<grid, block, 0, stream>>>((float4*)qk_buf,
(const float4*)attn_mask,
(const float4*)relative_pos_bias,
batch_size,
num_head,
window_num,
window_len,
window_len * window_len,
qk_scale);
}
else if (std::is_same<T, half>::value) {
softmax_withRelPosBias_element4_kernel<half4, half>
<<<grid, block, 0, stream>>>((half4*)qk_buf,
(const half4*)attn_mask,
(const half4*)relative_pos_bias,
batch_size,
num_head,
window_num,
window_len,
window_len * window_len,
qk_scale);
}
#ifdef ENABLE_BF16
else {
dim3 block((window_len + 31) / 32 * 32);
softmax_withRelPosBias_element1_kernel<<<grid, block, 0, stream>>>(qk_buf,
attn_mask,
relative_pos_bias,
batch_size,
num_head,
window_num,
window_len,
window_len * window_len,
qk_scale);
}
#endif
}
else if (window_len % 2 == 0) {
dim3 block((window_len / 2 + 31) / 32 * 32);
if (std::is_same<T, float>::value) {
softmax_withRelPosBias_element2_kernel<float2, float>
<<<grid, block, 0, stream>>>((float2*)qk_buf,
(const float2*)attn_mask,
(const float2*)relative_pos_bias,
batch_size,
num_head,
window_num,
window_len,
window_len * window_len,
qk_scale);
}
else if (std::is_same<T, half>::value) {
softmax_withRelPosBias_element2_kernel<half2, half>
<<<grid, block, 0, stream>>>((half2*)qk_buf,
(const half2*)attn_mask,
(const half2*)relative_pos_bias,
batch_size,
num_head,
window_num,
window_len,
window_len * window_len,
qk_scale);
}
#ifdef ENABLE_BF16
else {
dim3 block((window_len + 31) / 32 * 32);
softmax_withRelPosBias_element1_kernel<<<grid, block, 0, stream>>>(qk_buf,
attn_mask,
relative_pos_bias,
batch_size,
num_head,
window_num,
window_len,
window_len * window_len,
qk_scale);
}
#endif
}
else {
dim3 block((window_len + 31) / 32 * 32);
softmax_withRelPosBias_element1_kernel<<<grid, block, 0, stream>>>(qk_buf,
attn_mask,
relative_pos_bias,
batch_size,
num_head,
window_num,
window_len,
window_len * window_len,
qk_scale);
}
}
#define INSTANTIATEMASKEDSOFTMAXWITHRELPOSBIAS(T) \
template void invokeMaskedSoftMaxWithRelPosBias(T* qk_buf, \
const T* attn_mask, \
const T* relative_pos_bias, \
const int batch_size, \
const int num_head, \
const int window_num, \
const int window_len, \
const float qk_scale, \
cudaStream_t stream)
INSTANTIATEMASKEDSOFTMAXWITHRELPOSBIAS(float);
INSTANTIATEMASKEDSOFTMAXWITHRELPOSBIAS(half);
#ifdef ENABLE_BF16
INSTANTIATEMASKEDSOFTMAXWITHRELPOSBIAS(__nv_bfloat16);
#endif
#undef INSTANTIATEMASKEDSOFTMAXWITHRELPOSBIAS
template<typename T>
__global__ void transpose_attentions(
T* attentions_out, const T* attentions_in, size_t batch_size, size_t num_layers, size_t num_heads, size_t seq_len)
{
// attentions_in shape [B, H, S, S]
// attentions_out shape [B, L, H, S, S].
// Note that we write the L dimension as if it was index 0.
// In reality, the pointer has already been shifted to point to the correct layer.
const auto batch_idx = blockIdx.x;
const auto head_idx = blockIdx.y;
const auto dst_offset = (batch_idx * num_layers * num_heads + head_idx) * seq_len * seq_len;
const auto src_offset = (batch_idx * num_heads + head_idx) * seq_len * seq_len;
for (auto x = threadIdx.x; x < seq_len * seq_len; x += blockDim.x) {
attentions_out[dst_offset + x] = attentions_in[src_offset + x];
}
}
template<typename T>
void invokeTransposeAttentions(Tensor& attentions_out, const Tensor& attentions_in, cudaStream_t stream)
{
const size_t batch_size = attentions_in.shape[0];
const size_t num_heads = attentions_in.shape[1];
const size_t seq_len = attentions_in.shape[2];
const size_t num_layers = attentions_out.shape[1];
const dim3 gridSize(batch_size, num_heads);
const dim3 blockSize(512);
transpose_attentions<<<gridSize, blockSize, 0, stream>>>(
attentions_out.getPtr<T>(), attentions_in.getPtr<const T>(), batch_size, num_layers, num_heads, seq_len);
}
#define INSTANTIATETRANSPOSEATTENTIONS(T) \
template void invokeTransposeAttentions<T>( \
Tensor & attentions_out, const Tensor& attentions_in, cudaStream_t stream)
INSTANTIATETRANSPOSEATTENTIONS(float);
INSTANTIATETRANSPOSEATTENTIONS(half);
#ifdef ENABLE_BF16
INSTANTIATETRANSPOSEATTENTIONS(__nv_bfloat16);
#endif
#undef INSTANTIATETRANSPOSEATTENTIONS
} // namespace fastertransformer
|
a328526bead0ea6bcd81739b6fa39b770874b1c5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "saber/utils.h"
#include "saber/core/common.h"
#include "saber/core/tensor.h"
#include <vector>
#include "thrust/functional.h"
#include "thrust/sort.h"
namespace anakin {
namespace saber {
// caffe util_nms.cu
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
int const threadsPerBlock = sizeof(unsigned long long) * 8;
__device__ inline float devIoU(float const *const a, float const *const b) {
float left = max(a[0], b[0]), right = min(a[2], b[2]);
float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
return interS / (Sa + Sb - interS);
}
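// Host-side IoU reference (ours) matching devIoU above; note the legacy "+1" convention,
// which treats box coordinates as inclusive pixel indices.
inline float ref_iou(const float* a, const float* b) {
    float left = fmaxf(a[0], b[0]), right = fminf(a[2], b[2]);
    float top = fmaxf(a[1], b[1]), bottom = fminf(a[3], b[3]);
    float width = fmaxf(right - left + 1.f, 0.f), height = fmaxf(bottom - top + 1.f, 0.f);
    float interS = width * height;
    float Sa = (a[2] - a[0] + 1.f) * (a[3] - a[1] + 1.f);
    float Sb = (b[2] - b[0] + 1.f) * (b[3] - b[1] + 1.f);
    return interS / (Sa + Sb - interS);
}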
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
const float *dev_boxes, unsigned long long *dev_mask) {
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const float *cur_box = dev_boxes + cur_box_idx * 5;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
const std::vector<bool> nms_voting0(const float *boxes_dev, unsigned long long *mask_dev,
int boxes_num, float nms_overlap_thresh,
const int max_candidates,
const int top_n) {
if ((max_candidates > 0) && (boxes_num > max_candidates)) {
boxes_num = max_candidates;
}
// float *boxes_dev = NULL;
// unsigned long long *mask_dev = NULL;
const int col_blocks = DIVUP(boxes_num, threadsPerBlock);
// CUDA_CHECK(hipMalloc(&mask_dev,
// boxes_num * col_blocks * sizeof(unsigned long long)));
dim3 blocks(DIVUP(boxes_num, threadsPerBlock),
DIVUP(boxes_num, threadsPerBlock));
dim3 threads(threadsPerBlock);
nms_kernel<<<blocks, threads>>>(boxes_num,
nms_overlap_thresh,
boxes_dev,
mask_dev);
std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
CUDA_CHECK(hipMemcpy(&mask_host[0],
mask_dev,
sizeof(unsigned long long) * boxes_num * col_blocks,
hipMemcpyDeviceToHost));
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
std::vector<bool> mask(boxes_num, false);
int num_to_keep = 0;
for (int i = 0; i < boxes_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
++num_to_keep;
mask[i] = true;
unsigned long long *p = &mask_host[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
if ((top_n > 0) && (num_to_keep >= top_n)) {
break;
}
}
}
// CUDA_CHECK(hipFree(mask_dev));
return mask;
}
template <typename Dtype>
__global__ void rpn_cmp_conf_bbox_kernel(
const int threads, const int num_anchors,
const int map_height, const int map_width,
const Dtype input_height, const Dtype input_width,
const Dtype heat_map_a, const Dtype heat_map_b,
const Dtype allow_border, const Dtype allow_border_ratio,
const Dtype min_size_w, const Dtype min_size_h,
const bool min_size_mode_and_else_or, const Dtype thr_obj,
const Dtype bsz01, const bool do_bbox_norm,
const Dtype mean0, const Dtype mean1,
const Dtype mean2, const Dtype mean3,
const Dtype std0, const Dtype std1,
const Dtype std2, const Dtype std3,
const bool refine_out_of_map_bbox, const Dtype* anc_data,
const Dtype* prob_data, const Dtype* tgt_data,
Dtype* conf_data, Dtype* bbox_data) {
int map_size = map_height * map_width;
CUDA_KERNEL_LOOP(index, threads) {
int w = index % map_width;
int h = (index / map_width) % map_height;
int a = index / map_size;
int off = h * map_width + w;
Dtype score = prob_data[(num_anchors + a) * map_size + off];
if (score < thr_obj) {
conf_data[index] = 0.0;
continue;
}
int ax4 = a * 4;
Dtype anchor_ctr_x = anc_data[ax4];
Dtype anchor_ctr_y = anc_data[ax4 + 1];
Dtype anchor_width = anc_data[ax4 + 2];
Dtype anchor_height = anc_data[ax4 + 3];
Dtype input_ctr_x = w * heat_map_a + heat_map_b + anchor_ctr_x;
Dtype input_ctr_y = h * heat_map_a + heat_map_b + anchor_ctr_y;
if (allow_border >= Dtype(0.0)
|| allow_border_ratio >= Dtype(0.0)) {
Dtype x1 = input_ctr_x - 0.5 * (anchor_width - bsz01);
Dtype y1 = input_ctr_y - 0.5 * (anchor_height - bsz01);
Dtype x2 = x1 + anchor_width - bsz01;
Dtype y2 = y1 + anchor_height - bsz01;
if (allow_border >= Dtype(0.0) && (
x1 < -allow_border || y1 < -allow_border
|| x2 > input_width - 1 + allow_border ||
y2 > input_height - 1 + allow_border)) {
conf_data[index] = 0.0;
continue;
} else if (allow_border_ratio >= Dtype(0.0)) {
Dtype x11 = max(Dtype(0), x1);
Dtype y11 = max(Dtype(0), y1);
Dtype x22 = min(input_width - 1, x2);
Dtype y22 = min(input_height - 1, y2);
if ((y22 - y11 + bsz01) * (x22 - x11 + bsz01)
/ ((y2 - y1 + bsz01) * (x2 - x1 + bsz01))
< (1.0 - allow_border_ratio)) {
conf_data[index] = 0.0;
continue;
}
}
}
Dtype tg0 = tgt_data[ax4 * map_size + off];
Dtype tg1 = tgt_data[(ax4 + 1) * map_size + off];
Dtype tg2 = tgt_data[(ax4 + 2) * map_size + off];
Dtype tg3 = tgt_data[(ax4 + 3) * map_size + off];
if (do_bbox_norm) {
tg0 = tg0 * std0 + mean0;
tg1 = tg1 * std1 + mean1;
tg2 = tg2 * std2 + mean2;
tg3 = tg3 * std3 + mean3;
}
Dtype tw = anchor_width * exp(tg2);
Dtype th = anchor_height * exp(tg3);
Dtype ctx = tg0 * anchor_width + input_ctr_x;
Dtype cty = tg1 * anchor_height + input_ctr_y;
Dtype ltx = ctx - 0.5 * (tw - bsz01);
Dtype lty = cty - 0.5 * (th - bsz01);
Dtype rbx = ltx + tw - bsz01;
Dtype rby = lty + th - bsz01;
if (refine_out_of_map_bbox) {
ltx = min(max(ltx, Dtype(0.0)), input_width -1);
lty = min(max(lty, Dtype(0.0)), input_height -1);
rbx = min(max(rbx, Dtype(0.0)), input_width -1);
rby = min(max(rby, Dtype(0.0)), input_height -1);
}
if (min_size_mode_and_else_or) {
if ((rbx - ltx + bsz01) < min_size_w
|| (rby - lty + bsz01) < min_size_h) {
conf_data[index] = 0.0;
continue;
}
} else {
if ((rbx - ltx + bsz01) < min_size_w
&& (rby - lty + bsz01) < min_size_h) {
conf_data[index] = 0.0;
continue;
}
}
conf_data[index] = score;
bbox_data[index * 4] = ltx;
bbox_data[index * 4 + 1] = lty;
bbox_data[index * 4 + 2] = rbx;
bbox_data[index * 4 + 3] = rby;
}
}
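// Host-side sketch (ours) of the box decoding used above: a regression target (tg0..tg3),
// already de-normalized with (mean, std) when do_bbox_norm is set, is applied to an anchor
// given by its center in input-image coordinates and its size; bsz01 is 1 for the legacy
// "+1" box-size convention and 0 otherwise.
inline void ref_decode_anchor_box(float tg0, float tg1, float tg2, float tg3,
        float input_ctr_x, float input_ctr_y,
        float anchor_width, float anchor_height, float bsz01,
        float* ltx, float* lty, float* rbx, float* rby) {
    float tw = anchor_width * expf(tg2);
    float th = anchor_height * expf(tg3);
    float ctx = tg0 * anchor_width + input_ctr_x;
    float cty = tg1 * anchor_height + input_ctr_y;
    *ltx = ctx - 0.5f * (tw - bsz01);
    *lty = cty - 0.5f * (th - bsz01);
    *rbx = *ltx + tw - bsz01;
    *rby = *lty + th - bsz01;
}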
template <typename Dtype>
void rpn_cmp_conf_bbox_gpu(const int num_anchors,
const int map_height, const int map_width,
const Dtype input_height, const Dtype input_width,
const Dtype heat_map_a, const Dtype heat_map_b,
const Dtype allow_border, const Dtype allow_border_ratio,
const Dtype min_size_w, const Dtype min_size_h,
const bool min_size_mode_and_else_or, const Dtype thr_obj,
const Dtype bsz01, const bool do_bbox_norm,
const Dtype mean0, const Dtype mean1,
const Dtype mean2, const Dtype mean3,
const Dtype std0, const Dtype std1,
const Dtype std2, const Dtype std3,
const bool refine_out_of_map_bbox, const Dtype* anc_data,
const Dtype* prob_data, const Dtype* tgt_data,
Dtype* conf_data, Dtype* bbox_data, Context<NV> *ctx) {
#ifdef ENABLE_DEBUG
#undef CUDA_NUM_THREADS
#define CUDA_NUM_THREADS 256
#endif
int threads = num_anchors * map_height * map_width;
hipLaunchKernelGGL(( rpn_cmp_conf_bbox_kernel<Dtype>), dim3(CUDA_GET_BLOCKS(threads)),
dim3(CUDA_NUM_THREADS), 0, ctx->get_compute_stream(), threads, num_anchors,
map_height, map_width,
input_height, input_width,
heat_map_a, heat_map_b,
allow_border, allow_border_ratio,
min_size_w, min_size_h,
min_size_mode_and_else_or, thr_obj,
bsz01, do_bbox_norm,
mean0, mean1, mean2, mean3,
std0, std1, std2, std3,
refine_out_of_map_bbox, anc_data,
prob_data, tgt_data,
conf_data, bbox_data);
CUDA_POST_KERNEL_CHECK;
}
template void rpn_cmp_conf_bbox_gpu(const int num_anchors,
const int map_height, const int map_width,
const float input_height, const float input_width,
const float heat_map_a, const float heat_map_b,
const float allow_border, const float allow_border_ratio,
const float min_size_w, const float min_size_h,
const bool min_size_mode_and_else_or, const float thr_obj,
const float bsz01, const bool do_bbox_norm,
const float mean0, const float mean1,
const float mean2, const float mean3,
const float std0, const float std1,
const float std2, const float std3,
const bool refine_out_of_map_bbox, const float* anc_data,
const float* prob_data, const float* tgt_data,
float* conf_data, float* bbox_data, Context<NV> *ctx);
// rcnn
template <typename Dtype>
__global__ void rcnn_cmp_conf_bbox_kernel(const int num_rois,
const Dtype input_height, const Dtype input_width,
const Dtype allow_border, const Dtype allow_border_ratio,
const Dtype min_size_w, const Dtype min_size_h,
const bool min_size_mode_and_else_or, const Dtype thr_obj,
const Dtype bsz01, const bool do_bbox_norm,
const Dtype mean0, const Dtype mean1,
const Dtype mean2, const Dtype mean3,
const Dtype std0, const Dtype std1,
const Dtype std2, const Dtype std3,
const bool refine_out_of_map_bbox, const bool regress_agnostic,
const int num_class, const Dtype* thr_cls,
const Dtype* rois_data, const Dtype* prob_data,
const Dtype* tgt_data, Dtype* conf_data,
Dtype* bbox_data) {
int probs_dim = num_class + 1;
int cords_dim = (regress_agnostic ? 2 : (num_class + 1)) * 4;
CUDA_KERNEL_LOOP(index, num_rois) {
const Dtype* probs = prob_data + index * probs_dim;
const Dtype* cords = tgt_data + index * cords_dim;
const Dtype* rois = rois_data + index * 5;
if ((1.0 - probs[0]) < thr_obj) {
conf_data[index] = 0.0;
continue;
}
if (int(rois[0]) == -1) {
conf_data[index] = 0.0;
continue;
}
Dtype score_max = -10e6;
int cls_max = -1;
for (int c = 0; c < num_class; c++) {
Dtype score_c = probs[c + 1] - thr_cls[c];
if (score_c > score_max) {
score_max = score_c;
cls_max = c;
}
}
if (score_max < 0) {
conf_data[index] = 0.0;
continue;
}
if (allow_border >= 0.0
|| allow_border_ratio >= 0.0) {
Dtype x1 = rois[1];
Dtype y1 = rois[2];
Dtype x2 = rois[3];
Dtype y2 = rois[4];
if (allow_border >= 0.0 && (
x1 < -allow_border || y1 < -allow_border
|| x2 > input_width - 1 + allow_border ||
y2 > input_height - 1 + allow_border )) {
conf_data[index] = 0.0;
continue;
} else if (allow_border_ratio >= 0.0) {
Dtype x11 = max(Dtype(0.0), x1);
Dtype y11 = max(Dtype(0.0), y1);
Dtype x22 = min(input_width - 1, x2);
Dtype y22 = min(input_height - 1, y2);
if ((y22 - y11 + bsz01) * (x22 - x11 + bsz01)
/ ((y2 - y1 + bsz01) * (x2 - x1 +bsz01))
< (1.0 - allow_border_ratio)) {
conf_data[index] = 0.0;
continue;
}
}
}
Dtype rois_w = rois[3] - rois[1] + bsz01;
Dtype rois_h = rois[4] - rois[2] + bsz01;
Dtype rois_ctr_x = rois[1] + 0.5 * (rois_w - bsz01);
Dtype rois_ctr_y = rois[2] + 0.5 * (rois_h - bsz01);
int cdst = regress_agnostic ? 4 : ((cls_max + 1) * 4);
Dtype tg0 = cords[cdst];
Dtype tg1 = cords[cdst + 1];
Dtype tg2 = cords[cdst + 2];
Dtype tg3 = cords[cdst + 3];
if (do_bbox_norm) {
tg0 = tg0 * std0 + mean0;
tg1 = tg1 * std1 + mean1;
tg2 = tg2 * std2 + mean2;
tg3 = tg3 * std3 + mean3;
}
Dtype tw = rois_w * exp(tg2);
Dtype th = rois_h * exp(tg3);
Dtype ctx = tg0 * rois_w + rois_ctr_x;
Dtype cty = tg1 * rois_h + rois_ctr_y;
Dtype ltx = ctx - 0.5 * (tw - bsz01);
Dtype lty = cty - 0.5 * (th - bsz01);
Dtype rbx = ltx + tw - bsz01;
Dtype rby = lty + th - bsz01;
if (refine_out_of_map_bbox) {
ltx = min(max(ltx, Dtype(0.0)), input_width -1);
lty = min(max(lty, Dtype(0.0)), input_height -1);
rbx = min(max(rbx, Dtype(0.0)), input_width -1);
rby = min(max(rby, Dtype(0.0)), input_height -1);
}
if (min_size_mode_and_else_or) {
if ((rbx - ltx + bsz01) < min_size_w
|| (rby - lty + bsz01) < min_size_h) {
conf_data[index] = 0.0;
continue;
}
} else {
if ((rbx - ltx + bsz01) < min_size_w
&& (rby - lty + bsz01) < min_size_h) {
conf_data[index] = 0.0;
continue;
}
}
conf_data[index] = probs[cls_max + 1];
bbox_data[index * 4] = ltx;
bbox_data[index * 4 + 1] = lty;
bbox_data[index * 4 + 2] = rbx;
bbox_data[index * 4 + 3] = rby;
}
}
template <typename Dtype>
void rcnn_cmp_conf_bbox_gpu(const int num_rois,
const Dtype input_height, const Dtype input_width,
const Dtype allow_border, const Dtype allow_border_ratio,
const Dtype min_size_w, const Dtype min_size_h,
const bool min_size_mode_and_else_or, const Dtype thr_obj,
const Dtype bsz01, const bool do_bbox_norm,
const Dtype mean0, const Dtype mean1,
const Dtype mean2, const Dtype mean3,
const Dtype std0, const Dtype std1,
const Dtype std2, const Dtype std3,
const bool refine_out_of_map_bbox, const bool regress_agnostic,
const int num_class, const Dtype* thr_cls,
const Dtype* rois_data, const Dtype* prob_data,
const Dtype* tgt_data, Dtype* conf_data,
Dtype* bbox_data, Context<NV> *ctx) {
int threads = num_rois;
hipLaunchKernelGGL(( rcnn_cmp_conf_bbox_kernel<Dtype>), dim3(CUDA_GET_BLOCKS(threads)),
dim3(CUDA_NUM_THREADS), 0, ctx->get_compute_stream(), num_rois,
input_height, input_width,
allow_border, allow_border_ratio,
min_size_w, min_size_h,
min_size_mode_and_else_or, thr_obj,
bsz01, do_bbox_norm,
mean0, mean1,
mean2, mean3,
std0, std1,
std2, std3,
refine_out_of_map_bbox, regress_agnostic,
num_class, thr_cls,
rois_data, prob_data,
tgt_data, conf_data,
bbox_data);
CUDA_POST_KERNEL_CHECK;
}
template void rcnn_cmp_conf_bbox_gpu(const int num_rois,
const float input_height, const float input_width,
const float allow_border, const float allow_border_ratio,
const float min_size_w, const float min_size_h,
const bool min_size_mode_and_else_or, const float thr_obj,
const float bsz01, const bool do_bbox_norm,
const float mean0, const float mean1,
const float mean2, const float mean3,
const float std0, const float std1,
const float std2, const float std3,
const bool refine_out_of_map_bbox, const bool regress_agnostic,
const int num_class, const float* thr_cls,
const float* rois_data, const float* prob_data,
const float* tgt_data, float* conf_data,
float* bbox_data, Context<NV> *ctx);
// nms: copied and modified from some CUDA code in yolo
template <typename Dtype>
__host__ __device__ Dtype bbox_size_gpu(const Dtype *bbox, const Dtype bsz01) {
if (bbox[2] < bbox[0] || bbox[3] < bbox[1]) {
return Dtype(0.);
} else {
return (bbox[2] - bbox[0] + bsz01) * (bbox[3] - bbox[1] + bsz01);
}
}
template <typename Dtype>
__host__ __device__ Dtype jaccard_overlap_gpu(const Dtype *bbox1,
const Dtype *bbox2, const Dtype bsz01) {
if (bbox2[0] > bbox1[2] || bbox2[2] < bbox1[0] ||
bbox2[1] > bbox1[3] || bbox2[3] < bbox1[1]) {
return Dtype(0.);
} else {
const Dtype inter_xmin = max(bbox1[0], bbox2[0]);
const Dtype inter_ymin = max(bbox1[1], bbox2[1]);
const Dtype inter_xmax = min(bbox1[2], bbox2[2]);
const Dtype inter_ymax = min(bbox1[3], bbox2[3]);
const Dtype inter_width = inter_xmax - inter_xmin + bsz01;
const Dtype inter_height = inter_ymax - inter_ymin + bsz01;
const Dtype inter_size = inter_width * inter_height;
const Dtype bbox1_size = bbox_size_gpu(bbox1, bsz01);
const Dtype bbox2_size = bbox_size_gpu(bbox2, bsz01);
return inter_size / (bbox1_size + bbox2_size - inter_size);
}
}
template <typename Dtype>
__global__ void compute_overlapped_by_idx_kernel(
const int nthreads, const Dtype *bbox_data, const int bbox_step,
const Dtype overlap_threshold, const int *idx, const int num_idx,
const Dtype bsz01, bool *overlapped_data) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < (nthreads); index += blockDim.x * gridDim.x) {
const int j = index % num_idx;
const int i = index / num_idx;
if (i == j) {
// Ignore same bbox; continue (rather than return) so the grid-stride loop keeps processing.
continue;
}
// Compute overlap between i-th bbox and j-th bbox.
const int start_loc_i = idx[i] * bbox_step;
const int start_loc_j = idx[j] * bbox_step;
const Dtype overlap = jaccard_overlap_gpu(bbox_data + start_loc_i,
bbox_data + start_loc_j,
bsz01);
overlapped_data[index] = overlap > overlap_threshold;
}
}
//template <typename Dtype>
//void compute_overlapped_by_idx_gpu(
// const int nthreads, const Dtype *bbox_data, const int bbox_step,
// const Dtype overlap_threshold, const int *idx, const int num_idx,
// const Dtype bsz01, bool *overlapped_data) {
// // NOLINT_NEXT_LINE(whitespace/operators)
// const int thread_size = 256;
// int block_size = (nthreads + thread_size - 1) / thread_size;
// compute_overlapped_by_idx_kernel << < block_size, thread_size >> > (
// nthreads, bbox_data, bbox_step, overlap_threshold, idx, num_idx,
// bsz01, overlapped_data);
//}
template <typename Dtype>
void compute_overlapped_by_idx_gpu(
const int nthreads, const Dtype *bbox_data, const int bbox_step,
const Dtype overlap_threshold, const int *idx, const int num_idx,
const Dtype bsz01, bool *overlapped_data, const hipStream_t &stream) {
// NOLINT_NEXT_LINE(whitespace/operators)
const int thread_size = 256;
int block_size = (nthreads + thread_size - 1) / thread_size;
// printf("thread_size = %d, block_size = %d\n", thread_size, block_size);
compute_overlapped_by_idx_kernel << < block_size, thread_size, 0, stream >> > (
nthreads, bbox_data, bbox_step, overlap_threshold, idx, num_idx,
bsz01, overlapped_data);
hipDeviceSynchronize();
}
// Do nms, modified by mingli.
void apply_nms(const bool *overlapped, const int num, const int top_k,
const std::vector<int> &idxes, std::vector<int> *indices,
const int nmsed_num = 0, const int nmsed_loc = 0) {
std::vector<bool> mask(num, false);
if (nmsed_num > 0) {
int k_x_num_add_nmsed_num = nmsed_num;
for (int k = 0; k < nmsed_num; k++) {
int k_x_num_add_p = k_x_num_add_nmsed_num;
for (int p = nmsed_num; p < num; p++) {
if (overlapped[k_x_num_add_p++]) {
mask[p] = true;
}
}
k_x_num_add_nmsed_num += num;
}
}
int count = nmsed_num;
int k_x_num = (nmsed_num -1) * num;
for (int k = nmsed_num; k < num; k++) {
k_x_num += num;
if (mask[k]) {
continue;
} else {
indices->push_back(idxes[nmsed_loc + k - nmsed_num]);
if (++count >= top_k) {
break;
}
int k_x_num_add_p = k_x_num + k + 1;
for (int p = k + 1; p < num; p++) {
if (overlapped[k_x_num_add_p++]) {
mask[p] = true;
}
}
}
}
}
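// Illustrative CPU-only usage sketch for apply_nms above. The overlapped
// matrix is row-major: entry [k * num + p] says whether candidates k and p
// (both indices into idxes, which the caller has already sorted by
// confidence) overlap beyond the NMS threshold; only entries with p > k are
// read when nmsed_num is 0. The concrete values below are hypothetical.
inline void apply_nms_usage_sketch() {
const int num = 3;
const bool overlapped[num * num] = {
false, true, false, // row 0: candidate 0 overlaps candidate 1
false, false, false, // row 1: skipped, candidate 1 is suppressed by 0
false, false, false}; // row 2: candidate 2 overlaps nothing
std::vector<int> idxes = {7, 3, 5}; // original bbox indices of the candidates
std::vector<int> keep;
apply_nms(overlapped, num, /*top_k*/ 3, idxes, &keep);
// keep now holds {7, 5}: candidate 1 (bbox 3) was suppressed by candidate 0.
}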
template <typename Dtype, typename PGlue_nv>
void apply_nms_gpu(const Dtype *bbox_data, const Dtype *conf_data,
const int num_bboxes, const int bbox_step, const Dtype confidence_threshold,
const int max_candidate_n, const int top_k, const Dtype nms_threshold,
const Dtype bsz01, std::vector<int> *indices,
PGlue_nv *overlapped, PGlue_nv *idx_sm,
Context<NV> *ctx, std::vector<int> *idx_ptr,
const int conf_step, const int conf_idx,
const int nms_gpu_max_n_per_time) {
indices->clear();
std::vector<int> idx;
std::vector<Dtype> confidences;
if (idx_ptr == NULL) {
if (conf_step == 1) {
for (int i = 0; i < num_bboxes; ++i) {
if (conf_data[i] > confidence_threshold) {
idx.push_back(i);
confidences.push_back(conf_data[i]);
}
}
} else {
int i_x_step_add_idx = conf_idx;
for (int i = 0; i < num_bboxes; ++i) {
if (conf_data[i_x_step_add_idx] > confidence_threshold) {
idx.push_back(i);
confidences.push_back(conf_data[i_x_step_add_idx]);
}
i_x_step_add_idx += conf_step;
}
}
} else {
if (conf_step == 1) {
for (int k = 0; k < idx_ptr->size(); k++) {
int i = (*idx_ptr)[k];
if (conf_data[i] > confidence_threshold) {
idx.push_back(i);
confidences.push_back(conf_data[i]);
}
}
} else {
for (int k = 0; k < idx_ptr->size(); k++) {
int i = (*idx_ptr)[k];
int i_x_step_add_idx = i * conf_step + conf_idx;
if (conf_data[i_x_step_add_idx] > confidence_threshold) {
idx.push_back(i);
confidences.push_back(conf_data[i_x_step_add_idx]);
}
}
}
}
int num_remain = confidences.size();
if (num_remain == 0) {
return;
}
if (nms_threshold >= Dtype(1.0)) {
for (int i = 0; i < idx.size(); i++) {
indices->push_back(idx[i]);
}
return;
}
thrust::sort_by_key(&confidences[0], &confidences[0] + num_remain, &idx[0],
thrust::greater<Dtype>());
if (max_candidate_n > -1 && max_candidate_n < num_remain) {
num_remain = max_candidate_n;
}
int idx_loc = 0;
int indices_size_pre = 0;
while (idx_loc < num_remain && indices->size() < top_k) {
int *idx_data = (int*)idx_sm->host_mutable_data(ctx);
std::copy(indices->begin() + indices_size_pre,
indices->end(), idx_data + indices_size_pre);
int idx_num_cur_time = min(int(nms_gpu_max_n_per_time - indices->size()),
int(num_remain - idx_loc));
std::copy(idx.begin() + idx_loc, idx.begin() + idx_loc + idx_num_cur_time,
idx_data + indices->size());
int candidate_n_cur_time = indices->size() + idx_num_cur_time;
int total_bboxes = candidate_n_cur_time * candidate_n_cur_time;
bool *overlapped_data = (bool*)overlapped->device_mutable_data(ctx);
compute_overlapped_by_idx_gpu(total_bboxes, bbox_data, bbox_step,
nms_threshold, (const int*)idx_sm->device_data(ctx),
candidate_n_cur_time, bsz01, overlapped_data, ctx->get_compute_stream());
const bool *overlapped_results = (const bool*)overlapped->host_data(ctx);
indices_size_pre = indices->size();
apply_nms(overlapped_results, candidate_n_cur_time, top_k,
idx, indices, indices->size(), idx_loc);
idx_loc += idx_num_cur_time;
}
}
template void apply_nms_gpu(const float *bbox_data, const float *conf_data,
const int num_bboxes, const int bbox_step, const float confidence_threshold,
const int max_candidate_n, const int top_k, const float nms_threshold,
const float bsz01, std::vector<int> *indices,
PGlue<Tensor<NV>, Tensor<NVHX86> > *overlapped,
PGlue<Tensor<NV>, Tensor<NVHX86> > *idx_sm,
Context<NV> *ctx, std::vector<int> *idx_ptr,
const int conf_step, const int conf_idx, const int nms_gpu_max_n_per_time);
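// GenGrdFt: for each pixel assumed to lie on the ground plane, back-project
// it through the camera, raise the resulting 3D point by std_height,
// re-project it, and store the image-space length of that segment as the
// ground feature (optionally split into log2-scaled channels when
// normalize_grd_ft is set). Rows at or above the horizon (r <= min_r_grd)
// are written as zero.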
template <typename Dtype>
void GenGrdFt_cpu(unsigned int im_width,
unsigned int im_height, unsigned int blob_width,
unsigned int blob_height, Dtype std_height,
const std::vector<Dtype> & cam_params, Dtype* grd_ft,
Dtype read_width_scale, Dtype read_height_scale,
unsigned int read_height_offset, unsigned int valid_param_idx_st,
bool trans_cam_pitch_to_zero, bool normalize_grd_ft,
unsigned int normalize_grd_ft_dim) {
CHECK_GT(im_width, 0);
CHECK_GT(im_height, 0);
CHECK_GE(blob_width, im_width);
CHECK_GE(blob_height, im_height);
CHECK_GT(read_width_scale, 0);
CHECK_GT(read_height_scale, 0);
CHECK_LE(valid_param_idx_st + 6, cam_params.size());
Dtype cam_xpz = cam_params[valid_param_idx_st + 0];
Dtype cam_xct = cam_params[valid_param_idx_st + 1];
Dtype cam_ypz = cam_params[valid_param_idx_st + 2];
Dtype cam_yct = cam_params[valid_param_idx_st + 3];
Dtype cam_hgrd = cam_params[valid_param_idx_st + 4];
Dtype cam_pitch = cam_params[valid_param_idx_st + 5];
CHECK_GT(cam_xpz, 0);
CHECK_GT(cam_ypz, 0);
CHECK_GT(cam_hgrd, 0);
Dtype min_py_grd = cam_yct + cam_ypz * tan(cam_pitch);
Dtype min_r_grd = (min_py_grd - read_height_offset)
* read_height_scale;
for (int r = 0; r < im_height; r++) {
Dtype py_grd;
Dtype z_grd, y_grd;
Dtype z_std_h_upon_grd, y_std_h_upon_grd;
Dtype py_std_h_upon_grd, r_std_h_upon_grd;
if (r > min_r_grd) {
py_grd = r / read_height_scale + read_height_offset;
z_grd = cam_ypz * cam_hgrd
/ (py_grd - cam_yct - cam_ypz * tan(cam_pitch));
y_grd = cam_hgrd + z_grd * tan(cam_pitch);
z_std_h_upon_grd = z_grd + std_height
* (trans_cam_pitch_to_zero?0.0:tan(cam_pitch));
y_std_h_upon_grd = y_grd - std_height
* (trans_cam_pitch_to_zero?1.0:cos(cam_pitch));
py_std_h_upon_grd = cam_ypz * y_std_h_upon_grd
/ z_std_h_upon_grd + cam_yct;
r_std_h_upon_grd = (py_std_h_upon_grd - read_height_offset)
* read_height_scale;
}
for (int c = 0; c < im_width; c++) {
if (r <= min_r_grd) {
grd_ft[r * blob_width + c] = Dtype(0.0);
} else {
Dtype px_grd = c / read_width_scale;
Dtype x_grd = (px_grd - cam_xct) * z_grd / cam_xpz;
Dtype x_std_h_upon_grd = x_grd;
Dtype px_std_h_upon_grd = cam_xpz * x_std_h_upon_grd
/ z_std_h_upon_grd + cam_xct;
Dtype c_std_h_upon_grd = px_std_h_upon_grd
* read_width_scale;
Dtype std_h_prj_scale =
sqrt((c_std_h_upon_grd - c) * (c_std_h_upon_grd - c)
+ (r_std_h_upon_grd - r) * (r_std_h_upon_grd - r));
if (!normalize_grd_ft) {
grd_ft[r * blob_width + c] = std_h_prj_scale;
} else {
int norm_chl = std::min<int>(normalize_grd_ft_dim - 1,
std::max<int>(0, static_cast<int>(
::ceil(::log(std_h_prj_scale) / ::log(2.0)))));
grd_ft[(norm_chl * blob_height + r) * blob_width + c] =
std_h_prj_scale / ::pow(2.0, norm_chl);
}
}
}
}
}
template void GenGrdFt_cpu(unsigned int im_width,
unsigned int im_height, unsigned int blob_width,
unsigned int blob_height, float std_height,
const std::vector<float> & cam_params, float* grd_ft,
float read_width_scale, float read_height_scale,
unsigned int read_height_offset, unsigned int valid_param_idx_st,
bool trans_cam_pitch_to_zero, bool normalize_grd_ft,
unsigned int normalize_grd_ft_dim);
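// A minimal usage sketch for GenGrdFt_cpu above. The six camera parameters
// are hypothetical placeholders, listed in the order the function consumes
// them (cam_xpz, cam_xct, cam_ypz, cam_yct, cam_hgrd, cam_pitch); real values
// come from the calibration that produces cam_params. With normalize_grd_ft
// off, the output buffer only needs blob_width * blob_height floats.
inline void gen_grd_ft_cpu_usage_sketch() {
const unsigned int im_w = 32, im_h = 32;
std::vector<float> cam_params = {
720.f, 16.f, // cam_xpz, cam_xct
720.f, 16.f, // cam_ypz, cam_yct
1.5f, 0.02f}; // cam_hgrd, cam_pitch
std::vector<float> grd_ft(im_w * im_h, 0.f);
GenGrdFt_cpu<float>(im_w, im_h, /*blob_width*/ im_w, /*blob_height*/ im_h,
/*std_height*/ 1.6f, cam_params, grd_ft.data(),
/*read_width_scale*/ 1.f, /*read_height_scale*/ 1.f,
/*read_height_offset*/ 0u, /*valid_param_idx_st*/ 0u,
/*trans_cam_pitch_to_zero*/ false, /*normalize_grd_ft*/ false,
/*normalize_grd_ft_dim*/ 1u);
}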
template <typename Dtype>
__global__ void GenGrdFt_kernel(unsigned int im_width, unsigned int blob_width,
unsigned int blob_height, unsigned int n, Dtype std_height, Dtype cam_xpz,
Dtype cam_xct, Dtype cam_ypz, Dtype cam_yct, Dtype cam_hgrd, Dtype cam_pitch,
Dtype cam_tanh, Dtype cam_ypz_x_tanh, Dtype std_height_x_tanh, Dtype std_height_x_cos,
Dtype cam_ypz_x_cam_hgrd, Dtype read_width_scale, Dtype read_height_scale,
unsigned int read_height_offset, Dtype min_py_grd, Dtype min_r_grd,
bool normalize_grd_ft, unsigned int normalize_grd_ft_dim, Dtype* grd_ft_gpu_data) {
CUDA_KERNEL_LOOP(index, n) {
int r = index / im_width;
int c = index % im_width;
if (r <= min_r_grd) {
grd_ft_gpu_data[r * blob_width + c] = Dtype(0.0);
} else {
Dtype py_grd = r / read_height_scale + read_height_offset;
Dtype z_grd = cam_ypz_x_cam_hgrd
/ (py_grd - cam_yct - cam_ypz_x_tanh);
Dtype y_grd = cam_hgrd + z_grd * cam_tanh;
Dtype z_std_h_upon_grd = z_grd + std_height_x_tanh;
Dtype y_std_h_upon_grd = y_grd - std_height_x_cos;
Dtype py_std_h_upon_grd = cam_ypz * y_std_h_upon_grd
/ z_std_h_upon_grd + cam_yct;
Dtype r_std_h_upon_grd = (py_std_h_upon_grd - read_height_offset)
* read_height_scale;
Dtype px_grd = c / read_width_scale;
Dtype x_grd = (px_grd - cam_xct) * z_grd / cam_xpz;
Dtype x_std_h_upon_grd = x_grd;
Dtype px_std_h_upon_grd = cam_xpz * x_std_h_upon_grd
/ z_std_h_upon_grd + cam_xct;
Dtype c_std_h_upon_grd = px_std_h_upon_grd * read_width_scale;
Dtype std_h_prj_scale =
sqrt((c_std_h_upon_grd - c) * (c_std_h_upon_grd - c)
+ (r_std_h_upon_grd - r) * (r_std_h_upon_grd - r));
if (!normalize_grd_ft) {
grd_ft_gpu_data[r * blob_width + c] = std_h_prj_scale;
} else {
int norm_chl = min(normalize_grd_ft_dim - 1, max(0,
int(ceil(log(std_h_prj_scale) / log(2.0)))));
grd_ft_gpu_data[(norm_chl * blob_height + r) * blob_width + c] =
std_h_prj_scale / pow(2.0, norm_chl);
}
}
}
}
template <typename Dtype>
void GenGrdFt_gpu(unsigned int im_width,
unsigned int im_height, unsigned int blob_width,
unsigned int blob_height, Dtype std_height,
const std::vector<Dtype> & cam_params, Dtype* grd_ft,
Dtype read_width_scale, Dtype read_height_scale,
unsigned int read_height_offset, unsigned int valid_param_idx_st,
bool trans_cam_pitch_to_zero, bool normalize_grd_ft,
unsigned int normalize_grd_ft_dim) {
CHECK_GT(im_width, 0);
CHECK_GT(im_height, 0);
CHECK_GE(blob_width, im_width);
CHECK_GE(blob_height, im_height);
CHECK_GT(read_width_scale, 0);
CHECK_GT(read_height_scale, 0);
CHECK_LE(valid_param_idx_st + 6, cam_params.size());
Dtype cam_xpz = cam_params[valid_param_idx_st + 0];
Dtype cam_xct = cam_params[valid_param_idx_st + 1];
Dtype cam_ypz = cam_params[valid_param_idx_st + 2];
Dtype cam_yct = cam_params[valid_param_idx_st + 3];
Dtype cam_hgrd = cam_params[valid_param_idx_st + 4];
Dtype cam_pitch = cam_params[valid_param_idx_st + 5];
CHECK_GT(cam_xpz, 0);
CHECK_GT(cam_ypz, 0);
CHECK_GT(cam_hgrd, 0);
Dtype cam_tanh = tanh(cam_pitch);
Dtype cam_ypz_x_tanh = cam_ypz * cam_tanh;
Dtype std_height_x_tanh = std_height
* (trans_cam_pitch_to_zero ? 0.0 : tanh(cam_pitch));
Dtype std_height_x_cos = std_height
* (trans_cam_pitch_to_zero ? 1.0 : cos(cam_pitch));
Dtype cam_ypz_x_cam_hgrd = cam_ypz * cam_hgrd;
Dtype min_py_grd = cam_yct + cam_ypz_x_tanh;
Dtype min_r_grd = (min_py_grd - read_height_offset)
* read_height_scale;
int count = im_height * im_width;
hipLaunchKernelGGL(( GenGrdFt_kernel<Dtype>), dim3(CUDA_GET_BLOCKS(count, CUDA_NUM_THREADS)),
dim3(CUDA_NUM_THREADS), 0, 0,
im_width, blob_width, blob_height, count,
std_height, cam_xpz, cam_xct, cam_ypz,
cam_yct, cam_hgrd, cam_pitch, cam_tanh,
cam_ypz_x_tanh, std_height_x_tanh,
std_height_x_cos, cam_ypz_x_cam_hgrd,
read_width_scale, read_height_scale,
read_height_offset, min_py_grd,
min_r_grd, normalize_grd_ft,
normalize_grd_ft_dim, grd_ft);
CUDA_POST_KERNEL_CHECK;
}
template void GenGrdFt_gpu(unsigned int im_width,
unsigned int im_height, unsigned int blob_width,
unsigned int blob_height, float std_height,
const std::vector<float> & cam_params, float* grd_ft,
float read_width_scale, float read_height_scale,
unsigned int read_height_offset, unsigned int valid_param_idx_st,
bool trans_cam_pitch_to_zero, bool normalize_grd_ft,
unsigned int normalize_grd_ft_dim);
}
}
|
a328526bead0ea6bcd81739b6fa39b770874b1c5.cu
|
#include "saber/utils.h"
#include "saber/core/common.h"
#include "saber/core/tensor.h"
#include <vector>
#include "thrust/functional.h"
#include "thrust/sort.h"
namespace anakin {
namespace saber {
// caffe util_nms.cu
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
int const threadsPerBlock = sizeof(unsigned long long) * 8;
__device__ inline float devIoU(float const *const a, float const *const b) {
float left = max(a[0], b[0]), right = min(a[2], b[2]);
float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
return interS / (Sa + Sb - interS);
}
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
const float *dev_boxes, unsigned long long *dev_mask) {
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const float *cur_box = dev_boxes + cur_box_idx * 5;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
const std::vector<bool> nms_voting0(const float *boxes_dev, unsigned long long *mask_dev,
int boxes_num, float nms_overlap_thresh,
const int max_candidates,
const int top_n) {
if ((max_candidates > 0) && (boxes_num > max_candidates)) {
boxes_num = max_candidates;
}
// float *boxes_dev = NULL;
// unsigned long long *mask_dev = NULL;
const int col_blocks = DIVUP(boxes_num, threadsPerBlock);
// CUDA_CHECK(cudaMalloc(&mask_dev,
// boxes_num * col_blocks * sizeof(unsigned long long)));
dim3 blocks(DIVUP(boxes_num, threadsPerBlock),
DIVUP(boxes_num, threadsPerBlock));
dim3 threads(threadsPerBlock);
nms_kernel << < blocks, threads >> > (boxes_num,
nms_overlap_thresh,
boxes_dev,
mask_dev);
std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
CUDA_CHECK(cudaMemcpy(&mask_host[0],
mask_dev,
sizeof(unsigned long long) * boxes_num * col_blocks,
cudaMemcpyDeviceToHost));
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
std::vector<bool> mask(boxes_num, false);
int num_to_keep = 0;
for (int i = 0; i < boxes_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
++num_to_keep;
mask[i] = true;
unsigned long long *p = &mask_host[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
if ((top_n > 0) && (num_to_keep >= top_n)) {
break;
}
}
}
// CUDA_CHECK(cudaFree(mask_dev));
return mask;
}
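// Illustrative host-side helper for the bitmask layout produced by nms_kernel
// above (a sketch; it assumes mask_host has already been copied back from
// dev_mask as done in nms_voting0). Row i of the mask holds col_blocks 64-bit
// words, and a set bit j in word b means box i and box (b * threadsPerBlock + j)
// overlap by more than the NMS threshold.
inline std::vector<int> boxes_overlapping_box(
const std::vector<unsigned long long> &mask_host,
int boxes_num, int box_i) {
const int col_blocks = DIVUP(boxes_num, threadsPerBlock);
std::vector<int> overlapping;
for (int b = 0; b < col_blocks; ++b) {
unsigned long long word = mask_host[box_i * col_blocks + b];
for (int j = 0; j < threadsPerBlock; ++j) {
int idx = b * threadsPerBlock + j;
if (idx < boxes_num && (word & (1ULL << j))) {
overlapping.push_back(idx);
}
}
}
return overlapping;
}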
template <typename Dtype>
__global__ void rpn_cmp_conf_bbox_kernel(
const int threads, const int num_anchors,
const int map_height, const int map_width,
const Dtype input_height, const Dtype input_width,
const Dtype heat_map_a, const Dtype heat_map_b,
const Dtype allow_border, const Dtype allow_border_ratio,
const Dtype min_size_w, const Dtype min_size_h,
const bool min_size_mode_and_else_or, const Dtype thr_obj,
const Dtype bsz01, const bool do_bbox_norm,
const Dtype mean0, const Dtype mean1,
const Dtype mean2, const Dtype mean3,
const Dtype std0, const Dtype std1,
const Dtype std2, const Dtype std3,
const bool refine_out_of_map_bbox, const Dtype* anc_data,
const Dtype* prob_data, const Dtype* tgt_data,
Dtype* conf_data, Dtype* bbox_data) {
int map_size = map_height * map_width;
CUDA_KERNEL_LOOP(index, threads) {
int w = index % map_width;
int h = (index / map_width) % map_height;
int a = index / map_size;
int off = h * map_width + w;
Dtype score = prob_data[(num_anchors + a) * map_size + off];
if (score < thr_obj) {
conf_data[index] = 0.0;
continue;
}
int ax4 = a * 4;
Dtype anchor_ctr_x = anc_data[ax4];
Dtype anchor_ctr_y = anc_data[ax4 + 1];
Dtype anchor_width = anc_data[ax4 + 2];
Dtype anchor_height = anc_data[ax4 + 3];
Dtype input_ctr_x = w * heat_map_a + heat_map_b + anchor_ctr_x;
Dtype input_ctr_y = h * heat_map_a + heat_map_b + anchor_ctr_y;
if (allow_border >= Dtype(0.0)
|| allow_border_ratio >= Dtype(0.0)) {
Dtype x1 = input_ctr_x - 0.5 * (anchor_width - bsz01);
Dtype y1 = input_ctr_y - 0.5 * (anchor_height - bsz01);
Dtype x2 = x1 + anchor_width - bsz01;
Dtype y2 = y1 + anchor_height - bsz01;
if (allow_border >= Dtype(0.0) && (
x1 < -allow_border || y1 < -allow_border
|| x2 > input_width - 1 + allow_border ||
y2 > input_height - 1 + allow_border)) {
conf_data[index] = 0.0;
continue;
} else if (allow_border_ratio >= Dtype(0.0)) {
Dtype x11 = max(Dtype(0), x1);
Dtype y11 = max(Dtype(0), y1);
Dtype x22 = min(input_width - 1, x2);
Dtype y22 = min(input_height - 1, y2);
if ((y22 - y11 + bsz01) * (x22 - x11 + bsz01)
/ ((y2 - y1 + bsz01) * (x2 - x1 + bsz01))
< (1.0 - allow_border_ratio)) {
conf_data[index] = 0.0;
continue;
}
}
}
Dtype tg0 = tgt_data[ax4 * map_size + off];
Dtype tg1 = tgt_data[(ax4 + 1) * map_size + off];
Dtype tg2 = tgt_data[(ax4 + 2) * map_size + off];
Dtype tg3 = tgt_data[(ax4 + 3) * map_size + off];
if (do_bbox_norm) {
tg0 = tg0 * std0 + mean0;
tg1 = tg1 * std1 + mean1;
tg2 = tg2 * std2 + mean2;
tg3 = tg3 * std3 + mean3;
}
Dtype tw = anchor_width * exp(tg2);
Dtype th = anchor_height * exp(tg3);
Dtype ctx = tg0 * anchor_width + input_ctr_x;
Dtype cty = tg1 * anchor_height + input_ctr_y;
Dtype ltx = ctx - 0.5 * (tw - bsz01);
Dtype lty = cty - 0.5 * (th - bsz01);
Dtype rbx = ltx + tw - bsz01;
Dtype rby = lty + th - bsz01;
if (refine_out_of_map_bbox) {
ltx = min(max(ltx, Dtype(0.0)), input_width -1);
lty = min(max(lty, Dtype(0.0)), input_height -1);
rbx = min(max(rbx, Dtype(0.0)), input_width -1);
rby = min(max(rby, Dtype(0.0)), input_height -1);
}
if (min_size_mode_and_else_or) {
if ((rbx - ltx + bsz01) < min_size_w
|| (rby - lty + bsz01) < min_size_h) {
conf_data[index] = 0.0;
continue;
}
} else {
if ((rbx - ltx + bsz01) < min_size_w
&& (rby - lty + bsz01) < min_size_h) {
conf_data[index] = 0.0;
continue;
}
}
conf_data[index] = score;
bbox_data[index * 4] = ltx;
bbox_data[index * 4 + 1] = lty;
bbox_data[index * 4 + 2] = rbx;
bbox_data[index * 4 + 3] = rby;
}
}
template <typename Dtype>
void rpn_cmp_conf_bbox_gpu(const int num_anchors,
const int map_height, const int map_width,
const Dtype input_height, const Dtype input_width,
const Dtype heat_map_a, const Dtype heat_map_b,
const Dtype allow_border, const Dtype allow_border_ratio,
const Dtype min_size_w, const Dtype min_size_h,
const bool min_size_mode_and_else_or, const Dtype thr_obj,
const Dtype bsz01, const bool do_bbox_norm,
const Dtype mean0, const Dtype mean1,
const Dtype mean2, const Dtype mean3,
const Dtype std0, const Dtype std1,
const Dtype std2, const Dtype std3,
const bool refine_out_of_map_bbox, const Dtype* anc_data,
const Dtype* prob_data, const Dtype* tgt_data,
Dtype* conf_data, Dtype* bbox_data, Context<NV> *ctx) {
#ifdef ENABLE_DEBUG
#undef CUDA_NUM_THREADS
#define CUDA_NUM_THREADS 256
#endif
int threads = num_anchors * map_height * map_width;
rpn_cmp_conf_bbox_kernel<Dtype><<<CUDA_GET_BLOCKS(threads),
CUDA_NUM_THREADS, 0, ctx->get_compute_stream()>>>(threads, num_anchors,
map_height, map_width,
input_height, input_width,
heat_map_a, heat_map_b,
allow_border, allow_border_ratio,
min_size_w, min_size_h,
min_size_mode_and_else_or, thr_obj,
bsz01, do_bbox_norm,
mean0, mean1, mean2, mean3,
std0, std1, std2, std3,
refine_out_of_map_bbox, anc_data,
prob_data, tgt_data,
conf_data, bbox_data);
CUDA_POST_KERNEL_CHECK;
}
template void rpn_cmp_conf_bbox_gpu(const int num_anchors,
const int map_height, const int map_width,
const float input_height, const float input_width,
const float heat_map_a, const float heat_map_b,
const float allow_border, const float allow_border_ratio,
const float min_size_w, const float min_size_h,
const bool min_size_mode_and_else_or, const float thr_obj,
const float bsz01, const bool do_bbox_norm,
const float mean0, const float mean1,
const float mean2, const float mean3,
const float std0, const float std1,
const float std2, const float std3,
const bool refine_out_of_map_bbox, const float* anc_data,
const float* prob_data, const float* tgt_data,
float* conf_data, float* bbox_data, Context<NV> *ctx);
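// Worked host-side sketch of the target-to-box decoding used by the RPN
// kernel above (the RCNN kernel below applies the same formulas to rois
// instead of anchors). The mean/std values here are hypothetical; the real
// ones come from the detector's bbox normalization configuration.
inline void decode_bbox_target_sketch() {
// Anchor centered at (100, 50) with size 64 x 32, pixel convention bsz01 = 1.
const float anchor_ctr_x = 100.f, anchor_ctr_y = 50.f;
const float anchor_w = 64.f, anchor_h = 32.f, bsz01 = 1.f;
// Normalized regression targets and (hypothetical) bbox_norm statistics.
float tg[4] = {0.5f, -0.2f, 0.1f, 0.3f};
const float mean[4] = {0.f, 0.f, 0.f, 0.f};
const float std_dev[4] = {0.1f, 0.1f, 0.2f, 0.2f};
for (int k = 0; k < 4; ++k) {
tg[k] = tg[k] * std_dev[k] + mean[k]; // undo bbox normalization
}
const float w = anchor_w * expf(tg[2]); // width/height scale exponentially
const float h = anchor_h * expf(tg[3]);
const float ctx = tg[0] * anchor_w + anchor_ctr_x; // center shifts linearly
const float cty = tg[1] * anchor_h + anchor_ctr_y;
const float ltx = ctx - 0.5f * (w - bsz01); // corners under the bsz01 convention
const float lty = cty - 0.5f * (h - bsz01);
const float rbx = ltx + w - bsz01;
const float rby = lty + h - bsz01;
(void)rbx;
(void)rby;
}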
// rcnn
template <typename Dtype>
__global__ void rcnn_cmp_conf_bbox_kernel(const int num_rois,
const Dtype input_height, const Dtype input_width,
const Dtype allow_border, const Dtype allow_border_ratio,
const Dtype min_size_w, const Dtype min_size_h,
const bool min_size_mode_and_else_or, const Dtype thr_obj,
const Dtype bsz01, const bool do_bbox_norm,
const Dtype mean0, const Dtype mean1,
const Dtype mean2, const Dtype mean3,
const Dtype std0, const Dtype std1,
const Dtype std2, const Dtype std3,
const bool refine_out_of_map_bbox, const bool regress_agnostic,
const int num_class, const Dtype* thr_cls,
const Dtype* rois_data, const Dtype* prob_data,
const Dtype* tgt_data, Dtype* conf_data,
Dtype* bbox_data) {
int probs_dim = num_class + 1;
int cords_dim = (regress_agnostic ? 2 : (num_class + 1)) * 4;
CUDA_KERNEL_LOOP(index, num_rois) {
const Dtype* probs = prob_data + index * probs_dim;
const Dtype* cords = tgt_data + index * cords_dim;
const Dtype* rois = rois_data + index * 5;
if ((1.0 - probs[0]) < thr_obj) {
conf_data[index] = 0.0;
continue;
}
if (int(rois[0]) == -1) {
conf_data[index] = 0.0;
continue;
}
Dtype score_max = -10e6;
int cls_max = -1;
for (int c = 0; c < num_class; c++) {
Dtype score_c = probs[c + 1] - thr_cls[c];
if (score_c > score_max) {
score_max = score_c;
cls_max = c;
}
}
if (score_max < 0) {
conf_data[index] = 0.0;
continue;
}
if (allow_border >= 0.0
|| allow_border_ratio >= 0.0) {
Dtype x1 = rois[1];
Dtype y1 = rois[2];
Dtype x2 = rois[3];
Dtype y2 = rois[4];
if (allow_border >= 0.0 && (
x1 < -allow_border || y1 < -allow_border
|| x2 > input_width - 1 + allow_border ||
y2 > input_height - 1 + allow_border )) {
conf_data[index] = 0.0;
continue;
} else if (allow_border_ratio >= 0.0) {
Dtype x11 = max(Dtype(0.0), x1);
Dtype y11 = max(Dtype(0.0), y1);
Dtype x22 = min(input_width - 1, x2);
Dtype y22 = min(input_height - 1, y2);
if ((y22 - y11 + bsz01) * (x22 - x11 + bsz01)
/ ((y2 - y1 + bsz01) * (x2 - x1 +bsz01))
< (1.0 - allow_border_ratio)) {
conf_data[index] = 0.0;
continue;
}
}
}
Dtype rois_w = rois[3] - rois[1] + bsz01;
Dtype rois_h = rois[4] - rois[2] + bsz01;
Dtype rois_ctr_x = rois[1] + 0.5 * (rois_w - bsz01);
Dtype rois_ctr_y = rois[2] + 0.5 * (rois_h - bsz01);
int cdst = regress_agnostic ? 4 : ((cls_max + 1) * 4);
Dtype tg0 = cords[cdst];
Dtype tg1 = cords[cdst + 1];
Dtype tg2 = cords[cdst + 2];
Dtype tg3 = cords[cdst + 3];
if (do_bbox_norm) {
tg0 = tg0 * std0 + mean0;
tg1 = tg1 * std1 + mean1;
tg2 = tg2 * std2 + mean2;
tg3 = tg3 * std3 + mean3;
}
Dtype tw = rois_w * exp(tg2);
Dtype th = rois_h * exp(tg3);
Dtype ctx = tg0 * rois_w + rois_ctr_x;
Dtype cty = tg1 * rois_h + rois_ctr_y;
Dtype ltx = ctx - 0.5 * (tw - bsz01);
Dtype lty = cty - 0.5 * (th - bsz01);
Dtype rbx = ltx + tw - bsz01;
Dtype rby = lty + th - bsz01;
if (refine_out_of_map_bbox) {
ltx = min(max(ltx, Dtype(0.0)), input_width -1);
lty = min(max(lty, Dtype(0.0)), input_height -1);
rbx = min(max(rbx, Dtype(0.0)), input_width -1);
rby = min(max(rby, Dtype(0.0)), input_height -1);
}
if (min_size_mode_and_else_or) {
if ((rbx - ltx + bsz01) < min_size_w
|| (rby - lty + bsz01) < min_size_h) {
conf_data[index] = 0.0;
continue;
}
} else {
if ((rbx - ltx + bsz01) < min_size_w
&& (rby - lty + bsz01) < min_size_h) {
conf_data[index] = 0.0;
continue;
}
}
conf_data[index] = probs[cls_max + 1];
bbox_data[index * 4] = ltx;
bbox_data[index * 4 + 1] = lty;
bbox_data[index * 4 + 2] = rbx;
bbox_data[index * 4 + 3] = rby;
}
}
template <typename Dtype>
void rcnn_cmp_conf_bbox_gpu(const int num_rois,
const Dtype input_height, const Dtype input_width,
const Dtype allow_border, const Dtype allow_border_ratio,
const Dtype min_size_w, const Dtype min_size_h,
const bool min_size_mode_and_else_or, const Dtype thr_obj,
const Dtype bsz01, const bool do_bbox_norm,
const Dtype mean0, const Dtype mean1,
const Dtype mean2, const Dtype mean3,
const Dtype std0, const Dtype std1,
const Dtype std2, const Dtype std3,
const bool refine_out_of_map_bbox, const bool regress_agnostic,
const int num_class, const Dtype* thr_cls,
const Dtype* rois_data, const Dtype* prob_data,
const Dtype* tgt_data, Dtype* conf_data,
Dtype* bbox_data, Context<NV> *ctx) {
int threads = num_rois;
rcnn_cmp_conf_bbox_kernel<Dtype><<<CUDA_GET_BLOCKS(threads),
CUDA_NUM_THREADS, 0, ctx->get_compute_stream()>>>(num_rois,
input_height, input_width,
allow_border, allow_border_ratio,
min_size_w, min_size_h,
min_size_mode_and_else_or, thr_obj,
bsz01, do_bbox_norm,
mean0, mean1,
mean2, mean3,
std0, std1,
std2, std3,
refine_out_of_map_bbox, regress_agnostic,
num_class, thr_cls,
rois_data, prob_data,
tgt_data, conf_data,
bbox_data);
CUDA_POST_KERNEL_CHECK;
}
template void rcnn_cmp_conf_bbox_gpu(const int num_rois,
const float input_height, const float input_width,
const float allow_border, const float allow_border_ratio,
const float min_size_w, const float min_size_h,
const bool min_size_mode_and_else_or, const float thr_obj,
const float bsz01, const bool do_bbox_norm,
const float mean0, const float mean1,
const float mean2, const float mean3,
const float std0, const float std1,
const float std2, const float std3,
const bool refine_out_of_map_bbox, const bool regress_agnostic,
const int num_class, const float* thr_cls,
const float* rois_data, const float* prob_data,
const float* tgt_data, float* conf_data,
float* bbox_data, Context<NV> *ctx);
// nms: some CUDA code copied and modified from yolo
template <typename Dtype>
__host__ __device__ Dtype bbox_size_gpu(const Dtype *bbox, const Dtype bsz01) {
if (bbox[2] < bbox[0] || bbox[3] < bbox[1]) {
return Dtype(0.);
} else {
return (bbox[2] - bbox[0] + bsz01) * (bbox[3] - bbox[1] + bsz01);
}
}
template <typename Dtype>
__host__ __device__ Dtype jaccard_overlap_gpu(const Dtype *bbox1,
const Dtype *bbox2, const Dtype bsz01) {
if (bbox2[0] > bbox1[2] || bbox2[2] < bbox1[0] ||
bbox2[1] > bbox1[3] || bbox2[3] < bbox1[1]) {
return Dtype(0.);
} else {
const Dtype inter_xmin = max(bbox1[0], bbox2[0]);
const Dtype inter_ymin = max(bbox1[1], bbox2[1]);
const Dtype inter_xmax = min(bbox1[2], bbox2[2]);
const Dtype inter_ymax = min(bbox1[3], bbox2[3]);
const Dtype inter_width = inter_xmax - inter_xmin + bsz01;
const Dtype inter_height = inter_ymax - inter_ymin + bsz01;
const Dtype inter_size = inter_width * inter_height;
const Dtype bbox1_size = bbox_size_gpu(bbox1, bsz01);
const Dtype bbox2_size = bbox_size_gpu(bbox2, bsz01);
return inter_size / (bbox1_size + bbox2_size - inter_size);
}
}
template <typename Dtype>
__global__ void compute_overlapped_by_idx_kernel(
const int nthreads, const Dtype *bbox_data, const int bbox_step,
const Dtype overlap_threshold, const int *idx, const int num_idx,
const Dtype bsz01, bool *overlapped_data) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < (nthreads); index += blockDim.x * gridDim.x) {
const int j = index % num_idx;
const int i = index / num_idx;
if (i == j) {
// Ignore same bbox; continue (rather than return) so the grid-stride loop
// still visits the remaining indices assigned to this thread.
continue;
}
// Compute overlap between i-th bbox and j-th bbox.
const int start_loc_i = idx[i] * bbox_step;
const int start_loc_j = idx[j] * bbox_step;
const Dtype overlap = jaccard_overlap_gpu(bbox_data + start_loc_i,
bbox_data + start_loc_j,
bsz01);
overlapped_data[index] = overlap > overlap_threshold;
}
}
//template <typename Dtype>
//void compute_overlapped_by_idx_gpu(
// const int nthreads, const Dtype *bbox_data, const int bbox_step,
// const Dtype overlap_threshold, const int *idx, const int num_idx,
// const Dtype bsz01, bool *overlapped_data) {
// // NOLINT_NEXT_LINE(whitespace/operators)
// const int thread_size = 256;
// int block_size = (nthreads + thread_size - 1) / thread_size;
// compute_overlapped_by_idx_kernel << < block_size, thread_size >> > (
// nthreads, bbox_data, bbox_step, overlap_threshold, idx, num_idx,
// bsz01, overlapped_data);
//}
template <typename Dtype>
void compute_overlapped_by_idx_gpu(
const int nthreads, const Dtype *bbox_data, const int bbox_step,
const Dtype overlap_threshold, const int *idx, const int num_idx,
const Dtype bsz01, bool *overlapped_data, const cudaStream_t &stream) {
// NOLINT_NEXT_LINE(whitespace/operators)
const int thread_size = 256;
int block_size = (nthreads + thread_size - 1) / thread_size;
// printf("thread_size = %d, block_size = %d\n", thread_size, block_size);
compute_overlapped_by_idx_kernel << < block_size, thread_size, 0, stream >> > (
nthreads, bbox_data, bbox_step, overlap_threshold, idx, num_idx,
bsz01, overlapped_data);
cudaDeviceSynchronize();
}
// Do nms, modified by mingli.
void apply_nms(const bool *overlapped, const int num, const int top_k,
const std::vector<int> &idxes, std::vector<int> *indices,
const int nmsed_num = 0, const int nmsed_loc = 0) {
std::vector<bool> mask(num, false);
if (nmsed_num > 0) {
int k_x_num_add_nmsed_num = nmsed_num;
for (int k = 0; k < nmsed_num; k++) {
int k_x_num_add_p = k_x_num_add_nmsed_num;
for (int p = nmsed_num; p < num; p++) {
if (overlapped[k_x_num_add_p++]) {
mask[p] = true;
}
}
k_x_num_add_nmsed_num += num;
}
}
int count = nmsed_num;
int k_x_num = (nmsed_num -1) * num;
for (int k = nmsed_num; k < num; k++) {
k_x_num += num;
if (mask[k]) {
continue;
} else {
indices->push_back(idxes[nmsed_loc + k - nmsed_num]);
if (++count >= top_k) {
break;
}
int k_x_num_add_p = k_x_num + k + 1;
for (int p = k + 1; p < num; p++) {
if (overlapped[k_x_num_add_p++]) {
mask[p] = true;
}
}
}
}
}
template <typename Dtype, typename PGlue_nv>
void apply_nms_gpu(const Dtype *bbox_data, const Dtype *conf_data,
const int num_bboxes, const int bbox_step, const Dtype confidence_threshold,
const int max_candidate_n, const int top_k, const Dtype nms_threshold,
const Dtype bsz01, std::vector<int> *indices,
PGlue_nv *overlapped, PGlue_nv *idx_sm,
Context<NV> *ctx, std::vector<int> *idx_ptr,
const int conf_step, const int conf_idx,
const int nms_gpu_max_n_per_time) {
indices->clear();
std::vector<int> idx;
std::vector<Dtype> confidences;
if (idx_ptr == NULL) {
if (conf_step == 1) {
for (int i = 0; i < num_bboxes; ++i) {
if (conf_data[i] > confidence_threshold) {
idx.push_back(i);
confidences.push_back(conf_data[i]);
}
}
} else {
int i_x_step_add_idx = conf_idx;
for (int i = 0; i < num_bboxes; ++i) {
if (conf_data[i_x_step_add_idx] > confidence_threshold) {
idx.push_back(i);
confidences.push_back(conf_data[i_x_step_add_idx]);
}
i_x_step_add_idx += conf_step;
}
}
} else {
if (conf_step == 1) {
for (int k = 0; k < idx_ptr->size(); k++) {
int i = (*idx_ptr)[k];
if (conf_data[i] > confidence_threshold) {
idx.push_back(i);
confidences.push_back(conf_data[i]);
}
}
} else {
for (int k = 0; k < idx_ptr->size(); k++) {
int i = (*idx_ptr)[k];
int i_x_step_add_idx = i * conf_step + conf_idx;
if (conf_data[i_x_step_add_idx] > confidence_threshold) {
idx.push_back(i);
confidences.push_back(conf_data[i_x_step_add_idx]);
}
}
}
}
int num_remain = confidences.size();
if (num_remain == 0) {
return;
}
if (nms_threshold >= Dtype(1.0)) {
for (int i = 0; i < idx.size(); i++) {
indices->push_back(idx[i]);
}
return;
}
thrust::sort_by_key(&confidences[0], &confidences[0] + num_remain, &idx[0],
thrust::greater<Dtype>());
if (max_candidate_n > -1 && max_candidate_n < num_remain) {
num_remain = max_candidate_n;
}
int idx_loc = 0;
int indices_size_pre = 0;
while (idx_loc < num_remain && indices->size() < top_k) {
int *idx_data = (int*)idx_sm->host_mutable_data(ctx);
std::copy(indices->begin() + indices_size_pre,
indices->end(), idx_data + indices_size_pre);
int idx_num_cur_time = min(int(nms_gpu_max_n_per_time - indices->size()),
int(num_remain - idx_loc));
std::copy(idx.begin() + idx_loc, idx.begin() + idx_loc + idx_num_cur_time,
idx_data + indices->size());
int candidate_n_cur_time = indices->size() + idx_num_cur_time;
int total_bboxes = candidate_n_cur_time * candidate_n_cur_time;
bool *overlapped_data = (bool*)overlapped->device_mutable_data(ctx);
compute_overlapped_by_idx_gpu(total_bboxes, bbox_data, bbox_step,
nms_threshold, (const int*)idx_sm->device_data(ctx),
candidate_n_cur_time, bsz01, overlapped_data, ctx->get_compute_stream());
const bool *overlapped_results = (const bool*)overlapped->host_data(ctx);
indices_size_pre = indices->size();
apply_nms(overlapped_results, candidate_n_cur_time, top_k,
idx, indices, indices->size(), idx_loc);
idx_loc += idx_num_cur_time;
}
}
template void apply_nms_gpu(const float *bbox_data, const float *conf_data,
const int num_bboxes, const int bbox_step, const float confidence_threshold,
const int max_candidate_n, const int top_k, const float nms_threshold,
const float bsz01, std::vector<int> *indices,
PGlue<Tensor<NV>, Tensor<NVHX86> > *overlapped,
PGlue<Tensor<NV>, Tensor<NVHX86> > *idx_sm,
Context<NV> *ctx, std::vector<int> *idx_ptr,
const int conf_step, const int conf_idx, const int nms_gpu_max_n_per_time);
template <typename Dtype>
void GenGrdFt_cpu(unsigned int im_width,
unsigned int im_height, unsigned int blob_width,
unsigned int blob_height, Dtype std_height,
const std::vector<Dtype> & cam_params, Dtype* grd_ft,
Dtype read_width_scale, Dtype read_height_scale,
unsigned int read_height_offset, unsigned int valid_param_idx_st,
bool trans_cam_pitch_to_zero, bool normalize_grd_ft,
unsigned int normalize_grd_ft_dim) {
CHECK_GT(im_width, 0);
CHECK_GT(im_height, 0);
CHECK_GE(blob_width, im_width);
CHECK_GE(blob_height, im_height);
CHECK_GT(read_width_scale, 0);
CHECK_GT(read_height_scale, 0);
CHECK_LE(valid_param_idx_st + 6, cam_params.size());
Dtype cam_xpz = cam_params[valid_param_idx_st + 0];
Dtype cam_xct = cam_params[valid_param_idx_st + 1];
Dtype cam_ypz = cam_params[valid_param_idx_st + 2];
Dtype cam_yct = cam_params[valid_param_idx_st + 3];
Dtype cam_hgrd = cam_params[valid_param_idx_st + 4];
Dtype cam_pitch = cam_params[valid_param_idx_st + 5];
CHECK_GT(cam_xpz, 0);
CHECK_GT(cam_ypz, 0);
CHECK_GT(cam_hgrd, 0);
Dtype min_py_grd = cam_yct + cam_ypz * tan(cam_pitch);
Dtype min_r_grd = (min_py_grd - read_height_offset)
* read_height_scale;
for (int r = 0; r < im_height; r++) {
Dtype py_grd;
Dtype z_grd, y_grd;
Dtype z_std_h_upon_grd, y_std_h_upon_grd;
Dtype py_std_h_upon_grd, r_std_h_upon_grd;
if (r > min_r_grd) {
py_grd = r / read_height_scale + read_height_offset;
z_grd = cam_ypz * cam_hgrd
/ (py_grd - cam_yct - cam_ypz * tan(cam_pitch));
y_grd = cam_hgrd + z_grd * tan(cam_pitch);
z_std_h_upon_grd = z_grd + std_height
* (trans_cam_pitch_to_zero?0.0:tan(cam_pitch));
y_std_h_upon_grd = y_grd - std_height
* (trans_cam_pitch_to_zero?1.0:cos(cam_pitch));
py_std_h_upon_grd = cam_ypz * y_std_h_upon_grd
/ z_std_h_upon_grd + cam_yct;
r_std_h_upon_grd = (py_std_h_upon_grd - read_height_offset)
* read_height_scale;
}
for (int c = 0; c < im_width; c++) {
if (r <= min_r_grd) {
grd_ft[r * blob_width + c] = Dtype(0.0);
} else {
Dtype px_grd = c / read_width_scale;
Dtype x_grd = (px_grd - cam_xct) * z_grd / cam_xpz;
Dtype x_std_h_upon_grd = x_grd;
Dtype px_std_h_upon_grd = cam_xpz * x_std_h_upon_grd
/ z_std_h_upon_grd + cam_xct;
Dtype c_std_h_upon_grd = px_std_h_upon_grd
* read_width_scale;
Dtype std_h_prj_scale =
sqrt((c_std_h_upon_grd - c) * (c_std_h_upon_grd - c)
+ (r_std_h_upon_grd - r) * (r_std_h_upon_grd - r));
if (!normalize_grd_ft) {
grd_ft[r * blob_width + c] = std_h_prj_scale;
} else {
int norm_chl = std::min<int>(normalize_grd_ft_dim - 1,
std::max<int>(0, static_cast<int>(
std::ceil(std::log(std_h_prj_scale) / std::log(2.0)))));
grd_ft[(norm_chl * blob_height + r) * blob_width + c] =
std_h_prj_scale / std::pow(2.0, norm_chl);
}
}
}
}
}
template void GenGrdFt_cpu(unsigned int im_width,
unsigned int im_height, unsigned int blob_width,
unsigned int blob_height, float std_height,
const std::vector<float> & cam_params, float* grd_ft,
float read_width_scale, float read_height_scale,
unsigned int read_height_offset, unsigned int valid_param_idx_st,
bool trans_cam_pitch_to_zero, bool normalize_grd_ft,
unsigned int normalize_grd_ft_dim);
template <typename Dtype>
__global__ void GenGrdFt_kernel(unsigned int im_width, unsigned int blob_width,
unsigned int blob_height, unsigned int n, Dtype std_height, Dtype cam_xpz,
Dtype cam_xct, Dtype cam_ypz, Dtype cam_yct, Dtype cam_hgrd, Dtype cam_pitch,
Dtype cam_tanh, Dtype cam_ypz_x_tanh, Dtype std_height_x_tanh, Dtype std_height_x_cos,
Dtype cam_ypz_x_cam_hgrd, Dtype read_width_scale, Dtype read_height_scale,
unsigned int read_height_offset, Dtype min_py_grd, Dtype min_r_grd,
bool normalize_grd_ft, unsigned int normalize_grd_ft_dim, Dtype* grd_ft_gpu_data) {
CUDA_KERNEL_LOOP(index, n) {
int r = index / im_width;
int c = index % im_width;
if (r <= min_r_grd) {
grd_ft_gpu_data[r * blob_width + c] = Dtype(0.0);
} else {
Dtype py_grd = r / read_height_scale + read_height_offset;
Dtype z_grd = cam_ypz_x_cam_hgrd
/ (py_grd - cam_yct - cam_ypz_x_tanh);
Dtype y_grd = cam_hgrd + z_grd * cam_tanh;
Dtype z_std_h_upon_grd = z_grd + std_height_x_tanh;
Dtype y_std_h_upon_grd = y_grd - std_height_x_cos;
Dtype py_std_h_upon_grd = cam_ypz * y_std_h_upon_grd
/ z_std_h_upon_grd + cam_yct;
Dtype r_std_h_upon_grd = (py_std_h_upon_grd - read_height_offset)
* read_height_scale;
Dtype px_grd = c / read_width_scale;
Dtype x_grd = (px_grd - cam_xct) * z_grd / cam_xpz;
Dtype x_std_h_upon_grd = x_grd;
Dtype px_std_h_upon_grd = cam_xpz * x_std_h_upon_grd
/ z_std_h_upon_grd + cam_xct;
Dtype c_std_h_upon_grd = px_std_h_upon_grd * read_width_scale;
Dtype std_h_prj_scale =
sqrt((c_std_h_upon_grd - c) * (c_std_h_upon_grd - c)
+ (r_std_h_upon_grd - r) * (r_std_h_upon_grd - r));
if (!normalize_grd_ft) {
grd_ft_gpu_data[r * blob_width + c] = std_h_prj_scale;
} else {
int norm_chl = min(normalize_grd_ft_dim - 1, max(0,
int(ceil(log(std_h_prj_scale) / log(2.0)))));
grd_ft_gpu_data[(norm_chl * blob_height + r) * blob_width + c] =
std_h_prj_scale / pow(2.0, norm_chl);
}
}
}
}
template <typename Dtype>
void GenGrdFt_gpu(unsigned int im_width,
unsigned int im_height, unsigned int blob_width,
unsigned int blob_height, Dtype std_height,
const std::vector<Dtype> & cam_params, Dtype* grd_ft,
Dtype read_width_scale, Dtype read_height_scale,
unsigned int read_height_offset, unsigned int valid_param_idx_st,
bool trans_cam_pitch_to_zero, bool normalize_grd_ft,
unsigned int normalize_grd_ft_dim) {
CHECK_GT(im_width, 0);
CHECK_GT(im_height, 0);
CHECK_GE(blob_width, im_width);
CHECK_GE(blob_height, im_height);
CHECK_GT(read_width_scale, 0);
CHECK_GT(read_height_scale, 0);
CHECK_LE(valid_param_idx_st + 6, cam_params.size());
Dtype cam_xpz = cam_params[valid_param_idx_st + 0];
Dtype cam_xct = cam_params[valid_param_idx_st + 1];
Dtype cam_ypz = cam_params[valid_param_idx_st + 2];
Dtype cam_yct = cam_params[valid_param_idx_st + 3];
Dtype cam_hgrd = cam_params[valid_param_idx_st + 4];
Dtype cam_pitch = cam_params[valid_param_idx_st + 5];
CHECK_GT(cam_xpz, 0);
CHECK_GT(cam_ypz, 0);
CHECK_GT(cam_hgrd, 0);
Dtype cam_tanh = tanh(cam_pitch);
Dtype cam_ypz_x_tanh = cam_ypz * cam_tanh;
Dtype std_height_x_tanh = std_height
* (trans_cam_pitch_to_zero ? 0.0 : tanh(cam_pitch));
Dtype std_height_x_cos = std_height
* (trans_cam_pitch_to_zero ? 1.0 : cos(cam_pitch));
Dtype cam_ypz_x_cam_hgrd = cam_ypz * cam_hgrd;
Dtype min_py_grd = cam_yct + cam_ypz_x_tanh;
Dtype min_r_grd = (min_py_grd - read_height_offset)
* read_height_scale;
int count = im_height * im_width;
GenGrdFt_kernel<Dtype><<<CUDA_GET_BLOCKS(count, CUDA_NUM_THREADS),
CUDA_NUM_THREADS>>>(
im_width, blob_width, blob_height, count,
std_height, cam_xpz, cam_xct, cam_ypz,
cam_yct, cam_hgrd, cam_pitch, cam_tanh,
cam_ypz_x_tanh, std_height_x_tanh,
std_height_x_cos, cam_ypz_x_cam_hgrd,
read_width_scale, read_height_scale,
read_height_offset, min_py_grd,
min_r_grd, normalize_grd_ft,
normalize_grd_ft_dim, grd_ft);
CUDA_POST_KERNEL_CHECK;
}
template void GenGrdFt_gpu(unsigned int im_width,
unsigned int im_height, unsigned int blob_width,
unsigned int blob_height, float std_height,
const std::vector<float> & cam_params, float* grd_ft,
float read_width_scale, float read_height_scale,
unsigned int read_height_offset, unsigned int valid_param_idx_st,
bool trans_cam_pitch_to_zero, bool normalize_grd_ft,
unsigned int normalize_grd_ft_dim);
}
}
|
c1ad7f27c5a564febb55603befa0866ef7db20f8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef _WIN32
# define WINDOWS_LEAN_AND_MEAN
# define NOMINMAX
# include <windows.h>
#endif
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, GL
#include <GL/glew.h>
#if defined (__APPLE__) || defined(MACOSX)
#include <GLUT/glut.h>
#else
#include <GL/glut.h>
#endif
// includes
#include <cutil.h>
#include <cutil_gl_error.h>
#include <cuda_gl_interop.h>
#include <assert.h>
//print include- for debugging purposes
//#include "cuPrintf_hip.cuh"
////////////////////////////////////////////////////////////////////////////////
// constants
#define BLOCK_SIZE 128 // :: Choose one! // Number of threads in a block.
unsigned int numBodies; // Number of particles; determined at runtime.
// window
int window_width = 512;
int window_height = 512;
// Flag for pingpong;
int pingpong = 0;
// vbo variables
GLuint vbo_pos[2];
// Device buffer variables
float4* dVels[2];
// mouse controls
int mouse_old_x, mouse_old_y;
int mouse_buttons = 0;
float rotate_x = 0.0, rotate_y = 0.0;
float translate_z = -30.0;
////////////////////////////////////////////////////////////////////////////////
// kernels
__global__ void interact_kernel( float4* newPos, float4* oldPos, float4* newVel, float4* oldVel, float dt, float damping, int numBodies);
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runParticles( int argc, char** argv);
// GL functionality
CUTBoolean initGL();
void createVBOs( GLuint* vbo);
void deleteVBOs( GLuint* vbo);
// rendering callbacks
void display();
void keyboard( unsigned char key, int x, int y);
void mouse(int button, int state, int x, int y);
void motion(int x, int y);
// Cuda functionality
void runCuda( GLuint *vbo, float dt);
void createDeviceData();
void deleteDeviceData();
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
if(argc < 2)
{
printf("Usage: ./particles_interact N\n");
exit(1);
}
numBodies = atoi(argv[1]);
//cudaPrintfInit(numBodies * sizeof(float));
runParticles( argc, argv);
//cudaPrintfEnd();
CUT_EXIT(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple particle simulation for CUDA
////////////////////////////////////////////////////////////////////////////////
void runParticles( int argc, char** argv)
{
// Create GL context
glutInit( &argc, argv);
glutInitDisplayMode( GLUT_RGBA | GLUT_DOUBLE);
glutInitWindowSize( window_width, window_height);
glutCreateWindow( " Interacting Particles ");
// Init random number generator
srand(time(0));
// initialize GL
if( CUTFalse == initGL()) {
return;
}
// register callbacks
glutDisplayFunc( display);
glutKeyboardFunc( keyboard);
glutMouseFunc( mouse);
glutMotionFunc( motion);
// create VBO
createVBOs( vbo_pos );
// create other device memory (velocities!)
createDeviceData();
// start rendering mainloop
glutMainLoop();
}
////////////////////////////////////////////////////////////////////////////////
//! Run the Cuda part of the computation
////////////////////////////////////////////////////////////////////////////////
void runCuda( GLuint *vbo, float dt)
{
// map OpenGL buffer object for writing from CUDA
float4* oldPos;
float4* newPos;
// Velocity damping factor
float damping = 0.995;
// :: Map opengl buffers to CUDA.
CUDA_SAFE_CALL(hipGLMapBufferObject__((void**)&oldPos, vbo[pingpong]));
CUDA_SAFE_CALL(hipGLMapBufferObject__((void**)&newPos, vbo[1-pingpong]));
// :: Choose a block size, a grid size, an amount of shared mem,
// and execute the kernel
// dVels is the particle velocities old, new. Pingponging of these is
// handled, if the initial conditions have initial velocities in dVels[0].
dim3 block (BLOCK_SIZE, 1, 1);
dim3 grid (numBodies / BLOCK_SIZE, 1, 1);
unsigned int sharedMemSize = 4 * BLOCK_SIZE * sizeof(float);
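// Note: the grid size above assumes numBodies is a multiple of BLOCK_SIZE
// (integer division drops any remainder), and the shared memory request is
// one float4 (4 floats) per thread in the block.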
hipLaunchKernelGGL(( interact_kernel), dim3(grid), dim3(block), sharedMemSize , 0, newPos, oldPos, dVels[1-pingpong], dVels[pingpong], dt, damping, numBodies );
//cudaPrintfDisplay(stdout, true);
// :: unmap buffer objects from cuda.
CUDA_SAFE_CALL(hipGLUnmapBufferObject(vbo[pingpong]));
CUDA_SAFE_CALL(hipGLUnmapBufferObject(vbo[1-pingpong]));
// :: Switch buffers between old/new
pingpong ^= 1;
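// After the swap, index 'pingpong' refers to the buffers that were just
// written; they are the ones rendered by display() and become
// oldPos/oldVel on the next call.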
}
////////////////////////////////////////////////////////////////////////////////
//! Initialize GL
////////////////////////////////////////////////////////////////////////////////
CUTBoolean initGL()
{
// initialize necessary OpenGL extensions
glewInit();
if (! glewIsSupported( "GL_VERSION_2_0 "
"GL_ARB_pixel_buffer_object"
)) {
fprintf( stderr, "ERROR: Support for necessary OpenGL extensions missing.");
fflush( stderr);
return CUTFalse;
}
// default initialization
glClearColor( 0.0, 0.0, 0.0, 1.0);
glDisable( GL_DEPTH_TEST);
// viewport
glViewport( 0, 0, window_width, window_height);
// projection
glMatrixMode( GL_PROJECTION);
glLoadIdentity();
// :: depending on your parameters, you may need to change
// near and far view distances (1, 500), to better see the simulation.
// If you do this, probably also change translate_z initial value at top.
gluPerspective(60.0, (GLfloat)window_width / (GLfloat) window_height, 1, 500.0);
CUT_CHECK_ERROR_GL();
return CUTTrue;
}
////////////////////////////////////////////////////////////////////////////////
//! Create VBO
////////////////////////////////////////////////////////////////////////////////
void createVBOs(GLuint* vbo)
{
// create buffer object
glGenBuffers( 2, vbo);
glBindBuffer( GL_ARRAY_BUFFER, vbo[0]);
// initialize buffer object; this will be used as 'oldPos' initially
unsigned int size = numBodies * 4 * sizeof( float);
// :: Modify initial positions!
float4* temppos = (float4*)malloc(numBodies*4*sizeof(float));
for(int i = 0; i < numBodies; i++)
{
//Placing particles in each of the 4 2D quadrants
int quadrant = floor(((float)rand())/RAND_MAX * 4.) + 1;
if (quadrant == 1)
{
temppos[i].x = 6.*((float)rand())/RAND_MAX + 1.5;
temppos[i].y = 6.*((float)rand())/RAND_MAX + 1.5;
temppos[i].z = 6.*((float)rand())/RAND_MAX + 1.5;
temppos[i].w = 1.;
}
else if (quadrant == 2)
{
temppos[i].x = -6.*((float)rand())/RAND_MAX - 1.5;
temppos[i].y = 6.*((float)rand())/RAND_MAX + 1.5;
temppos[i].z = 6.*((float)rand())/RAND_MAX + 1.5;
temppos[i].w = 1.;
}
else if (quadrant == 3)
{
temppos[i].x = -6.*((float)rand())/RAND_MAX - 1.5;
temppos[i].y = -6.*((float)rand())/RAND_MAX - 1.5;
temppos[i].z = 6.*((float)rand())/RAND_MAX + 1.5;
temppos[i].w = 1.;
}
else if (quadrant == 4)
{
temppos[i].x = 6.*((float)rand())/RAND_MAX + 1.5;
temppos[i].y = -6.*((float)rand())/RAND_MAX - 1.5;
temppos[i].z = 6.*((float)rand())/RAND_MAX + 1.5;
temppos[i].w = 1.;
}
else
{
temppos[i].x = 6.*((float)rand())/RAND_MAX + 1.5;
temppos[i].y = 6.*((float)rand())/RAND_MAX + 1.5;
temppos[i].z = 6.*((float)rand())/RAND_MAX + 1.5;
temppos[i].w = 1.;
}
}
// Notice only vbo[0] has initial data!
glBufferData( GL_ARRAY_BUFFER, size, temppos, GL_DYNAMIC_DRAW);
free(temppos);
// Create initial 'newPos' buffer
glBindBuffer( GL_ARRAY_BUFFER, vbo[1]);
glBufferData(GL_ARRAY_BUFFER, size, 0, GL_DYNAMIC_DRAW);
glBindBuffer( GL_ARRAY_BUFFER, 0);
// register buffer objects with CUDA
CUDA_SAFE_CALL(hipGLRegisterBufferObject(vbo[0]));
CUDA_SAFE_CALL(hipGLRegisterBufferObject(vbo[1]));
CUT_CHECK_ERROR_GL();
}
////////////////////////////////////////////////////////////////////////////////
//! Delete VBO
////////////////////////////////////////////////////////////////////////////////
void deleteVBOs( GLuint* vbo)
{
glBindBuffer( 1, vbo[0]);
glDeleteBuffers( 1, &vbo[0]);
glBindBuffer( 1, vbo[1]);
glDeleteBuffers( 1, &vbo[1]);
CUDA_SAFE_CALL(hipGLUnregisterBufferObject(vbo[0]));
CUDA_SAFE_CALL(hipGLUnregisterBufferObject(vbo[1]));
*vbo = 0;
}
////////////////////////////////////////////////////////////////////////////////
//! Create device data
////////////////////////////////////////////////////////////////////////////////
void createDeviceData()
{
// :: Modify velocities if need be.
CUDA_SAFE_CALL( hipMalloc( (void**)&dVels[0], numBodies *
4 * sizeof(float) ) );
CUDA_SAFE_CALL( hipMalloc( (void**)&dVels[1], numBodies *
4 * sizeof(float) ) );
// Initialize data.
float4* tempvels = (float4*)malloc(numBodies * 4*sizeof(float));
for(int i = 0; i < numBodies; ++i)
{
tempvels[i].x = .25*((2*(float)rand())/RAND_MAX-1);
tempvels[i].y = .25*((2*(float)rand())/RAND_MAX-1);
tempvels[i].z = .25*((2*(float)rand())/RAND_MAX-1);
tempvels[i].w = 0.;
}
// Copy to gpu
CUDA_SAFE_CALL( hipMemcpy( dVels[0], tempvels, numBodies*4*sizeof(float), hipMemcpyHostToDevice) );
free(tempvels);
}
////////////////////////////////////////////////////////////////////////////////
//! Delete device data
////////////////////////////////////////////////////////////////////////////////
void deleteDeviceData()
{
// Create a velocity for every position.
CUDA_SAFE_CALL( hipFree( dVels[0] ) );
CUDA_SAFE_CALL( hipFree( dVels[1] ) );
// pos's are the VBOs
}
////////////////////////////////////////////////////////////////////////////////
//! Display callback
////////////////////////////////////////////////////////////////////////////////
void display()
{
runCuda(vbo_pos, 0.001);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// set view matrix
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glTranslatef(0.0, 0.0, translate_z);
glRotatef(rotate_x, 1.0, 0.0, 0.0);
glRotatef(rotate_y, 0.0, 1.0, 0.0);
// render from the vbo with newPos
glBindBuffer(GL_ARRAY_BUFFER, vbo_pos[pingpong]);
glVertexPointer(4, GL_FLOAT, 0, 0);
glEnableClientState(GL_VERTEX_ARRAY);
glColor3f(0.0, 1.0, 0.0);
glDrawArrays(GL_POINTS, 0, numBodies);
glDisableClientState(GL_VERTEX_ARRAY);
glutSwapBuffers();
glutPostRedisplay();
}
////////////////////////////////////////////////////////////////////////////////
//! Keyboard events handler
////////////////////////////////////////////////////////////////////////////////
void keyboard( unsigned char key, int /*x*/, int /*y*/)
{
switch( key) {
case( 27) :
deleteVBOs( vbo_pos );
deleteDeviceData();
exit( 0);
case( 81) :
deleteVBOs( vbo_pos );
deleteDeviceData();
exit( 0);
case( 113) :
deleteVBOs( vbo_pos );
deleteDeviceData();
exit( 0);
}
}
////////////////////////////////////////////////////////////////////////////////
//! Mouse event handlers
////////////////////////////////////////////////////////////////////////////////
void mouse(int button, int state, int x, int y)
{
if (state == GLUT_DOWN) {
mouse_buttons |= 1<<button;
} else if (state == GLUT_UP) {
mouse_buttons = 0;
}
mouse_old_x = x;
mouse_old_y = y;
glutPostRedisplay();
}
void motion(int x, int y)
{
float dx, dy;
dx = x - mouse_old_x;
dy = y - mouse_old_y;
if (mouse_buttons & 1) {
rotate_x += dy * 0.2;
rotate_y += dx * 0.2;
} else if (mouse_buttons & 4) {
translate_z += dy * 0.01;
}
mouse_old_x = x;
mouse_old_y = y;
}
|
c1ad7f27c5a564febb55603befa0866ef7db20f8.cu
|
#ifdef _WIN32
# define WINDOWS_LEAN_AND_MEAN
# define NOMINMAX
# include <windows.h>
#endif
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, GL
#include <GL/glew.h>
#if defined (__APPLE__) || defined(MACOSX)
#include <GLUT/glut.h>
#else
#include <GL/glut.h>
#endif
// includes
#include <cutil.h>
#include <cutil_gl_error.h>
#include <cuda_gl_interop.h>
#include <assert.h>
//print include- for debugging purposes
//#include "cuPrintf.cuh"
////////////////////////////////////////////////////////////////////////////////
// constants
#define BLOCK_SIZE 128 // :: Choose one! // Number of threads in a block.
unsigned int numBodies; // Number of particles; determined at runtime.
// window
int window_width = 512;
int window_height = 512;
// Flag for pingpong;
int pingpong = 0;
// vbo variables
GLuint vbo_pos[2];
// Device buffer variables
float4* dVels[2];
// mouse controls
int mouse_old_x, mouse_old_y;
int mouse_buttons = 0;
float rotate_x = 0.0, rotate_y = 0.0;
float translate_z = -30.0;
////////////////////////////////////////////////////////////////////////////////
// kernels
__global__ void interact_kernel( float4* newPos, float4* oldPos, float4* newVel, float4* oldVel, float dt, float damping, int numBodies);
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runParticles( int argc, char** argv);
// GL functionality
CUTBoolean initGL();
void createVBOs( GLuint* vbo);
void deleteVBOs( GLuint* vbo);
// rendering callbacks
void display();
void keyboard( unsigned char key, int x, int y);
void mouse(int button, int state, int x, int y);
void motion(int x, int y);
// Cuda functionality
void runCuda( GLuint *vbo, float dt);
void createDeviceData();
void deleteDeviceData();
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
if(argc < 2)
{
printf("Usage: ./particles_interact N\n");
exit(1);
}
numBodies = atoi(argv[1]);
//cudaPrintfInit(numBodies * sizeof(float));
runParticles( argc, argv);
//cudaPrintfEnd();
CUT_EXIT(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple particle simulation for CUDA
////////////////////////////////////////////////////////////////////////////////
void runParticles( int argc, char** argv)
{
// Create GL context
glutInit( &argc, argv);
glutInitDisplayMode( GLUT_RGBA | GLUT_DOUBLE);
glutInitWindowSize( window_width, window_height);
glutCreateWindow( " Interacting Particles ");
// Init random number generator
srand(time(0));
// initialize GL
if( CUTFalse == initGL()) {
return;
}
// register callbacks
glutDisplayFunc( display);
glutKeyboardFunc( keyboard);
glutMouseFunc( mouse);
glutMotionFunc( motion);
// create VBO
createVBOs( vbo_pos );
// create other device memory (velocities!)
createDeviceData();
// start rendering mainloop
glutMainLoop();
}
////////////////////////////////////////////////////////////////////////////////
//! Run the Cuda part of the computation
////////////////////////////////////////////////////////////////////////////////
void runCuda( GLuint *vbo, float dt)
{
// map OpenGL buffer object for writing from CUDA
float4* oldPos;
float4* newPos;
// Velocity damping factor
float damping = 0.995;
// :: Map opengl buffers to CUDA.
CUDA_SAFE_CALL(cudaGLMapBufferObject((void**)&oldPos, vbo[pingpong]));
CUDA_SAFE_CALL(cudaGLMapBufferObject((void**)&newPos, vbo[1-pingpong]));
// :: Choose a block size, a grid size, an amount of shared mem,
// and execute the kernel
// dVels is the particle velocities old, new. Pingponging of these is
// handled, if the initial conditions have initial velocities in dVels[0].
dim3 block (BLOCK_SIZE, 1, 1);
dim3 grid (numBodies / BLOCK_SIZE, 1, 1);
unsigned int sharedMemSize = 4 * BLOCK_SIZE * sizeof(float);
interact_kernel<<< grid, block, sharedMemSize >>>( newPos, oldPos, dVels[1-pingpong], dVels[pingpong], dt, damping, numBodies );
//cudaPrintfDisplay(stdout, true);
// :: unmap buffer objects from cuda.
CUDA_SAFE_CALL(cudaGLUnmapBufferObject(vbo[pingpong]));
CUDA_SAFE_CALL(cudaGLUnmapBufferObject(vbo[1-pingpong]));
// :: Switch buffers between old/new
pingpong ^= 1;
}
////////////////////////////////////////////////////////////////////////////////
//! Initialize GL
////////////////////////////////////////////////////////////////////////////////
CUTBoolean initGL()
{
// initialize necessary OpenGL extensions
glewInit();
if (! glewIsSupported( "GL_VERSION_2_0 "
"GL_ARB_pixel_buffer_object"
)) {
fprintf( stderr, "ERROR: Support for necessary OpenGL extensions missing.");
fflush( stderr);
return CUTFalse;
}
// default initialization
glClearColor( 0.0, 0.0, 0.0, 1.0);
glDisable( GL_DEPTH_TEST);
// viewport
glViewport( 0, 0, window_width, window_height);
// projection
glMatrixMode( GL_PROJECTION);
glLoadIdentity();
// :: depending on your parameters, you may need to change
// near and far view distances (1, 500), to better see the simulation.
// If you do this, probably also change translate_z initial value at top.
gluPerspective(60.0, (GLfloat)window_width / (GLfloat) window_height, 1, 500.0);
CUT_CHECK_ERROR_GL();
return CUTTrue;
}
////////////////////////////////////////////////////////////////////////////////
//! Create VBO
////////////////////////////////////////////////////////////////////////////////
void createVBOs(GLuint* vbo)
{
// create buffer object
glGenBuffers( 2, vbo);
glBindBuffer( GL_ARRAY_BUFFER, vbo[0]);
// initialize buffer object; this will be used as 'oldPos' initially
unsigned int size = numBodies * 4 * sizeof( float);
// :: Modify initial positions!
float4* temppos = (float4*)malloc(numBodies*4*sizeof(float));
for(int i = 0; i < numBodies; i++)
{
//Placing particles in each of the 4 2D quadrants
int quadrant = floor(((float)rand())/RAND_MAX * 4.) + 1;
if (quadrant == 1)
{
temppos[i].x = 6.*((float)rand())/RAND_MAX + 1.5;
temppos[i].y = 6.*((float)rand())/RAND_MAX + 1.5;
temppos[i].z = 6.*((float)rand())/RAND_MAX + 1.5;
temppos[i].w = 1.;
}
else if (quadrant == 2)
{
temppos[i].x = -6.*((float)rand())/RAND_MAX - 1.5;
temppos[i].y = 6.*((float)rand())/RAND_MAX + 1.5;
temppos[i].z = 6.*((float)rand())/RAND_MAX + 1.5;
temppos[i].w = 1.;
}
else if (quadrant == 3)
{
temppos[i].x = -6.*((float)rand())/RAND_MAX - 1.5;
temppos[i].y = -6.*((float)rand())/RAND_MAX - 1.5;
temppos[i].z = 6.*((float)rand())/RAND_MAX + 1.5;
temppos[i].w = 1.;
}
else if (quadrant == 4)
{
temppos[i].x = 6.*((float)rand())/RAND_MAX + 1.5;
temppos[i].y = -6.*((float)rand())/RAND_MAX - 1.5;
temppos[i].z = 6.*((float)rand())/RAND_MAX + 1.5;
temppos[i].w = 1.;
}
else
{
temppos[i].x = 6.*((float)rand())/RAND_MAX + 1.5;
temppos[i].y = 6.*((float)rand())/RAND_MAX + 1.5;
temppos[i].z = 6.*((float)rand())/RAND_MAX + 1.5;
temppos[i].w = 1.;
}
}
// Notice only vbo[0] has initial data!
glBufferData( GL_ARRAY_BUFFER, size, temppos, GL_DYNAMIC_DRAW);
free(temppos);
// Create initial 'newPos' buffer
glBindBuffer( GL_ARRAY_BUFFER, vbo[1]);
glBufferData(GL_ARRAY_BUFFER, size, 0, GL_DYNAMIC_DRAW);
glBindBuffer( GL_ARRAY_BUFFER, 0);
// register buffer objects with CUDA
CUDA_SAFE_CALL(cudaGLRegisterBufferObject(vbo[0]));
CUDA_SAFE_CALL(cudaGLRegisterBufferObject(vbo[1]));
CUT_CHECK_ERROR_GL();
}
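/* Hedged aside (added note, not in the original sample): cudaGLRegisterBufferObject /
   cudaGLMapBufferObject belong to the legacy GL-interop API. A minimal sketch of the
   cudaGraphicsResource-based path for one buffer (variable names here are illustrative,
   error handling reuses CUDA_SAFE_CALL) would look like:

   cudaGraphicsResource* res;
   CUDA_SAFE_CALL(cudaGraphicsGLRegisterBuffer(&res, vbo[0], cudaGraphicsMapFlagsNone));
   CUDA_SAFE_CALL(cudaGraphicsMapResources(1, &res, 0));
   float4* ptr; size_t bytes;
   CUDA_SAFE_CALL(cudaGraphicsResourceGetMappedPointer((void**)&ptr, &bytes, res));
   // ... launch the kernel on ptr ...
   CUDA_SAFE_CALL(cudaGraphicsUnmapResources(1, &res, 0));
   CUDA_SAFE_CALL(cudaGraphicsUnregisterResource(res));
*/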
////////////////////////////////////////////////////////////////////////////////
//! Delete VBO
////////////////////////////////////////////////////////////////////////////////
void deleteVBOs( GLuint* vbo)
{
// Unregister the buffers with CUDA before deleting the GL objects
CUDA_SAFE_CALL(cudaGLUnregisterBufferObject(vbo[0]));
CUDA_SAFE_CALL(cudaGLUnregisterBufferObject(vbo[1]));
glBindBuffer( GL_ARRAY_BUFFER, 0);
glDeleteBuffers( 2, vbo);
vbo[0] = 0;
vbo[1] = 0;
}
////////////////////////////////////////////////////////////////////////////////
//! Create device data
////////////////////////////////////////////////////////////////////////////////
void createDeviceData()
{
// :: Modify velocities if need be.
CUDA_SAFE_CALL( cudaMalloc( (void**)&dVels[0], numBodies *
4 * sizeof(float) ) );
CUDA_SAFE_CALL( cudaMalloc( (void**)&dVels[1], numBodies *
4 * sizeof(float) ) );
// Initialize data.
float4* tempvels = (float4*)malloc(numBodies * 4*sizeof(float));
for(int i = 0; i < numBodies; ++i)
{
tempvels[i].x = .25*((2*(float)rand())/RAND_MAX-1);
tempvels[i].y = .25*((2*(float)rand())/RAND_MAX-1);
tempvels[i].z = .25*((2*(float)rand())/RAND_MAX-1);
tempvels[i].w = 0.;
}
// Copy to gpu
CUDA_SAFE_CALL( cudaMemcpy( dVels[0], tempvels, numBodies*4*sizeof(float), cudaMemcpyHostToDevice) );
free(tempvels);
}
////////////////////////////////////////////////////////////////////////////////
//! Delete device data
////////////////////////////////////////////////////////////////////////////////
void deleteDeviceData()
{
// Create a velocity for every position.
CUDA_SAFE_CALL( cudaFree( dVels[0] ) );
CUDA_SAFE_CALL( cudaFree( dVels[1] ) );
// pos's are the VBOs
}
////////////////////////////////////////////////////////////////////////////////
//! Display callback
////////////////////////////////////////////////////////////////////////////////
void display()
{
runCuda(vbo_pos, 0.001);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// set view matrix
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glTranslatef(0.0, 0.0, translate_z);
glRotatef(rotate_x, 1.0, 0.0, 0.0);
glRotatef(rotate_y, 0.0, 1.0, 0.0);
// render from the vbo with newPos
glBindBuffer(GL_ARRAY_BUFFER, vbo_pos[pingpong]);
glVertexPointer(4, GL_FLOAT, 0, 0);
glEnableClientState(GL_VERTEX_ARRAY);
glColor3f(0.0, 1.0, 0.0);
glDrawArrays(GL_POINTS, 0, numBodies);
glDisableClientState(GL_VERTEX_ARRAY);
glutSwapBuffers();
glutPostRedisplay();
}
////////////////////////////////////////////////////////////////////////////////
//! Keyboard events handler
////////////////////////////////////////////////////////////////////////////////
void keyboard( unsigned char key, int /*x*/, int /*y*/)
{
switch( key) {
case( 27) :  // ESC
case( 81) :  // 'Q'
case( 113) : // 'q'
deleteVBOs( vbo_pos );
deleteDeviceData();
exit( 0);
}
}
////////////////////////////////////////////////////////////////////////////////
//! Mouse event handlers
////////////////////////////////////////////////////////////////////////////////
void mouse(int button, int state, int x, int y)
{
if (state == GLUT_DOWN) {
mouse_buttons |= 1<<button;
} else if (state == GLUT_UP) {
mouse_buttons = 0;
}
mouse_old_x = x;
mouse_old_y = y;
glutPostRedisplay();
}
void motion(int x, int y)
{
float dx, dy;
dx = x - mouse_old_x;
dy = y - mouse_old_y;
if (mouse_buttons & 1) {
rotate_x += dy * 0.2;
rotate_y += dx * 0.2;
} else if (mouse_buttons & 4) {
translate_z += dy * 0.01;
}
mouse_old_x = x;
mouse_old_y = y;
}
|
259f69bc11cc65941c30d12206613603f0f86ef3.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
* \file dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma16x16x16_cdiv4hwn4_reorder_filter_per_chan_relu.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
// generated by gen_cuda_conv_bias_kern_impls.py
#include "../conv_bias_int8_implicit_gemm_imma16x16x16_cdiv4hwn4_reorder_filter.cuinl"
template void megdnn::cuda::conv_bias_int8::do_conv_bias_int8_implicit_gemm_imma16x16x16_cdiv4hwn4_reorder_filter<PerChannelBiasVisitor,
IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::RELU>>>(
const int8_t* d_src,
const int8_t* d_filter,
PerChannelBiasVisitor bias,
IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::RELU>> epilogue,
const ConvParam& param,
float alpha,
float beta,
hipStream_t stream);
|
259f69bc11cc65941c30d12206613603f0f86ef3.cu
|
/**
* \file dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma16x16x16_cdiv4hwn4_reorder_filter_per_chan_relu.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
// generated by gen_cuda_conv_bias_kern_impls.py
#include "../conv_bias_int8_implicit_gemm_imma16x16x16_cdiv4hwn4_reorder_filter.cuinl"
template void megdnn::cuda::conv_bias_int8::do_conv_bias_int8_implicit_gemm_imma16x16x16_cdiv4hwn4_reorder_filter<PerChannelBiasVisitor,
IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::RELU>>>(
const int8_t* d_src,
const int8_t* d_filter,
PerChannelBiasVisitor bias,
IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::RELU>> epilogue,
const ConvParam& param,
float alpha,
float beta,
cudaStream_t stream);
|
854d82d4f51a27b61f1a166ffcbe2869e93e3242.hip
|
// !!! This is a file automatically generated by hipify!!!
//pass
//--blockDim=1024 --gridDim=1 --no-inline
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <assert.h>
#define N 2 //1024
__global__ void definitions (int* A, unsigned int* B, unsigned long long int* C)
{
atomicMax(A,10);
atomicMax(B,1);
atomicMax(C,5);
}
int main (){
int a = 5;
int *dev_a;
hipMalloc ((void**) &dev_a, sizeof(int));
hipMemcpy(dev_a, &a, sizeof(int),hipMemcpyHostToDevice);
unsigned int b = 5;
unsigned int *dev_b;
hipMalloc ((void**) &dev_b, sizeof(unsigned int));
hipMemcpy(dev_b, &b, sizeof(unsigned int),hipMemcpyHostToDevice);
unsigned long long int c = 5;
unsigned long long int *dev_c;
hipMalloc ((void**) &dev_c, sizeof(unsigned long long int));
hipMemcpy(dev_c, &c, sizeof(unsigned long long int),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( definitions) , dim3(1),dim3(N), 0, 0, dev_a,dev_b,dev_c);
//ESBMC_verify_kernel(definitions,1,N,dev_a,dev_b,dev_c);
hipMemcpy(&a,dev_a,sizeof(int),hipMemcpyDeviceToHost);
hipMemcpy(&b,dev_b,sizeof(unsigned int),hipMemcpyDeviceToHost);
hipMemcpy(&c,dev_c,sizeof(unsigned long long int),hipMemcpyDeviceToHost);
printf("A: %d\n", a);
printf("B: %u\n", b);
printf("C: %u\n", c);
assert(a==10);
assert(b==5);
assert(c==5);
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return 0;
}
|
854d82d4f51a27b61f1a166ffcbe2869e93e3242.cu
|
//pass
//--blockDim=1024 --gridDim=1 --no-inline
#include <cuda.h>
#include <stdio.h>
#include <assert.h>
#define N 2 //1024
__global__ void definitions (int* A, unsigned int* B, unsigned long long int* C)
{
atomicMax(A,10);
atomicMax(B,1);
atomicMax(C,5);
}
int main (){
int a = 5;
int *dev_a;
cudaMalloc ((void**) &dev_a, sizeof(int));
cudaMemcpy(dev_a, &a, sizeof(int),cudaMemcpyHostToDevice);
unsigned int b = 5;
unsigned int *dev_b;
cudaMalloc ((void**) &dev_b, sizeof(unsigned int));
cudaMemcpy(dev_b, &b, sizeof(unsigned int),cudaMemcpyHostToDevice);
unsigned long long int c = 5;
unsigned long long int *dev_c;
cudaMalloc ((void**) &dev_c, sizeof(unsigned long long int));
cudaMemcpy(dev_c, &c, sizeof(unsigned long long int),cudaMemcpyHostToDevice);
definitions <<<1,N>>>(dev_a,dev_b,dev_c);
//ESBMC_verify_kernel(definitions,1,N,dev_a,dev_b,dev_c);
cudaMemcpy(&a,dev_a,sizeof(int),cudaMemcpyDeviceToHost);
cudaMemcpy(&b,dev_b,sizeof(unsigned int),cudaMemcpyDeviceToHost);
cudaMemcpy(&c,dev_c,sizeof(unsigned long long int),cudaMemcpyDeviceToHost);
printf("A: %d\n", a);
printf("B: %u\n", b);
printf("C: %u\n", c);
assert(a==10);
assert(b==5);
assert(c==5);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
}
|
cd73214fbb5d1648ee33b3c291580888efedbf86.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_complex.h>
#include <complex.h>
#include <stdio.h>
#include<omp.h>
#define type double
#define STR1(X) #X
#define STR(X) STR1(X)
#define STRINGIFY(X,Y) X ## Y
#define CON(X,Y) STRINGIFY(X,Y)
#define KDir kernels
#include "includes/ourmacros.h"
extern __shared__ type tile[];
__device__ __forceinline__ void fvinomgeneralolap_main_coars(const type * __restrict__ Atmp, type * Btmp, const int tb_size, const int* __restrict__ aexpr, const int* __restrict__ bexpr, const int* __restrict__ texpr1, const int * __restrict__ texpr2, const int ilimit, const int olimit, const int rowinc, const int shm2, const int numelems_blk, const int acoars, const int bcoars, const int size,
type alpha, type beta)
{
//if(blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0 && threadIdx.x == 1)
//printf("ilimit = %d, olimit = %d, tbsize = %d, Atmp = %p, Btmp = %p, txpr2[10] = %d, numblocks = %d \n",ilimit, olimit, tb_size, Atmp, Btmp, texpr2[10], gridDim.x*gridDim.y*gridDim.z);
for(int i = 0; i < size; i++)
{
for(int Id=threadIdx.x; Id < numelems_blk; Id+= tb_size)
{
tile[texpr1[Id]] = Atmp[aexpr[Id] + i*acoars];
}
__syncthreads();
for(int Id=threadIdx.x; Id < numelems_blk; Id+= tb_size)
{
Btmp[bexpr[Id] + i *bcoars] = alpha* tile[texpr2[Id]] + beta* Btmp[bexpr[Id] + i *bcoars];
}
__syncthreads();
}
}
__device__ __forceinline__ void fvinomgeneralolap_rem_coars(const type * __restrict__ Atmp, type * __restrict__ Btmp, const int tb_size, const int* __restrict__ aexpr, const int* __restrict__ bexpr, const int* __restrict__ texpr1, const int * __restrict__ texpr2, const int ilimit, const int olimit, const int rowinc, const int shm2, const int ilimitr, const int olimitr, const int numelems_blk, const int acoars, const int bcoars, const int size, type alpha, type beta)
{
for(int i = 0; i < size; i++)
{
for(int Id=threadIdx.x; Id < numelems_blk; Id+= tb_size)
{
tile[texpr1[Id]] = Atmp[aexpr[Id]+i * acoars];
}
__syncthreads();
for(int Id=threadIdx.x; Id < numelems_blk; Id+= tb_size)
{
int toffset2 = texpr2[Id];
if(toffset2 % (shm2) < ilimitr && toffset2/shm2 < olimitr)
Btmp[bexpr[Id]+i*bcoars] = alpha* tile[toffset2] + beta * Btmp[bexpr[Id]+i*bcoars];
}
__syncthreads();
}
}
__device__ __forceinline__ void fvinomgeneralolap_main(const type * __restrict__ Atmp, type * Btmp, const int tb_size, const int* __restrict__ aexpr, const int* __restrict__ bexpr, const int* __restrict__ texpr1, const int * __restrict__ texpr2, const int ilimit, const int olimit, const int rowinc, const int shm2, const int numelems_blk, type alpha, type beta)
{
//if(blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0 && threadIdx.x == 1)
// printf("ilimit = %d, olimit = %d, tbsize = %d, Atmp = %p, Btmp = %p, txpr2[10] = %d, numblocks = %d \n",ilimit, olimit, tb_size, Atmp, Btmp, texpr2[10], gridDim.x*gridDim.y*gridDim.z);
for(int Id=threadIdx.x; Id < numelems_blk; Id+= tb_size)
{
tile[texpr1[Id]] = Atmp[aexpr[Id]];
}
__syncthreads();
for(int Id=threadIdx.x; Id < numelems_blk; Id+= tb_size)
{
int toffset2 = texpr2[Id];
Btmp[bexpr[Id]] =alpha* tile[toffset2] + beta*Btmp[bexpr[Id]];
}
}
__device__ __forceinline__ void fvinomgeneralolap_rem(const type * __restrict__ Atmp, type * __restrict__ Btmp, const int tb_size, const int* __restrict__ aexpr, const int* __restrict__ bexpr, const int* __restrict__ texpr1, const int * __restrict__ texpr2, const int ilimit, const int olimit, const int rowinc, const int shm2, const int ilimitr, const int olimitr, const int numelems_blk, type alpha, type beta)
{
for(int Id=threadIdx.x; Id < numelems_blk; Id+= tb_size)
{
tile[texpr1[Id]] = Atmp[aexpr[Id]];
}
__syncthreads();
for(int Id=threadIdx.x; Id < numelems_blk; Id+= tb_size)
{
int toffset2 = texpr2[Id];
if(toffset2 % (shm2) < ilimitr && toffset2/shm2 < olimitr)
Btmp[bexpr[Id]] = alpha* tile[toffset2] + beta*Btmp[bexpr[Id]];
}
}
#define FNAME fvigeneralolap.h
#include "includes/macro.h"
#undef FNAME
#define FNAME fvigeneralolap_coars.h
#include "includes/macro.h"
#undef FNAME
/*
//general
__global__ void fvinomgeneralolap_kernel (const int ndim, const type * A, type * B, const int ilimit, const int olimit,const int param2, const int param3, const int param4
, const int * __restrict__ lda_s, const int* __restrict__ ldb_s, const int* __restrict__ idx_s
, const int remainder1, const int remainder2,
const int* __restrict__ offseti, const int* __restrict__ offseto, const int* __restrict__ tile1, const int* __restrict__ tile2,
const int ilimitr, const int olimitr, const int inputrem, const int outputrem, const int rowinc, const int shm2, const int numelems_blk
)
{
int tmp;
int val0 = blockIdx.x;
int val1 = blockIdx.y;
int aexpr =0, bexpr = 0;
if(ndim > 1)
aexpr = val0 * lda_s[0] + val1*lda_s[1], bexpr = val0 * ldb_s[0] + val1 * ldb_s[1];
else
{
aexpr = val0 * lda_s[0], bexpr = val0 * ldb_s[0];
}
int idx;
idx = blockIdx.z;
int ii1 = -1 , iip2 = -1;
if(param2 == 0)
iip2 = blockIdx.x;
else if(param2 == 1)
iip2 = blockIdx.y;
if(param3 == 0)
ii1 = blockIdx.x;
else if(param3 == 1)
ii1 = blockIdx.y;
#pragma unroll
for(int i = 2; i < ndim; i++)
{
tmp = idx/idx_s[i];
int index = idx - tmp * idx_s[i];
aexpr += index * lda_s[i];
bexpr += index * ldb_s[i];
idx = tmp;
if(i == param2) ii1 = index;
else if(i == param3) iip2 = index;
}
const double *Atmp = A + aexpr;
double *Btmp = B + bexpr;
if(ii1 < inputrem && iip2 < outputrem)
{
fvinomgeneralolap_cuSharedMemTranspose_vec256(Atmp,Btmp, param4, offseti, offseto,tile1, tile2, ilimit, olimit, rowinc, shm2, numelems_blk );
}
else if(ii1 >= inputrem && iip2 < outputrem)
{ //remainder in size1
fvinomgeneralolap_cuSharedMemRemTranspose_vec256(Atmp,Btmp, param4, offseti, offseto,tile1, tile2, ilimit, olimit, rowinc, shm2, ilimitr, olimit, numelems_blk );
}
else if(iip2 >= outputrem && ii1 < inputrem)
{ //remainder in size2
fvinomgeneralolap_cuSharedMemRemTranspose_vec256(Atmp,Btmp, param4, offseti, offseto,tile1, tile2, ilimit, olimit, rowinc, shm2, ilimit, olimitr, numelems_blk );
}
else
{
fvinomgeneralolap_cuSharedMemRemTranspose_vec256(Atmp,Btmp, param4, offseti, offseto,tile1, tile2, ilimit, olimit, rowinc, shm2, ilimitr, olimitr, numelems_blk );
}
return;
//#undef ndim
}
*/
void fvinomgeneralolap_CallerWrapper(int ndim, type * A, type * B,const int ilimit, const int olimit, const int blockAI, const int blockBI, const int numblocks, const int numthreads, const int shm
, const int * __restrict__ lda_s, const int* __restrict__ ldb_s, const int* __restrict__ idx_s,
const int coarsa, const int coarsb, const int * idx_ss, const int shm2,
const int* __restrict__ aexpr, const int* __restrict__ bexpr, const int* __restrict__ texpr1, const int* __restrict__ texpr2, const int ilimitr, const int olimitr,
const int inputrem, const int outputrem, const int numelems_blk, const int size, type alpha, type beta
)
{
/* int second, third;
if(ndim > 2)
{
second = idx_ss[1]; third = numblocks/(idx_ss[0]*idx_ss[1]);
}
else if(ndim > 1)
{
second = idx_ss[1]; third = 1;// numblocks/(idx_ss[0]*idx_ss[1]);
}
else
{
second = third = 1;
}
dim3 thread_blocks(idx_ss[0], second, third);*/
const int rowinc = (numthreads+ilimit-1)/ilimit;
#ifdef printd
printf("thread_blocks = %d, numthreads = %d, shm = %d\n", numblocks, numthreads, shm);
printf("size = %d, ndim = %d, shm = %d\n", size, ndim, shm);
#endif
if(size > 0)
{
dim3 thread_blocks(numblocks/size, 1, 1);
switch(ndim)
{
EXPANDDIMS(fvinomgeneralolap_coars_kernel_, thread_blocks, numthreads, shm, (A, B, ilimit, olimit, blockAI, blockBI, numthreads, lda_s,ldb_s, idx_s, coarsa,coarsb, aexpr, bexpr, texpr1, texpr2, ilimitr, olimitr, inputrem, outputrem, rowinc, shm2, numelems_blk, size, alpha, beta))
default:
{
// fvinomgeneralolap_coars_kernel<<<thread_blocks, numthreads, shm>>>(ndim, A, B, ilimit, olimit, blockAI, blockBI, numthreads, lda_s,ldb_s, idx_s, coarsa, coarsb, aexpr, bexpr, texpr1, texpr2, ilimitr, olimitr, inputrem, outputrem, rowinc, shm2, numelems_blk);
}
}
}
else
{
dim3 thread_blocks(numblocks, 1, 1);
switch(ndim)
{
EXPANDDIMS(fvinomgeneralolap_kernel_, thread_blocks, numthreads, shm, (A, B, ilimit, olimit, blockAI, blockBI, numthreads, lda_s,ldb_s, idx_s, aexpr, bexpr, texpr1, texpr2, ilimitr, olimitr, inputrem, outputrem, rowinc, shm2, numelems_blk, alpha, beta))
default:
{
// fvinomgeneralolap_kernel<<<thread_blocks, numthreads, shm>>>(ndim, A, B, ilimit, olimit, blockAI, blockBI, numthreads, lda_s,ldb_s, idx_s, aexpr, bexpr, texpr1, texpr2, ilimitr, olimitr, inputrem, outputrem, rowinc, shm2, numelems_blk);
}
}
}
}
int ispresent(int a, int *array, int n)
{
for(int i = 0; i < n; i++)
{
if(array[i] == a) return 1;
}
return 0;
}
int getoff(int index,const int * dims,const int * stride, int n)
{
int ret = 0;
for(int i = 0; i < n; i++)
{
int ii = index % dims[i];
ret += ii * stride[i];
index/= dims[i];
}
return ret;
}
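/* Hedged worked example (added for clarity, not in the original source):
   with dims = {4, 3} and stride = {1, 4}, index 7 splits into 7 % 4 = 3 (stride 1)
   and 7 / 4 = 1 (stride 4), so getoff(7, dims, stride, 2) == 3*1 + 1*4 == 7.
   With a permuted stride such as {3, 1}, the same index maps to 3*3 + 1*1 == 10. */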
void makeconsecutive(int *tmp, int k)
{
int tmp2[20], permi[20], j,i;
//printf("\n perm before: ");
//for(i = 0; i < k; i++) printf("%d ", tmp[i]);//permi[i];
for(i = 0; i < k; i ++) tmp2[i] = tmp[i];
for(i = 0; i < k; i ++)
{
for(j = 0; j < k-i-1; j++)
{
if(tmp2[j] > tmp2[j+1])
{
int tmp = tmp2[j];
tmp2[j] = tmp2[j+1];
tmp2[j+1] = tmp;
}
}
}
for(i = 0; i < k; i++)
{
for(j=0; j <k; j++)
{
if(tmp[i] == tmp2[j])
{
permi[i] = j; break;
}
}
}
for(i = 0; i < k; i++) tmp[i] = permi[i];
}
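/* Hedged worked example (added for clarity, not in the original source):
   makeconsecutive() replaces each entry by its rank within the array, e.g.
   tmp = {5, 2, 9} becomes {1, 0, 2}, turning an arbitrary selection of
   dimension indices into a consecutive 0..k-1 permutation. */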
void swap(int array[], int ind1, int ind2);
int cancoarsen(int *lda, int newndim)
{
if(newndim < 1) return -1;
unsigned long vol = 1;
for(int i = 0; i < newndim; i++)
{
vol *= lda[i];
}
if(vol < 32*100) return -1;
for(int i = 0; i < newndim; i++)
{
if(lda[i] >= 4 && lda[i] <= 31)
return i;
}
/* for(int i = 0; i < newndim; i++)
{
if(lda[i] >= 2 && lda[i] <= 300)
return i;
}*/
return -1;
}
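/* Hedged note (added for clarity, not in the original source): cancoarsen() only
   coarsens when the remaining block-grid volume is at least 32*100, and then picks
   the first dimension whose extent lies in [4, 31]; e.g. lda = {128, 8, 50}
   (volume 51200) returns index 1, while lda = {2, 3} returns -1. */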
extern "C"
void fvigeneralolap_transpose_kernel(int ndim, type *A, type *B, int *lda, const int *ldb, const int* params, const int * perm, const int* rperm, type alpha, type beta)
{
// int numBlocks = computeNumBlocksCode ;
#ifdef printd
printf("\nA Dims: %d \t %d \t %d\t %d\t %d\n", lda[0], lda[1], lda[2], lda[3], lda[4]);
printf("\nAll diff Params: %d \t %d \t %d\t %d\t %d\t %d\t %d\t %d\t %d \t%d\t %d\t %d\n", params[0], params[1], params[2], params[3], params[4], params[5], params[6], params[7], params[8], params[9], params[10], params[11]);
printf("\nB Dims: %d \t %d \t %d\t %d\t %d\n", ldb[0], ldb[1], ldb[2], ldb[3], ldb[4]);
printf("\nR perm: %d \t %d \t %d\t %d\t %d\n", rperm[0], rperm[1], rperm[2], rperm[3], rperm[4]);
#endif
int alimit = params[3];
int blimit = params[4];
int blockA=params[0];
int blockB = params[11];
int ilimit = params[7];
//int olimit = params[8];
int i = 0, j = 0;
int size = 1;
for(i = 0; i < blimit; i++)
{
if(perm[i] > alimit)
{
size *= ldb[i];
}
}
if(perm[i] > alimit)
{
if(blockB == 1)
size *= ldb[i];
else
size*= blockB;
}
#ifdef printd
printf("In .cu, alimit = %d, blimit = %d, bsize = %d, blockB = %d, blockA = %d\n", alimit, blimit, size, blockB, blockA);
#endif
//for(int y = 0; y < j; y++)
//printf("bo[%d] = %d ",y, bo[y]);
int olimit = size;
#ifdef SLICE
printf("\t%d\t%d\t", ilimit, olimit);
#endif
//exit(0);
int numBlocks = params[6];//((size[1] + 8 -1)/8) * size[2] * ((size[3] + 8 -1)/8) * size[4] ;
const int pad = ((ilimit %2)+1)%2;
int *d_lda_s, *d_ldb_s, *d_idx_s;
const int remainder1 = lda[params[3]] % blockA;
int remainder2;
remainder2 = lda[perm[params[4]]] % blockB;
const int ilimitr = (ilimit * remainder1) / blockA;
int olimitr = (olimit * remainder2) / blockB;
int irem, orem;
if(remainder1 == 0) irem = lda[alimit];
else irem = (lda[alimit] - remainder1)/blockA;
if(remainder2 == 0) orem = lda[perm[blimit]];
else orem = (ldb[blimit] - remainder2)/blockB;
if(perm[params[4]] == params[3])
{
olimitr = olimit;
remainder2 = 0;
orem = lda[perm[blimit]];
}
else
{
//remainder2 = lda[perm[params[4]]] % blockB;
}
#ifdef printd
printf("\nrem1 = %d, rem2 = %d\n", remainder1, remainder2);
printf("\nilimit = %d, olimit = %d, ilimitr = %d, olimitr = %d\n", ilimit, olimit, ilimitr, olimitr);
#endif
int *input_base, *output_base, *tile_base1, *tile_base2;
int *aexpr, *bexpr, *texpr1, *texpr2;
// int *ablock, *bblock, *d_ablock, *d_bblock;;
int lda_s[20], ldb_s[20], idx_s[20], temp[20];
lda_s[0] = 1;
ldb_s[0] = 1;
idx_s[0] = 1;
for(i = 1; i < alimit; i++)
{
idx_s[i] = 1;
lda_s[i] = lda_s[i-1] * lda[i-1];
ldb_s[i] = ldb_s[i-1] * ldb[i-1];
}
if(rperm[alimit] < blimit || (rperm[alimit] == blimit && blockB == 1))
idx_s[alimit] = 1;
else{
idx_s[alimit] = (lda[alimit] + blockA - 1) / blockA;
}
lda_s[i] = lda_s[i-1] * lda[i-1];
ldb_s[i] = ldb_s[i-1] * ldb[i-1];
for(i = alimit+1; i < ndim; i++)
{
lda_s[i] = lda_s[i-1] * lda[i-1];
ldb_s[i] = ldb_s[i-1] * ldb[i-1];
if(rperm[i] < blimit)
{
idx_s[i] = 1;// (lda[i] + blockA - 1) / blockA;
}
else if(rperm[i] == blimit)
{
idx_s[i] = (lda[i] + blockB - 1) / blockB;
}
else
{
idx_s[i] = lda[i];
}
}
for(i = 0; i < ndim; i++)
{
temp[i] = ldb_s[rperm[i]];
#ifdef printd
printf("Idx[%d] = %d\n", i, idx_s[i]);
#endif
}
aexpr = (int*)malloc(ilimit* olimit * sizeof(int));
bexpr = (int*)malloc(ilimit * olimit * sizeof(int));
texpr1 = (int*)malloc(ilimit* olimit * sizeof(int));
texpr2 = (int*)malloc(ilimit * olimit* sizeof(int));
SAFECUDAMALLOC(&input_base,ilimit*olimit*sizeof(int));
SAFECUDAMALLOC(&output_base,ilimit*olimit*sizeof(int));
SAFECUDAMALLOC(&tile_base1, ilimit*olimit *sizeof(int));
SAFECUDAMALLOC(&tile_base2, ilimit*olimit*sizeof(int));
int outD[20], outD_s[20], B_s[20];
outD_s[0] = 1;
outD[0] = ldb[0];
int inD[20], inD_s[20];
inD_s[0] = 1;
inD[0] = lda[0];
//B_s[0] = ldb_s[rperm[0]];
B_s[0] = 1;//ldb_s[rperm[0]];
int permD_s[20], permD[20];
// permD_s[alimit+1] = outD_s[0];
//permD[alimit+1] = outD[0];
int OO_C = 0, C_C = 0;
int onlyOut[20];int onlyOutI[20];
for(i = 0; i <= alimit; i++)
{
if(rperm[i] <= blimit)
C_C++;
}
const int OI_C = alimit + 1;
if(perm[0] > alimit)
{
OO_C++;
onlyOut[0] = ldb[0];
onlyOutI[0] = 0;
permD[0] = OI_C;
}
else
{
// C_C++;
permD[0] = perm[0];
}
for(i = 1; i < blimit; i++)
{
outD[i] = ldb[i];
outD_s[i] = outD_s[i-1] * outD[i-1];
// B_s[i] = ldb_s[rperm[i]];
B_s[i] = ldb_s[i];
if(perm[i] > alimit)
{
onlyOut[OO_C] = ldb[i];
onlyOutI[OO_C++] = i;
permD[i] = OI_C + i;
}
else
{
// C_C++;
permD[i] = perm[i];
}
}
if(blimit == 0) {i = 0; OO_C = 0;}
if(blockB == 1)
{
outD[i] = ldb[i];
}
else
{
outD[i] = blockB;
}
if(i > 0)
{
outD_s[i] = outD_s[i-1] * outD[i-1];
}
else
{
outD_s[i] = 1;
}
B_s[i] = ldb_s[i];
if(perm[i] <= alimit)
{
// C_C++;
permD[i] = perm[i];
}
else
{
//if(blockB == 1)
onlyOut[OO_C] = ldb[i];
//else
//onlyOut[OO_C] = blockB;
onlyOutI[OO_C++] = i;
permD[i] = OI_C + i;
}
i++;
for(j = 0; j < alimit; j++)
{
if(rperm[j] > blimit)
{
outD[i] = lda[j];
permD[i] = j;
outD_s[i] = outD_s[i-1] * outD[i-1];
B_s[i] = ldb_s[rperm[j]];
i++;
}
else
{
//tmp [k++] = j;
//C_C++;
}
}
if(rperm[j] > blimit)
{
//printf("BI = %d, rperm[%d] = %d, blimit = %d\n", i, j, rperm[j], blimit);
if(blockA == 1)
{
outD[i] = lda[j];
}
else
{
outD[i] = blockA;
}
permD[i] = j;
B_s[i] = ldb_s[rperm[j]];
outD_s[i] = outD_s[i-1] * outD[i-1];
i++;
}
else
{
//tmp [k++] = rperm[j];
//C_C++;
}
int BI = i;
for(i = 1; i < alimit; i++)
{
inD[i] = lda[i];
inD_s[i] = inD_s[i-1] * inD[i-1] ;
}
if(alimit == 0) i = 0;
if(blockA == 1)
{
inD[i] = lda[i];
}
else
{
inD[i] = blockA;
}
if(i > 0)
inD_s[i] = inD_s[i-1] * inD[i-1];
i++;
for(j = 0; j < blimit; j++)
{
if(perm[j] > alimit)
{
inD[i] = ldb[j];
inD_s[i] = inD_s[i-1] * inD[i-1];
i++;
}
else{
// C_C++;
}
}
if(perm[j] > alimit)
{
if(blockB == 1)
{
inD[i] = ldb[j];
}
else
{
inD[i] = blockB;
}
inD_s[i] = inD_s[i-1] * inD[i-1];
i++;
}
else{
// C_C++;
}
int AI = i;
makeconsecutive(permD, AI);
//permD[0] = 1, permD[1] = 2, permD[2] = 0;
//inD[0] = 32, inD[1] = 2, inD[2] = 30;
//inD_s[0] = 1, inD_s[1] = 32, inD_s[2] = 64;
for(i = 0; i < AI; i++)
{
permD_s[i] = inD_s[permD[i]];
}
if(BI != AI)
{
printf("No. of dimensions in I and O non-matching...\n");
//return;
}
#ifdef printd
printf("\nOO_C = %d, C_C = %d\n ", OO_C, C_C);
printf("\nOUT_D: ");
for(int i = 0; i < BI; i++)
{
printf("%d ",outD[i]);
}
printf("\nOUT_D_S: ");
for(int i = 0; i < BI; i++)
{
printf("%d ",outD_s[i]);
}
printf("\nIn_D: ");
for(int i = 0; i < AI; i++)
{
printf("%d ",inD[i]);
}
printf("\nIN_D_S: ");
for(int i = 0; i < AI; i++)
{
printf("%d ", inD_s[i]);
}
printf("\n");
printf("\nB_S: ");
for(int i = 0; i < BI; i++)
{
printf("%d ", B_s[i]);
}
printf("\nPerm_D: ");
for(int i = 0; i < BI; i++)
{
printf("%d ",permD[i]);
}
printf("\n");
printf("\nPerm_S: ");
for(int i = 0; i < BI; i++)
{
printf("%d ", permD_s[i]);
}
printf("\n");
printf("\n");
#endif
for(int rowId=0; rowId < olimit; rowId++)
{
int tmp = rowId;
int aoff=0,j;
for(j = 0; j < OO_C; j++)
{
int dval = onlyOut[j];
int val = tmp%dval;
tmp /= dval;
aoff += val * lda_s[perm[onlyOutI[j]]];
}
for(int colId=0; colId < ilimit; colId++)
{
aexpr[rowId*ilimit + colId] = aoff + colId;
texpr1[rowId * (ilimit) + colId] = rowId * (ilimit+pad) + colId;
int off = getoff(rowId * ilimit + colId, outD, permD_s, BI);
texpr2[rowId * (ilimit) + colId] = off + pad * (off/ilimit);
off = getoff(rowId * ilimit + colId, outD, B_s, AI);
bexpr[rowId* ilimit + colId] = off;
}
}
#ifdef printd
printf("\n...A...\n");
for(int rowId=0; rowId < olimit; rowId++)
{
printf("%d ", aexpr[rowId]);
//printf("\n");
//for(int colId=0; colId < ilimit; colId++)
{
// printf("%d ", bexpr[rowId * ilimit + colId]);
// printf("%d ", texpr2[rowId * ilimit + colId]);
}
//printf("\n");
}
printf("\n...B...\n");
for(int rowId=0; rowId < olimit; rowId++)
{
// printf("%d ", bexpr[rowId]);
//printf("\n");
for(int colId=0; colId < ilimit; colId++)
{
printf("%d ", bexpr[rowId * ilimit + colId]);
// printf("%d ", texpr2[rowId * ilimit + colId]);
}
printf("\n");
}
printf("\n...T...\n");
for(int rowId=0; rowId < olimit; rowId++)
{
// printf("%d ", bexpr[rowId]);
// printf("\n");
for(int colId=0; colId < ilimit; colId++)
{
// printf("%d ", bexpr[rowId * ilimit + colId]);
printf("%d ", texpr2[rowId * ilimit + colId]);
}
printf("\n");
}
#endif
lda_s[params[3]] *= params[0];///lda_s[i-1] * lda[i-1];
temp[params[3]] *= params[0];// ldb_s[i-1] * ldb[i-1];
if(params[3] != perm[params[4]])//no double blocking
{
lda_s[perm[params[4]]] *= params[11];///lda_s[i-1] * lda[i-1];
temp[perm[params[4]]] *= params[11];// ldb_s[i-1] * ldb[i-1];
}
int c = 0, d = 0;
c = alimit + 1;//c = No. of dimensions to be removed from input for thread blocking, b = same for output but only for those which are not in input
if(blockA > 1) c--;
int ablockI, bblockI;
//int dims[20];
ablockI = alimit-c;
bblockI = perm[blimit]-c;
int tempbblockI = bblockI;
#ifdef printd
printf("\nablockI = %d, bblockI = %d\n", ablockI, bblockI);
#endif
for(int i = c; i < ndim; i++)
{
if(((rperm[i] < blimit) || ((rperm[i] == blimit) && (blockB ==1))))
{
idx_s[i] = 1;// idx_s[j];
/*for(int j = i+1; j < ndim-d; j ++)
{
idx_s[j-1] = idx_s[j];
lda_s[j-1] = lda_s[j];
temp[j-1] = temp[j];
}*/
d++;
if((i < bblockI + c) || ((i == bblockI + c) && (blockB == 1)))
tempbblockI--;
}
}
bblockI = tempbblockI;
int cnt = 0;
for(int i = c; i < ndim; i++)
{
if(idx_s[i] == 1)
{
for(int j = i+1; j < ndim; j ++)
{
idx_s[j-1] = idx_s[j];
lda_s[j-1] = lda_s[j];
temp[j-1] = temp[j];
}
cnt++;
i--;
}
if(cnt > ndim) break;
}
/*
for(int i = c; i < ndim; i++)
{
// dims[i] = lda[i];
if((rperm[i] < blimit || (rperm[i] == blimit && blockB ==1)))
{
for(j = i+1; j < ndim-d; j ++)
{
idx_s[j-1] = idx_s[j];
lda_s[j-1] = lda_s[j];
temp[j-1] = temp[j];
// dims[j-1] = lda[j];
}
d++;
if((i < bblockI + c) || (i == bblockI + c) && (blockB == 1))
bblockI--;
if(i < alimit)
ablockI--;
}
}*/
int newndim = ndim - (c + d);
#ifdef printd
printf("\nChanged ablockI = %d, bblockI = %d\n", ablockI, bblockI);
#endif
//Find the largest dimension and make it the first as only Dimx can have > 65k size
/*int max = 0;
for(int i = 1; i < newndim; i++)
{
if(idx_s[c+i] > idx_s[max+c]) max = i;
}
//printf("\nmax: %d ", max);
if(max > c)
{
swap(idx_s, c, max+c);
swap(lda_s, c, max+c);
swap(temp, c, max+c);
if(max == ablockI) ablockI = 0;
else if(ablockI == 0) ablockI = max;
if(max == bblockI) bblockI = 0;
else if(bblockI ==0) bblockI = max;
}*/
if(ablockI > 0)//move it to start, junk part
{
swap(idx_s, ablockI+c, c);
swap(lda_s, ablockI+c, c);
swap(temp, ablockI+c, c);
}
int bi = 0;
if(bblockI == 0 && ablockI > 0)
bi = ablockI+c;
else
bi = bblockI+c;
if(bblockI >= 0 && bblockI != ablockI)//move it to start
{
swap(idx_s, bi, c+ (ablockI >=0));
swap(lda_s, bi, c+ (ablockI >=0));
swap(temp, bi, c+ (ablockI >=0));
}
if(bblockI >= 0) {
if(ablockI == bblockI || ablockI < 0) bblockI = 0;
else bblockI = 1;
}
if(ablockI >=0) ablockI = 0;
int nblkdims = 0;
if(ablockI >= 0) nblkdims++;
if((bblockI >= 0) && (bblockI != ablockI)) nblkdims++;
#ifdef printd
printf("\nIDx: ");
for(int i = 0; i < newndim; i++)
{
printf("%d ",idx_s[i+c]);
}
printf("ndim = %d, c = %d, d = %d, newndim = %d, nblkdims = %d\n", ndim, c, d, newndim, nblkdims);
#endif
int acoars = 0, bcoars = 0;
size = -1;
#ifdef printd
printf("\nirem = %d, orem = %d, alimit = %d, blimit = %d, ablockI = %d, bblockI = %d, newdim = %d, c = %d, d = %d, olddim = %d\n\n", irem, orem, alimit, blimit, ablockI, bblockI, newndim, c, d, ndim);
#endif
#ifndef NOCOARSEN
int cd = cancoarsen(idx_s+c+nblkdims, newndim-nblkdims);
if(cd >= 0)
{
int offset = c + cd + nblkdims;
acoars = lda_s[offset];
bcoars = temp[offset];
size = idx_s[offset];
for(int j = cd+1+nblkdims; j < newndim; j++)
{
idx_s[c+j-1] = idx_s[c+j];
lda_s[c+j-1] = lda_s[c+j];
temp[c+j-1] = temp[c+j];
}
// ablockI--;
// bblockI--;
newndim--;
}
#ifdef printd
printf("\nirem = %d, orem = %d, alimit = %d, blimit = %d, ablockI = %d, bblockI = %d, newdim = %d, c = %d, d = %d, olddim = %d, acoars = %d, bcoars = %d, cd = %d\n\n", irem, orem, alimit, blimit, ablockI, bblockI, newndim, c, d, ndim, acoars, bcoars, cd);
#endif
#endif
SAFECUDAMALLOC(&d_lda_s,newndim*sizeof(int));
SAFECUDAMALLOC(&d_ldb_s,newndim*sizeof(int));
SAFECUDAMALLOC(&d_idx_s,newndim*sizeof(int));
SAFECUDAMEMCPY(d_idx_s, idx_s+c,newndim*sizeof(int), hipMemcpyHostToDevice);
SAFECUDAMEMCPY(d_lda_s, lda_s+c,newndim*sizeof(int), hipMemcpyHostToDevice);
SAFECUDAMEMCPY(d_ldb_s, temp+c,newndim*sizeof(int), hipMemcpyHostToDevice);
SAFECUDAMEMCPY(input_base, aexpr, ilimit*olimit*sizeof(int), hipMemcpyHostToDevice);
SAFECUDAMEMCPY(output_base, bexpr, ilimit*olimit*sizeof(int), hipMemcpyHostToDevice);
SAFECUDAMEMCPY(tile_base1, texpr1, ilimit*olimit*sizeof(int), hipMemcpyHostToDevice);
SAFECUDAMEMCPY(tile_base2, texpr2,ilimit* olimit*sizeof(int), hipMemcpyHostToDevice);
#ifdef MODEL
{
const int olimit = params[8];
int olimitr = (olimit * remainder2) / blockB;
printf("\t%d\t%d\t", ilimit, olimit);
printf("\t%d\t%d\t%d\t%d\t", ilimit/32, ilimit%32, olimit/32,olimit%32 );
double f1, f2, f3, f4, f;
printf("\tf1=%lf\t", f1 = ((ilimit/32) * (olimit/32) + (double)(ilimit/32) * (olimit%32) /32+ (double)(ilimit%32) * (olimit/32) /32 + (double)(ilimit%32) * (olimit%32) /(32*32) )/ (int)(((ilimit+31)/32) * ((olimit+31)/32)));
printf("\tf2=%lf\t", f2 = ((ilimitr/32) * (olimit/32) + (double)(ilimitr/32) * (olimit%32) /32+ (double)(ilimitr%32) * (olimit/32) /32 + (double)(ilimitr%32) * (olimit%32) /(32*32) )/ max(1,(int)(((ilimitr+31)/32) * ((olimit+31)/32))));
printf("\tf3=%lf\t", f3 = ((ilimit/32) * (olimitr/32) + (double)(ilimit/32) * (olimitr%32) /32+ (double)(ilimit%32) * (olimitr/32) /32 + (double)(ilimit%32) * (olimitr%32) /(32*32) )/ max(1,(int)(((ilimit+31)/32) * ((olimitr+31)/32))));
printf("\tf4=%lf\t", f4 = ((ilimitr/32) * (olimitr/32) + (double)(ilimitr/32) * (olimitr%32) /32+ (double)(ilimitr%32) * (olimitr/32) /32 + (double)(ilimitr%32) * (olimitr%32) /(32*32) )/ max(1,(int)(((olimitr+31)/32) * ((olimitr+31)/32))));
printf("\t%d\t%d\t", lda[alimit], ldb[blimit]);
int asize = lda[alimit];
int bsize = ldb[blimit];
printf("MKL \t%d\t%d\t%d\t%d\t", asize/blockA, asize%blockA, bsize/blockB,bsize%blockB );
//int amax = min(blockA, 32);
//int bmax = min(blockB, 32);
int amax = blockA;
int bmax = blockB;
printf("\tf=%lf\t", f = ((asize/amax) * (bsize/bmax) *f1 + (double)((asize/amax) * (bsize%bmax > 0) *f3)+ (double)((asize%amax > 0) * (bsize/bmax)*f2) + (double)((asize%amax>0) * (bsize%bmax > 0) *f4) )/ (int)(((asize+amax-1)/amax) * ((bsize+bmax-1)/bmax)));
//printf("\tf=%lf\t", f = ((asize/amax) * (bsize/bmax) *f1 + (double)(asize/amax) * (bsize%bmax) *f3/bmax+ (double)(asize%amax) * (bsize/bmax)*f2 /amax + (double)(asize%amax) * (bsize%bmax) *f4/(amax*bmax) )/ (int)(((asize+amax-1)/amax) * ((bsize+bmax-1)/bmax)));
printf("\t%lf\t", f);
}
#endif
#ifdef NOHTIME
#include "includes/nohtimestart.h"
#endif
fvinomgeneralolap_CallerWrapper(newndim, A, B,ilimit,olimit, ablockI,bblockI
,numBlocks, params[2], (ilimit+pad) * olimit *sizeof(type)
, d_lda_s,d_ldb_s,d_idx_s
,acoars,bcoars,idx_s+c, (ilimit+pad), input_base, output_base, tile_base1, tile_base2, ilimitr, olimitr, irem, orem, ilimit*olimit, size, alpha, beta);
#ifdef NOHTIME
#include "includes/nohtimestop.h"
#endif
{hipError_t err = hipGetLastError();
if(err != hipSuccess){
printf("\nKernel ERROR in fvi_nomatch_generalolap: %s (line: %d)\n", hipGetErrorString(err), __LINE__);
//exit(-1);
}}
free(aexpr);
free(bexpr);
free(texpr1);
free(texpr2);
hipFree(d_lda_s);
hipFree(d_ldb_s);
hipFree(d_idx_s);
hipFree(input_base);
hipFree(output_base);
hipFree(tile_base1);
hipFree(tile_base2);
}
|
cd73214fbb5d1648ee33b3c291580888efedbf86.cu
|
#include <cuda_runtime.h>
#include <cuComplex.h>
#include <complex.h>
#include <stdio.h>
#include<omp.h>
#define type double
#define STR1(X) #X
#define STR(X) STR1(X)
#define STRINGIFY(X,Y) X ## Y
#define CON(X,Y) STRINGIFY(X,Y)
#define KDir kernels
#include "includes/ourmacros.h"
extern __shared__ type tile[];
__device__ __forceinline__ void fvinomgeneralolap_main_coars(const type * __restrict__ Atmp, type * Btmp, const int tb_size, const int* __restrict__ aexpr, const int* __restrict__ bexpr, const int* __restrict__ texpr1, const int * __restrict__ texpr2, const int ilimit, const int olimit, const int rowinc, const int shm2, const int numelems_blk, const int acoars, const int bcoars, const int size,
type alpha, type beta)
{
//if(blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0 && threadIdx.x == 1)
//printf("ilimit = %d, olimit = %d, tbsize = %d, Atmp = %p, Btmp = %p, txpr2[10] = %d, numblocks = %d \n",ilimit, olimit, tb_size, Atmp, Btmp, texpr2[10], gridDim.x*gridDim.y*gridDim.z);
for(int i = 0; i < size; i++)
{
for(int Id=threadIdx.x; Id < numelems_blk; Id+= tb_size)
{
tile[texpr1[Id]] = Atmp[aexpr[Id] + i*acoars];
}
__syncthreads();
for(int Id=threadIdx.x; Id < numelems_blk; Id+= tb_size)
{
Btmp[bexpr[Id] + i *bcoars] = alpha* tile[texpr2[Id]] + beta* Btmp[bexpr[Id] + i *bcoars];
}
__syncthreads();
}
}
__device__ __forceinline__ void fvinomgeneralolap_rem_coars(const type * __restrict__ Atmp, type * __restrict__ Btmp, const int tb_size, const int* __restrict__ aexpr, const int* __restrict__ bexpr, const int* __restrict__ texpr1, const int * __restrict__ texpr2, const int ilimit, const int olimit, const int rowinc, const int shm2, const int ilimitr, const int olimitr, const int numelems_blk, const int acoars, const int bcoars, const int size, type alpha, type beta)
{
for(int i = 0; i < size; i++)
{
for(int Id=threadIdx.x; Id < numelems_blk; Id+= tb_size)
{
tile[texpr1[Id]] = Atmp[aexpr[Id]+i * acoars];
}
__syncthreads();
for(int Id=threadIdx.x; Id < numelems_blk; Id+= tb_size)
{
int toffset2 = texpr2[Id];
if(toffset2 % (shm2) < ilimitr && toffset2/shm2 < olimitr)
Btmp[bexpr[Id]+i*bcoars] = alpha* tile[toffset2] + beta * Btmp[bexpr[Id]+i*bcoars];
}
__syncthreads();
}
}
__device__ __forceinline__ void fvinomgeneralolap_main(const type * __restrict__ Atmp, type * Btmp, const int tb_size, const int* __restrict__ aexpr, const int* __restrict__ bexpr, const int* __restrict__ texpr1, const int * __restrict__ texpr2, const int ilimit, const int olimit, const int rowinc, const int shm2, const int numelems_blk, type alpha, type beta)
{
//if(blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0 && threadIdx.x == 1)
// printf("ilimit = %d, olimit = %d, tbsize = %d, Atmp = %p, Btmp = %p, txpr2[10] = %d, numblocks = %d \n",ilimit, olimit, tb_size, Atmp, Btmp, texpr2[10], gridDim.x*gridDim.y*gridDim.z);
for(int Id=threadIdx.x; Id < numelems_blk; Id+= tb_size)
{
tile[texpr1[Id]] = Atmp[aexpr[Id]];
}
__syncthreads();
for(int Id=threadIdx.x; Id < numelems_blk; Id+= tb_size)
{
int toffset2 = texpr2[Id];
Btmp[bexpr[Id]] =alpha* tile[toffset2] + beta*Btmp[bexpr[Id]];
}
}
__device__ __forceinline__ void fvinomgeneralolap_rem(const type * __restrict__ Atmp, type * __restrict__ Btmp, const int tb_size, const int* __restrict__ aexpr, const int* __restrict__ bexpr, const int* __restrict__ texpr1, const int * __restrict__ texpr2, const int ilimit, const int olimit, const int rowinc, const int shm2, const int ilimitr, const int olimitr, const int numelems_blk, type alpha, type beta)
{
for(int Id=threadIdx.x; Id < numelems_blk; Id+= tb_size)
{
tile[texpr1[Id]] = Atmp[aexpr[Id]];
}
__syncthreads();
for(int Id=threadIdx.x; Id < numelems_blk; Id+= tb_size)
{
int toffset2 = texpr2[Id];
if(toffset2 % (shm2) < ilimitr && toffset2/shm2 < olimitr)
Btmp[bexpr[Id]] = alpha* tile[toffset2] + beta*Btmp[bexpr[Id]];
}
}
#define FNAME fvigeneralolap.h
#include "includes/macro.h"
#undef FNAME
#define FNAME fvigeneralolap_coars.h
#include "includes/macro.h"
#undef FNAME
/*
//general
__global__ void fvinomgeneralolap_kernel (const int ndim, const type * A, type * B, const int ilimit, const int olimit,const int param2, const int param3, const int param4
, const int * __restrict__ lda_s, const int* __restrict__ ldb_s, const int* __restrict__ idx_s
, const int remainder1, const int remainder2,
const int* __restrict__ offseti, const int* __restrict__ offseto, const int* __restrict__ tile1, const int* __restrict__ tile2,
const int ilimitr, const int olimitr, const int inputrem, const int outputrem, const int rowinc, const int shm2, const int numelems_blk
)
{
int tmp;
int val0 = blockIdx.x;
int val1 = blockIdx.y;
int aexpr =0, bexpr = 0;
if(ndim > 1)
aexpr = val0 * lda_s[0] + val1*lda_s[1], bexpr = val0 * ldb_s[0] + val1 * ldb_s[1];
else
{
aexpr = val0 * lda_s[0], bexpr = val0 * ldb_s[0];
}
int idx;
idx = blockIdx.z;
int ii1 = -1 , iip2 = -1;
if(param2 == 0)
iip2 = blockIdx.x;
else if(param2 == 1)
iip2 = blockIdx.y;
if(param3 == 0)
ii1 = blockIdx.x;
else if(param3 == 1)
ii1 = blockIdx.y;
#pragma unroll
for(int i = 2; i < ndim; i++)
{
tmp = idx/idx_s[i];
int index = idx - tmp * idx_s[i];
aexpr += index * lda_s[i];
bexpr += index * ldb_s[i];
idx = tmp;
if(i == param2) ii1 = index;
else if(i == param3) iip2 = index;
}
const double *Atmp = A + aexpr;
double *Btmp = B + bexpr;
if(ii1 < inputrem && iip2 < outputrem)
{
fvinomgeneralolap_cuSharedMemTranspose_vec256(Atmp,Btmp, param4, offseti, offseto,tile1, tile2, ilimit, olimit, rowinc, shm2, numelems_blk );
}
else if(ii1 >= inputrem && iip2 < outputrem)
{ //remainder in size1
fvinomgeneralolap_cuSharedMemRemTranspose_vec256(Atmp,Btmp, param4, offseti, offseto,tile1, tile2, ilimit, olimit, rowinc, shm2, ilimitr, olimit, numelems_blk );
}
else if(iip2 >= outputrem && ii1 < inputrem)
{ //remainder in size2
fvinomgeneralolap_cuSharedMemRemTranspose_vec256(Atmp,Btmp, param4, offseti, offseto,tile1, tile2, ilimit, olimit, rowinc, shm2, ilimit, olimitr, numelems_blk );
}
else
{
fvinomgeneralolap_cuSharedMemRemTranspose_vec256(Atmp,Btmp, param4, offseti, offseto,tile1, tile2, ilimit, olimit, rowinc, shm2, ilimitr, olimitr, numelems_blk );
}
return;
//#undef ndim
}
*/
void fvinomgeneralolap_CallerWrapper(int ndim, type * A, type * B,const int ilimit, const int olimit, const int blockAI, const int blockBI, const int numblocks, const int numthreads, const int shm
, const int * __restrict__ lda_s, const int* __restrict__ ldb_s, const int* __restrict__ idx_s,
const int coarsa, const int coarsb, const int * idx_ss, const int shm2,
const int* __restrict__ aexpr, const int* __restrict__ bexpr, const int* __restrict__ texpr1, const int* __restrict__ texpr2, const int ilimitr, const int olimitr,
const int inputrem, const int outputrem, const int numelems_blk, const int size, type alpha, type beta
)
{
/* int second, third;
if(ndim > 2)
{
second = idx_ss[1]; third = numblocks/(idx_ss[0]*idx_ss[1]);
}
else if(ndim > 1)
{
second = idx_ss[1]; third = 1;// numblocks/(idx_ss[0]*idx_ss[1]);
}
else
{
second = third = 1;
}
dim3 thread_blocks(idx_ss[0], second, third);*/
const int rowinc = (numthreads+ilimit-1)/ilimit;
#ifdef printd
printf("thread_blocks = %d, numthreads = %d, shm = %d\n", numblocks, numthreads, shm);
printf("size = %d, ndim = %d, shm = %d\n", size, ndim, shm);
#endif
if(size > 0)
{
dim3 thread_blocks(numblocks/size, 1, 1);
switch(ndim)
{
EXPANDDIMS(fvinomgeneralolap_coars_kernel_, thread_blocks, numthreads, shm, (A, B, ilimit, olimit, blockAI, blockBI, numthreads, lda_s,ldb_s, idx_s, coarsa,coarsb, aexpr, bexpr, texpr1, texpr2, ilimitr, olimitr, inputrem, outputrem, rowinc, shm2, numelems_blk, size, alpha, beta))
default:
{
// fvinomgeneralolap_coars_kernel<<<thread_blocks, numthreads, shm>>>(ndim, A, B, ilimit, olimit, blockAI, blockBI, numthreads, lda_s,ldb_s, idx_s, coarsa, coarsb, aexpr, bexpr, texpr1, texpr2, ilimitr, olimitr, inputrem, outputrem, rowinc, shm2, numelems_blk);
}
}
}
else
{
dim3 thread_blocks(numblocks, 1, 1);
switch(ndim)
{
EXPANDDIMS(fvinomgeneralolap_kernel_, thread_blocks, numthreads, shm, (A, B, ilimit, olimit, blockAI, blockBI, numthreads, lda_s,ldb_s, idx_s, aexpr, bexpr, texpr1, texpr2, ilimitr, olimitr, inputrem, outputrem, rowinc, shm2, numelems_blk, alpha, beta))
default:
{
// fvinomgeneralolap_kernel<<<thread_blocks, numthreads, shm>>>(ndim, A, B, ilimit, olimit, blockAI, blockBI, numthreads, lda_s,ldb_s, idx_s, aexpr, bexpr, texpr1, texpr2, ilimitr, olimitr, inputrem, outputrem, rowinc, shm2, numelems_blk);
}
}
}
}
int ispresent(int a, int *array, int n)
{
for(int i = 0; i < n; i++)
{
if(array[i] == a) return 1;
}
return 0;
}
int getoff(int index,const int * dims,const int * stride, int n)
{
int ret = 0;
for(int i = 0; i < n; i++)
{
int ii = index % dims[i];
ret += ii * stride[i];
index/= dims[i];
}
return ret;
}
void makeconsecutive(int *tmp, int k)
{
int tmp2[20], permi[20], j,i;
//printf("\n perm before: ");
//for(i = 0; i < k; i++) printf("%d ", tmp[i]);//permi[i];
for(i = 0; i < k; i ++) tmp2[i] = tmp[i];
for(i = 0; i < k; i ++)
{
for(j = 0; j < k-i-1; j++)
{
if(tmp2[j] > tmp2[j+1])
{
int tmp = tmp2[j];
tmp2[j] = tmp2[j+1];
tmp2[j+1] = tmp;
}
}
}
for(i = 0; i < k; i++)
{
for(j=0; j <k; j++)
{
if(tmp[i] == tmp2[j])
{
permi[i] = j; break;
}
}
}
for(i = 0; i < k; i++) tmp[i] = permi[i];
}
void swap(int array[], int ind1, int ind2);
int cancoarsen(int *lda, int newndim)
{
if(newndim < 1) return -1;
unsigned long vol = 1;
for(int i = 0; i < newndim; i++)
{
vol *= lda[i];
}
if(vol < 32*100) return -1;
for(int i = 0; i < newndim; i++)
{
if(lda[i] >= 4 && lda[i] <= 31)
return i;
}
/* for(int i = 0; i < newndim; i++)
{
if(lda[i] >= 2 && lda[i] <= 300)
return i;
}*/
return -1;
}
extern "C"
void fvigeneralolap_transpose_kernel(int ndim, type *A, type *B, int *lda, const int *ldb, const int* params, const int * perm, const int* rperm, type alpha, type beta)
{
// int numBlocks = computeNumBlocksCode ;
#ifdef printd
printf("\nA Dims: %d \t %d \t %d\t %d\t %d\n", lda[0], lda[1], lda[2], lda[3], lda[4]);
printf("\nAll diff Params: %d \t %d \t %d\t %d\t %d\t %d\t %d\t %d\t %d \t%d\t %d\t %d\n", params[0], params[1], params[2], params[3], params[4], params[5], params[6], params[7], params[8], params[9], params[10], params[11]);
printf("\nB Dims: %d \t %d \t %d\t %d\t %d\n", ldb[0], ldb[1], ldb[2], ldb[3], ldb[4]);
printf("\nR perm: %d \t %d \t %d\t %d\t %d\n", rperm[0], rperm[1], rperm[2], rperm[3], rperm[4]);
#endif
int alimit = params[3];
int blimit = params[4];
int blockA=params[0];
int blockB = params[11];
int ilimit = params[7];
//int olimit = params[8];
int i = 0, j = 0;
int size = 1;
for(i = 0; i < blimit; i++)
{
if(perm[i] > alimit)
{
size *= ldb[i];
}
}
if(perm[i] > alimit)
{
if(blockB == 1)
size *= ldb[i];
else
size*= blockB;
}
#ifdef printd
printf("In .cu, alimit = %d, blimit = %d, bsize = %d, blockB = %d, blockA = %d\n", alimit, blimit, size, blockB, blockA);
#endif
//for(int y = 0; y < j; y++)
//printf("bo[%d] = %d ",y, bo[y]);
int olimit = size;
#ifdef SLICE
printf("\t%d\t%d\t", ilimit, olimit);
#endif
//exit(0);
int numBlocks = params[6];//((size[1] + 8 -1)/8) * size[2] * ((size[3] + 8 -1)/8) * size[4] ;
const int pad = ((ilimit %2)+1)%2;
int *d_lda_s, *d_ldb_s, *d_idx_s;
const int remainder1 = lda[params[3]] % blockA;
int remainder2;
remainder2 = lda[perm[params[4]]] % blockB;
const int ilimitr = (ilimit * remainder1) / blockA;
int olimitr = (olimit * remainder2) / blockB;
int irem, orem;
if(remainder1 == 0) irem = lda[alimit];
else irem = (lda[alimit] - remainder1)/blockA;
if(remainder2 == 0) orem = lda[perm[blimit]];
else orem = (ldb[blimit] - remainder2)/blockB;
if(perm[params[4]] == params[3])
{
olimitr = olimit;
remainder2 = 0;
orem = lda[perm[blimit]];
}
else
{
//remainder2 = lda[perm[params[4]]] % blockB;
}
#ifdef printd
printf("\nrem1 = %d, rem2 = %d\n", remainder1, remainder2);
printf("\nilimit = %d, olimit = %d, ilimitr = %d, olimitr = %d\n", ilimit, olimit, ilimitr, olimitr);
#endif
int *input_base, *output_base, *tile_base1, *tile_base2;
int *aexpr, *bexpr, *texpr1, *texpr2;
// int *ablock, *bblock, *d_ablock, *d_bblock;;
int lda_s[20], ldb_s[20], idx_s[20], temp[20];
lda_s[0] = 1;
ldb_s[0] = 1;
idx_s[0] = 1;
for(i = 1; i < alimit; i++)
{
idx_s[i] = 1;
lda_s[i] = lda_s[i-1] * lda[i-1];
ldb_s[i] = ldb_s[i-1] * ldb[i-1];
}
if(rperm[alimit] < blimit || (rperm[alimit] == blimit && blockB == 1))
idx_s[alimit] = 1;
else{
idx_s[alimit] = (lda[alimit] + blockA - 1) / blockA;
}
lda_s[i] = lda_s[i-1] * lda[i-1];
ldb_s[i] = ldb_s[i-1] * ldb[i-1];
for(i = alimit+1; i < ndim; i++)
{
lda_s[i] = lda_s[i-1] * lda[i-1];
ldb_s[i] = ldb_s[i-1] * ldb[i-1];
if(rperm[i] < blimit)
{
idx_s[i] = 1;// (lda[i] + blockA - 1) / blockA;
}
else if(rperm[i] == blimit)
{
idx_s[i] = (lda[i] + blockB - 1) / blockB;
}
else
{
idx_s[i] = lda[i];
}
}
for(i = 0; i < ndim; i++)
{
temp[i] = ldb_s[rperm[i]];
#ifdef printd
printf("Idx[%d] = %d\n", i, idx_s[i]);
#endif
}
aexpr = (int*)malloc(ilimit* olimit * sizeof(int));
bexpr = (int*)malloc(ilimit * olimit * sizeof(int));
texpr1 = (int*)malloc(ilimit* olimit * sizeof(int));
texpr2 = (int*)malloc(ilimit * olimit* sizeof(int));
SAFECUDAMALLOC(&input_base,ilimit*olimit*sizeof(int));
SAFECUDAMALLOC(&output_base,ilimit*olimit*sizeof(int));
SAFECUDAMALLOC(&tile_base1, ilimit*olimit *sizeof(int));
SAFECUDAMALLOC(&tile_base2, ilimit*olimit*sizeof(int));
int outD[20], outD_s[20], B_s[20];
outD_s[0] = 1;
outD[0] = ldb[0];
int inD[20], inD_s[20];
inD_s[0] = 1;
inD[0] = lda[0];
//B_s[0] = ldb_s[rperm[0]];
B_s[0] = 1;//ldb_s[rperm[0]];
int permD_s[20], permD[20];
// permD_s[alimit+1] = outD_s[0];
//permD[alimit+1] = outD[0];
int OO_C = 0, C_C = 0;
int onlyOut[20];int onlyOutI[20];
for(i = 0; i <= alimit; i++)
{
if(rperm[i] <= blimit)
C_C++;
}
const int OI_C = alimit + 1;
if(perm[0] > alimit)
{
OO_C++;
onlyOut[0] = ldb[0];
onlyOutI[0] = 0;
permD[0] = OI_C;
}
else
{
// C_C++;
permD[0] = perm[0];
}
for(i = 1; i < blimit; i++)
{
outD[i] = ldb[i];
outD_s[i] = outD_s[i-1] * outD[i-1];
// B_s[i] = ldb_s[rperm[i]];
B_s[i] = ldb_s[i];
if(perm[i] > alimit)
{
onlyOut[OO_C] = ldb[i];
onlyOutI[OO_C++] = i;
permD[i] = OI_C + i;
}
else
{
// C_C++;
permD[i] = perm[i];
}
}
if(blimit == 0) {i = 0; OO_C = 0;}
if(blockB == 1)
{
outD[i] = ldb[i];
}
else
{
outD[i] = blockB;
}
if(i > 0)
{
outD_s[i] = outD_s[i-1] * outD[i-1];
}
else
{
outD_s[i] = 1;
}
B_s[i] = ldb_s[i];
if(perm[i] <= alimit)
{
// C_C++;
permD[i] = perm[i];
}
else
{
//if(blockB == 1)
onlyOut[OO_C] = ldb[i];
//else
//onlyOut[OO_C] = blockB;
onlyOutI[OO_C++] = i;
permD[i] = OI_C + i;
}
i++;
for(j = 0; j < alimit; j++)
{
if(rperm[j] > blimit)
{
outD[i] = lda[j];
permD[i] = j;
outD_s[i] = outD_s[i-1] * outD[i-1];
B_s[i] = ldb_s[rperm[j]];
i++;
}
else
{
//tmp [k++] = j;
//C_C++;
}
}
if(rperm[j] > blimit)
{
//printf("BI = %d, rperm[%d] = %d, blimit = %d\n", i, j, rperm[j], blimit);
if(blockA == 1)
{
outD[i] = lda[j];
}
else
{
outD[i] = blockA;
}
permD[i] = j;
B_s[i] = ldb_s[rperm[j]];
outD_s[i] = outD_s[i-1] * outD[i-1];
i++;
}
else
{
//tmp [k++] = rperm[j];
//C_C++;
}
int BI = i;
for(i = 1; i < alimit; i++)
{
inD[i] = lda[i];
inD_s[i] = inD_s[i-1] * inD[i-1] ;
}
if(alimit == 0) i = 0;
if(blockA == 1)
{
inD[i] = lda[i];
}
else
{
inD[i] = blockA;
}
if(i > 0)
inD_s[i] = inD_s[i-1] * inD[i-1];
i++;
for(j = 0; j < blimit; j++)
{
if(perm[j] > alimit)
{
inD[i] = ldb[j];
inD_s[i] = inD_s[i-1] * inD[i-1];
i++;
}
else{
// C_C++;
}
}
if(perm[j] > alimit)
{
if(blockB == 1)
{
inD[i] = ldb[j];
}
else
{
inD[i] = blockB;
}
inD_s[i] = inD_s[i-1] * inD[i-1];
i++;
}
else{
// C_C++;
}
int AI = i;
makeconsecutive(permD, AI);
//permD[0] = 1, permD[1] = 2, permD[2] = 0;
//inD[0] = 32, inD[1] = 2, inD[2] = 30;
//inD_s[0] = 1, inD_s[1] = 32, inD_s[2] = 64;
for(i = 0; i < AI; i++)
{
permD_s[i] = inD_s[permD[i]];
}
if(BI != AI)
{
printf("No. of dimensions in I and O non-matching...\n");
//return;
}
#ifdef printd
printf("\nOO_C = %d, C_C = %d\n ", OO_C, C_C);
printf("\nOUT_D: ");
for(int i = 0; i < BI; i++)
{
printf("%d ",outD[i]);
}
printf("\nOUT_D_S: ");
for(int i = 0; i < BI; i++)
{
printf("%d ",outD_s[i]);
}
printf("\nIn_D: ");
for(int i = 0; i < AI; i++)
{
printf("%d ",inD[i]);
}
printf("\nIN_D_S: ");
for(int i = 0; i < AI; i++)
{
printf("%d ", inD_s[i]);
}
printf("\n");
printf("\nB_S: ");
for(int i = 0; i < BI; i++)
{
printf("%d ", B_s[i]);
}
printf("\nPerm_D: ");
for(int i = 0; i < BI; i++)
{
printf("%d ",permD[i]);
}
printf("\n");
printf("\nPerm_S: ");
for(int i = 0; i < BI; i++)
{
printf("%d ", permD_s[i]);
}
printf("\n");
printf("\n");
#endif
for(int rowId=0; rowId < olimit; rowId++)
{
int tmp = rowId;
int aoff=0,j;
for(j = 0; j < OO_C; j++)
{
int dval = onlyOut[j];
int val = tmp%dval;
tmp /= dval;
aoff += val * lda_s[perm[onlyOutI[j]]];
}
for(int colId=0; colId < ilimit; colId++)
{
aexpr[rowId*ilimit + colId] = aoff + colId;
texpr1[rowId * (ilimit) + colId] = rowId * (ilimit+pad) + colId;
int off = getoff(rowId * ilimit + colId, outD, permD_s, BI);
texpr2[rowId * (ilimit) + colId] = off + pad * (off/ilimit);
off = getoff(rowId * ilimit + colId, outD, B_s, AI);
bexpr[rowId* ilimit + colId] = off;
}
}
#ifdef printd
printf("\n...A...\n");
for(int rowId=0; rowId < olimit; rowId++)
{
printf("%d ", aexpr[rowId]);
//printf("\n");
//for(int colId=0; colId < ilimit; colId++)
{
// printf("%d ", bexpr[rowId * ilimit + colId]);
// printf("%d ", texpr2[rowId * ilimit + colId]);
}
//printf("\n");
}
printf("\n...B...\n");
for(int rowId=0; rowId < olimit; rowId++)
{
// printf("%d ", bexpr[rowId]);
//printf("\n");
for(int colId=0; colId < ilimit; colId++)
{
printf("%d ", bexpr[rowId * ilimit + colId]);
// printf("%d ", texpr2[rowId * ilimit + colId]);
}
printf("\n");
}
printf("\n...T...\n");
for(int rowId=0; rowId < olimit; rowId++)
{
// printf("%d ", bexpr[rowId]);
// printf("\n");
for(int colId=0; colId < ilimit; colId++)
{
// printf("%d ", bexpr[rowId * ilimit + colId]);
printf("%d ", texpr2[rowId * ilimit + colId]);
}
printf("\n");
}
#endif
lda_s[params[3]] *= params[0];///lda_s[i-1] * lda[i-1];
temp[params[3]] *= params[0];// ldb_s[i-1] * ldb[i-1];
if(params[3] != perm[params[4]])//no double blocking
{
lda_s[perm[params[4]]] *= params[11];///lda_s[i-1] * lda[i-1];
temp[perm[params[4]]] *= params[11];// ldb_s[i-1] * ldb[i-1];
}
int c = 0, d = 0;
c = alimit + 1;//c = No. of dimensions to be removed from input for thread blocking, b = same for output but only for those which are not in input
if(blockA > 1) c--;
int ablockI, bblockI;
//int dims[20];
ablockI = alimit-c;
bblockI = perm[blimit]-c;
int tempbblockI = bblockI;
#ifdef printd
printf("\nablockI = %d, bblockI = %d\n", ablockI, bblockI);
#endif
for(int i = c; i < ndim; i++)
{
if(((rperm[i] < blimit) || ((rperm[i] == blimit) && (blockB ==1))))
{
idx_s[i] = 1;// idx_s[j];
/*for(int j = i+1; j < ndim-d; j ++)
{
idx_s[j-1] = idx_s[j];
lda_s[j-1] = lda_s[j];
temp[j-1] = temp[j];
}*/
d++;
if((i < bblockI + c) || ((i == bblockI + c) && (blockB == 1)))
tempbblockI--;
}
}
bblockI = tempbblockI;
int cnt = 0;
for(int i = c; i < ndim; i++)
{
if(idx_s[i] == 1)
{
for(int j = i+1; j < ndim; j ++)
{
idx_s[j-1] = idx_s[j];
lda_s[j-1] = lda_s[j];
temp[j-1] = temp[j];
}
cnt++;
i--;
}
if(cnt > ndim) break;
}
/*
for(int i = c; i < ndim; i++)
{
// dims[i] = lda[i];
if((rperm[i] < blimit || (rperm[i] == blimit && blockB ==1)))
{
for(j = i+1; j < ndim-d; j ++)
{
idx_s[j-1] = idx_s[j];
lda_s[j-1] = lda_s[j];
temp[j-1] = temp[j];
// dims[j-1] = lda[j];
}
d++;
if((i < bblockI + c) || (i == bblockI + c) && (blockB == 1))
bblockI--;
if(i < alimit)
ablockI--;
}
}*/
int newndim = ndim - (c + d);
#ifdef printd
printf("\nChanged ablockI = %d, bblockI = %d\n", ablockI, bblockI);
#endif
//Find the largest dimension and make it the first as only Dimx can have > 65k size
/*int max = 0;
for(int i = 1; i < newndim; i++)
{
if(idx_s[c+i] > idx_s[max+c]) max = i;
}
//printf("\nmax: %d ", max);
if(max > c)
{
swap(idx_s, c, max+c);
swap(lda_s, c, max+c);
swap(temp, c, max+c);
if(max == ablockI) ablockI = 0;
else if(ablockI == 0) ablockI = max;
if(max == bblockI) bblockI = 0;
else if(bblockI ==0) bblockI = max;
}*/
if(ablockI > 0)//move it to start, junk part
{
swap(idx_s, ablockI+c, c);
swap(lda_s, ablockI+c, c);
swap(temp, ablockI+c, c);
}
int bi = 0;
if(bblockI == 0 && ablockI > 0)
bi = ablockI+c;
else
bi = bblockI+c;
if(bblockI >= 0 && bblockI != ablockI)//move it to start
{
swap(idx_s, bi, c+ (ablockI >=0));
swap(lda_s, bi, c+ (ablockI >=0));
swap(temp, bi, c+ (ablockI >=0));
}
if(bblockI >= 0) {
if(ablockI == bblockI || ablockI < 0) bblockI = 0;
else bblockI = 1;
}
if(ablockI >=0) ablockI = 0;
int nblkdims = 0;
if(ablockI >= 0) nblkdims++;
if((bblockI >= 0) && (bblockI != ablockI)) nblkdims++;
#ifdef printd
printf("\nIDx: ");
for(int i = 0; i < newndim; i++)
{
printf("%d ",idx_s[i+c]);
}
printf("ndim = %d, c = %d, d = %d, newndim = %d, nblkdims = %d\n", ndim, c, d, newndim, nblkdims);
#endif
int acoars = 0, bcoars = 0;
size = -1;
#ifdef printd
printf("\nirem = %d, orem = %d, alimit = %d, blimit = %d, ablockI = %d, bblockI = %d, newdim = %d, c = %d, d = %d, olddim = %d\n\n", irem, orem, alimit, blimit, ablockI, bblockI, newndim, c, d, ndim);
#endif
#ifndef NOCOARSEN
int cd = cancoarsen(idx_s+c+nblkdims, newndim-nblkdims);
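// If cancoarsen reports a dimension (beyond the block dimensions) suitable for thread coarsening,
// its strides and extent are peeled off into acoars/bcoars/size and removed from the dimension
// arrays below, so the kernel iterates over that dimension internally.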
if(cd >= 0)
{
int offset = c + cd + nblkdims;
acoars = lda_s[offset];
bcoars = temp[offset];
size = idx_s[offset];
for(int j = cd+1+nblkdims; j < newndim; j++)
{
idx_s[c+j-1] = idx_s[c+j];
lda_s[c+j-1] = lda_s[c+j];
temp[c+j-1] = temp[c+j];
}
// ablockI--;
// bblockI--;
newndim--;
}
#ifdef printd
printf("\nirem = %d, orem = %d, alimit = %d, blimit = %d, ablockI = %d, bblockI = %d, newdim = %d, c = %d, d = %d, olddim = %d, acoars = %d, bcoars = %d, cd = %d\n\n", irem, orem, alimit, blimit, ablockI, bblockI, newndim, c, d, ndim, acoars, bcoars, cd);
#endif
#endif
SAFECUDAMALLOC(&d_lda_s,newndim*sizeof(int));
SAFECUDAMALLOC(&d_ldb_s,newndim*sizeof(int));
SAFECUDAMALLOC(&d_idx_s,newndim*sizeof(int));
SAFECUDAMEMCPY(d_idx_s, idx_s+c,newndim*sizeof(int), cudaMemcpyHostToDevice);
SAFECUDAMEMCPY(d_lda_s, lda_s+c,newndim*sizeof(int), cudaMemcpyHostToDevice);
SAFECUDAMEMCPY(d_ldb_s, temp+c,newndim*sizeof(int), cudaMemcpyHostToDevice);
SAFECUDAMEMCPY(input_base, aexpr, ilimit*olimit*sizeof(int), cudaMemcpyHostToDevice);
SAFECUDAMEMCPY(output_base, bexpr, ilimit*olimit*sizeof(int), cudaMemcpyHostToDevice);
SAFECUDAMEMCPY(tile_base1, texpr1, ilimit*olimit*sizeof(int), cudaMemcpyHostToDevice);
SAFECUDAMEMCPY(tile_base2, texpr2,ilimit* olimit*sizeof(int), cudaMemcpyHostToDevice);
#ifdef MODEL
{
const int olimit = params[8];
int olimitr = (olimit * remainder2) / blockB;
printf("\t%d\t%d\t", ilimit, olimit);
printf("\t%d\t%d\t%d\t%d\t", ilimit/32, ilimit%32, olimit/32,olimit%32 );
double f1, f2, f3, f4, f;
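// f1..f4 appear to estimate the average utilization of 32-wide warp fragments for the four
// combinations of full/remainder tile extents (ilimit/ilimitr x olimit/olimitr); f then weights
// them by how many full and remainder blocks the A/B extents produce. Used only for MODEL output.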
printf("\tf1=%lf\t", f1 = ((ilimit/32) * (olimit/32) + (double)(ilimit/32) * (olimit%32) /32+ (double)(ilimit%32) * (olimit/32) /32 + (double)(ilimit%32) * (olimit%32) /(32*32) )/ (int)(((ilimit+31)/32) * ((olimit+31)/32)));
printf("\tf2=%lf\t", f2 = ((ilimitr/32) * (olimit/32) + (double)(ilimitr/32) * (olimit%32) /32+ (double)(ilimitr%32) * (olimit/32) /32 + (double)(ilimitr%32) * (olimit%32) /(32*32) )/ max(1,(int)(((ilimitr+31)/32) * ((olimit+31)/32))));
printf("\tf3=%lf\t", f3 = ((ilimit/32) * (olimitr/32) + (double)(ilimit/32) * (olimitr%32) /32+ (double)(ilimit%32) * (olimitr/32) /32 + (double)(ilimit%32) * (olimitr%32) /(32*32) )/ max(1,(int)(((ilimit+31)/32) * ((olimitr+31)/32))));
printf("\tf4=%lf\t", f4 = ((ilimitr/32) * (olimitr/32) + (double)(ilimitr/32) * (olimitr%32) /32+ (double)(ilimitr%32) * (olimitr/32) /32 + (double)(ilimitr%32) * (olimitr%32) /(32*32) )/ max(1,(int)(((olimitr+31)/32) * ((olimitr+31)/32))));
printf("\t%d\t%d\t", lda[alimit], ldb[blimit]);
int asize = lda[alimit];
int bsize = ldb[blimit];
printf("MKL \t%d\t%d\t%d\t%d\t", asize/blockA, asize%blockA, bsize/blockB,bsize%blockB );
//int amax = min(blockA, 32);
//int bmax = min(blockB, 32);
int amax = blockA;
int bmax = blockB;
printf("\tf=%lf\t", f = ((asize/amax) * (bsize/bmax) *f1 + (double)((asize/amax) * (bsize%bmax > 0) *f3)+ (double)((asize%amax > 0) * (bsize/bmax)*f2) + (double)((asize%amax>0) * (bsize%bmax > 0) *f4) )/ (int)(((asize+amax-1)/amax) * ((bsize+bmax-1)/bmax)));
//printf("\tf=%lf\t", f = ((asize/amax) * (bsize/bmax) *f1 + (double)(asize/amax) * (bsize%bmax) *f3/bmax+ (double)(asize%amax) * (bsize/bmax)*f2 /amax + (double)(asize%amax) * (bsize%bmax) *f4/(amax*bmax) )/ (int)(((asize+amax-1)/amax) * ((bsize+bmax-1)/bmax)));
printf("\t%lf\t", f);
}
#endif
#ifdef NOHTIME
#include "includes/nohtimestart.h"
#endif
fvinomgeneralolap_CallerWrapper(newndim, A, B,ilimit,olimit, ablockI,bblockI
,numBlocks, params[2], (ilimit+pad) * olimit *sizeof(type)
, d_lda_s,d_ldb_s,d_idx_s
,acoars,bcoars,idx_s+c, (ilimit+pad), input_base, output_base, tile_base1, tile_base2, ilimitr, olimitr, irem, orem, ilimit*olimit, size, alpha, beta);
#ifdef NOHTIME
#include "includes/nohtimestop.h"
#endif
{cudaError_t err = cudaGetLastError();
if(err != cudaSuccess){
printf("\nKernel ERROR in fvi_nomatch_generalolap: %s (line: %d)\n", cudaGetErrorString(err), __LINE__);
//exit(-1);
}}
free(aexpr);
free(bexpr);
free(texpr1);
free(texpr2);
cudaFree(d_lda_s);
cudaFree(d_ldb_s);
cudaFree(d_idx_s);
cudaFree(input_base);
cudaFree(output_base);
cudaFree(tile_base1);
cudaFree(tile_base2);
}
|
4538ae8660a9757829dc8557272bb7c2a35e72c0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <chrono>
#include <iomanip>
#include <iostream>
#include <random>
#include "Timer.cuh"
#include "CheckError.cuh"
using namespace timer;
// The GPU always works on its own memory, so the kernel must be launched only once the data is in GPU memory
__global__
void vectorAddKernel(const int* d_inputA,
const int* d_inputB,
int N,
int* output) {
int global_id = blockIdx.x * blockDim.x + threadIdx.x;
if(global_id < N) output[global_id] = d_inputA[global_id] + d_inputB[global_id];
}
const int N = 100000000;
int main(){
Timer<DEVICE> TM_device;
Timer<HOST> TM_host;
// -------------------------------------------------------------------------
// HOST MEMORY ALLOCATION
int* h_inputA = new int[N];
int* h_inputB = new int[N];
int* d_output_tmp = new int[N]; // <-- used for device result
int* h_output = new int[N];
// -------------------------------------------------------------------------
// HOST INITIALIZATION
unsigned seed = std::chrono::system_clock::now().time_since_epoch().count();
std::default_random_engine generator(seed);
std::uniform_int_distribution<int> distribution(1, 100);
for (int i = 0; i < N; i++) {
h_inputA[i] = distribution(generator);
h_inputB[i] = distribution(generator);
}
// -------------------------------------------------------------------------
// HOST EXECUTION
std::cout<<"Starting computation on HOST.."<<std::endl;
TM_host.start();
for (int i = 0; i < N; i++)
h_output[i] = h_inputA[i] + h_inputB[i];
TM_host.stop();
TM_host.print("vectorAdd host: ");
// -------------------------------------------------------------------------
// DEVICE MEMORY ALLOCATION
int *d_inputA, *d_inputB, *d_output;
SAFE_CALL( hipMalloc( &d_inputA, N * sizeof(int) )); // capture the error code of the allocation (done for every runtime API call)
SAFE_CALL( hipMalloc( &d_inputB, N * sizeof(int) ));
SAFE_CALL( hipMalloc( &d_output, N * sizeof(int) ));
// -------------------------------------------------------------------------
// COPY DATA FROM HOST TO DEVICE
SAFE_CALL( hipMemcpy( d_inputA, h_inputA, N * sizeof(int), hipMemcpyHostToDevice));
SAFE_CALL( hipMemcpy( d_inputB, h_inputB, N * sizeof(int), hipMemcpyHostToDevice));
// -------------------------------------------------------------------------
// DEVICE INIT
dim3 DimGrid(N/256, 1, 1);
if (N%256) DimGrid.x++;
dim3 DimBlock(256, 1, 1);
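// The grid sizing above is a ceiling division: DimGrid.x = ceil(N / 256.0),
// equivalently dim3 DimGrid((N + 256 - 1) / 256, 1, 1);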
// -------------------------------------------------------------------------
// DEVICE EXECUTION
std::cout<<"Starting computation on DEVICE.."<<std::endl;
TM_device.start();
hipLaunchKernelGGL(( vectorAddKernel), dim3(DimGrid),dim3(DimBlock), 0, 0, d_inputA,d_inputB,N,d_output);
CHECK_CUDA_ERROR
TM_device.stop();
TM_device.print("vectorAdd device: ");
std::cout << std::setprecision(1)
<< "Speedup: " << TM_host.duration() / TM_device.duration()
<< "x\n\n";
// -------------------------------------------------------------------------
// COPY DATA FROM DEVICE TO HOST
SAFE_CALL( hipMemcpy( d_output_tmp, d_output, N * sizeof(int), hipMemcpyDeviceToHost));// dest, src
// -------------------------------------------------------------------------
// RESULT CHECK
for (int i = 0; i < N; i++) {
if (h_output[i] != d_output_tmp[i]) {
std::cerr << "wrong result at: " << i
<< "\nhost: " << h_output[i]
<< "\ndevice: " << d_output_tmp[i] << "\n\n";
hipDeviceReset();
std::exit(EXIT_FAILURE);
}
// else printf("%i %i\n", h_output[i], d_output_tmp[i]);
}
std::cout << "<> Correct\n\n";
// -------------------------------------------------------------------------
// HOST MEMORY DEALLOCATION
delete[] h_inputA;
delete[] h_inputB;
delete[] h_output;
delete[] d_output_tmp;
// -------------------------------------------------------------------------
// DEVICE MEMORY DEALLOCATION
SAFE_CALL( hipFree( d_inputA ) );
SAFE_CALL( hipFree( d_inputB ) );
SAFE_CALL( hipFree( d_output ) );
// -------------------------------------------------------------------------
hipDeviceReset();
}
|
4538ae8660a9757829dc8557272bb7c2a35e72c0.cu
|
#include <chrono>
#include <iomanip>
#include <iostream>
#include <random>
#include "Timer.cuh"
#include "CheckError.cuh"
using namespace timer;
// The GPU always works on its own memory, so the kernel must be launched only once the data is in GPU memory
__global__
void vectorAddKernel(const int* d_inputA,
const int* d_inputB,
int N,
int* output) {
int global_id = blockIdx.x * blockDim.x + threadIdx.x;
if(global_id < N) output[global_id] = d_inputA[global_id] + d_inputB[global_id];
}
const int N = 100000000;
int main(){
Timer<DEVICE> TM_device;
Timer<HOST> TM_host;
// -------------------------------------------------------------------------
// HOST MEMORY ALLOCATION
int* h_inputA = new int[N];
int* h_inputB = new int[N];
int* d_output_tmp = new int[N]; // <-- used for device result
int* h_output = new int[N];
// -------------------------------------------------------------------------
// HOST INITIALIZATION
unsigned seed = std::chrono::system_clock::now().time_since_epoch().count();
std::default_random_engine generator(seed);
std::uniform_int_distribution<int> distribution(1, 100);
for (int i = 0; i < N; i++) {
h_inputA[i] = distribution(generator);
h_inputB[i] = distribution(generator);
}
// -------------------------------------------------------------------------
// HOST EXECUTION
std::cout<<"Starting computation on HOST.."<<std::endl;
TM_host.start();
for (int i = 0; i < N; i++)
h_output[i] = h_inputA[i] + h_inputB[i];
TM_host.stop();
TM_host.print("vectorAdd host: ");
// -------------------------------------------------------------------------
// DEVICE MEMORY ALLOCATION
int *d_inputA, *d_inputB, *d_output;
SAFE_CALL( cudaMalloc( &d_inputA, N * sizeof(int) )); // capture the error code of the allocation (done for every runtime API call)
SAFE_CALL( cudaMalloc( &d_inputB, N * sizeof(int) ));
SAFE_CALL( cudaMalloc( &d_output, N * sizeof(int) ));
// -------------------------------------------------------------------------
// COPY DATA FROM HOST TO DEVICE
SAFE_CALL( cudaMemcpy( d_inputA, h_inputA, N * sizeof(int), cudaMemcpyHostToDevice));
SAFE_CALL( cudaMemcpy( d_inputB, h_inputB, N * sizeof(int), cudaMemcpyHostToDevice));
// -------------------------------------------------------------------------
// DEVICE INIT
dim3 DimGrid(N/256, 1, 1);
if (N%256) DimGrid.x++;
dim3 DimBlock(256, 1, 1);
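// The grid sizing above is a ceiling division: DimGrid.x = ceil(N / 256.0),
// equivalently dim3 DimGrid((N + 256 - 1) / 256, 1, 1);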
// -------------------------------------------------------------------------
// DEVICE EXECUTION
std::cout<<"Starting computation on DEVICE.."<<std::endl;
TM_device.start();
vectorAddKernel<<<DimGrid,DimBlock>>>(d_inputA,d_inputB,N,d_output);
CHECK_CUDA_ERROR
TM_device.stop();
TM_device.print("vectorAdd device: ");
std::cout << std::setprecision(1)
<< "Speedup: " << TM_host.duration() / TM_device.duration()
<< "x\n\n";
// -------------------------------------------------------------------------
// COPY DATA FROM DEVICE TO HOST
SAFE_CALL( cudaMemcpy( d_output_tmp, d_output, N * sizeof(int), cudaMemcpyDeviceToHost));// dest, src
// -------------------------------------------------------------------------
// RESULT CHECK
for (int i = 0; i < N; i++) {
if (h_output[i] != d_output_tmp[i]) {
std::cerr << "wrong result at: " << i
<< "\nhost: " << h_output[i]
<< "\ndevice: " << d_output_tmp[i] << "\n\n";
cudaDeviceReset();
std::exit(EXIT_FAILURE);
}
// else printf("%i %i\n", h_output[i], d_output_tmp[i]);
}
std::cout << "<> Correct\n\n";
// -------------------------------------------------------------------------
// HOST MEMORY DEALLOCATION
delete[] h_inputA;
delete[] h_inputB;
delete[] h_output;
delete[] d_output_tmp;
// -------------------------------------------------------------------------
// DEVICE MEMORY DEALLOCATION
SAFE_CALL( cudaFree( d_inputA ) );
SAFE_CALL( cudaFree( d_inputB ) );
SAFE_CALL( cudaFree( d_output ) );
// -------------------------------------------------------------------------
cudaDeviceReset();
}
|
767eb5a41ff64707323c72ae4ce78fe487772343.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "header_hip.cuh"
typedef float pq_float;
typedef uint8_t pq_int;
// configuration
int topk;
int codebook;
int Ks;
int group;
int batch_vecs;
int batch_query;
int threads_per_block;
int ret_limit;
// size of everything
int size_codeword_nq;
int size_codeword_pq;
int size_codebook;
int size_query;
int size_threshold;
int size_device_query;
long long size_vecs;
int size_norm_filter;
int size_q_map;
int size_lookup_table;
int size_ret_result;
int size_topnorm_vecs;
string dataset;
int num_vecs, num_dimen, num_query, num_q, top_norm;
pq_float *vecs;
pq_float *query;
pq_float *norm_ret;
std::pair<pq_float, int> *ip_vecs;
int *ret_result;
int query_idx;
long long cal_2Dcoordinate(int x, int y, int leny) {
return (long long)x * leny + y;
}
int cal_3Dcoordinate(int x, int y, int z, int leny, int lenz) {
return x * leny * lenz + y * lenz + z;
}
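// Builds the per-query lookup table: one block per sub-quantizer (grid = num_q, block = Ks).
// For a norm quantizer (q_type 0) the codeword value is copied directly; for a product
// quantizer (q_type 1) each thread computes the inner product of its codeword with the query
// (codewords here span the full query dimension).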
__global__ void calLookupOnGPU(pq_float *query, pq_float *codeword_nq, pq_float *codeword_pq, pq_int *q_map, pq_float *lookup_table,
int num_dimen, int offset) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int q_type = q_map[2 * blockIdx.x];
int idx_q = q_map[2 * blockIdx.x + 1];
if (q_type == 0) {
lookup_table[idx] = codeword_nq[idx_q * blockDim.x + threadIdx.x];
} else if (q_type == 1) {
pq_float temp_sum = 0;
for (int i = 0; i < num_dimen; ++i) {
temp_sum += codeword_pq[idx_q * blockDim.x * num_dimen + threadIdx.x * num_dimen + i] * query[offset + i];
}
lookup_table[idx] = temp_sum;
}
}
void calLookupOnCPU(pq_float *query, pq_float *codeword_nq, pq_float *codeword_pq, pq_int *q_map, pq_float *lookup_table,
int num_dimen, int num_q, int Ks) {
for (int i = 0; i < num_q; ++i) {
int q_type = q_map[2 * i];
int idx_q = q_map[2 * i + 1];
for (int j = 0; j < Ks; ++j) {
if (q_type == 0) {
lookup_table[cal_2Dcoordinate(i, j, Ks)] = codeword_nq[cal_2Dcoordinate(idx_q, j, Ks)];
} else if (q_type == 1) {
pq_float temp_sum = 0;
for (int k = 0; k < num_dimen; ++k) temp_sum += codeword_pq[cal_3Dcoordinate(idx_q, j, k, Ks, num_dimen)] * query[k];
lookup_table[cal_2Dcoordinate(i, j, Ks)] = temp_sum;
}
}
}
}
void checkLookup(pq_float *h_lookup_table, pq_float *gpuref_lookup_table, int length) {
double eps = 1e-7;
bool match = 1;
for (int i = 0; i < length; ++i) {
if (abs(h_lookup_table[i] - gpuref_lookup_table[i]) > eps) {
match = 0;
printf("Lookup tables do not match!\n");
printf("host %f gpu %f at current %d\n", h_lookup_table[i], gpuref_lookup_table[i], i);
}
}
if (match) printf("Lookup tables match.\n\n");
}
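// Threshold filter: the lookup table is first staged into shared memory, then each thread scores
// one database vector. A norm-quantizer entry (q_type 0) sets the coefficient that scales the
// following product-quantizer contributions (q_type 1); the thread writes a 0/1 flag indicating
// whether the approximate inner product reaches the threshold.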
__global__ void calApproxVecs(int *mid_result, pq_int *codebook, pq_float *lookup_table, pq_int *q_map, int num_q, int Ks, int num_vecs,
int start_vecs, int end_vecs, pq_float threshold) {
int average_assign = num_q * Ks / blockDim.x;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
extern __shared__ pq_float local_lookup_table [];
for (int i = 0; i < average_assign; ++i) local_lookup_table[threadIdx.x * average_assign + i] = lookup_table[threadIdx.x * average_assign + i];
__syncthreads();
// init
if (start_vecs + idx < end_vecs) {
int q_type = 0;
pq_float result = 0, coefficient = 1; //result_temp = 0;
for (int i = 0; i < num_q; ++i) {
q_type = q_map[2 * i];
if (q_type == 0) {
//result += result_temp * coefficient;
coefficient = local_lookup_table[i * Ks + codebook[i * num_vecs + (start_vecs + idx)]];
//result_temp = 0;
} else if (q_type == 1) {
result += coefficient * local_lookup_table[i * Ks + codebook[i * num_vecs + (start_vecs + idx)]];
//result_temp += local_lookup_table[i * Ks + codebook[i * num_vecs + (start_vecs + idx)]];
}
}
//result += coefficient * result_temp;
if (result >= threshold) mid_result[idx] = 1;
else mid_result[idx] = 0;
// mid_result[idx] = result;
}
}
void calMidResultOnCPU(int *mid_result, pq_int *codebook, pq_float *lookup_table, pq_int *q_map, int num_q, int Ks, int num_vecs,
int start_pos, int end_pos, pq_float threshold) {
for (int i = 0; i < end_pos - start_pos; ++i) {
pq_float result = 0, mid_coefficient = 1;
for (int j = 0; j < num_q; ++j) {
int q_type = q_map[2 * j], idx = j * Ks + codebook[j * num_vecs + (start_pos + i)];
if (q_type == 0) {
mid_coefficient = lookup_table[idx];
} else if (q_type == 1) {
result += mid_coefficient * lookup_table[idx];
}
}
mid_result[i] = result >= threshold;
}
}
void checkMidResult(int *h_mid_result, int *gpuref_mid_result, int length) {
bool match = 1;
// pq_float eps = 1e-7;
for (int i = 0; i < length; ++i) {
if (h_mid_result[i] != gpuref_mid_result[i]) {
// if (abs(h_mid_result[i] - gpuref_mid_result[i]) > eps) {
match = 0;
printf("Mid results do not match!\n");
printf("host %d gpu %d at current %d\n", h_mid_result[i], gpuref_mid_result[i], i);
// printf("host %f gpu %f at current %d\n", h_mid_result[i], gpuref_mid_result[i], i);
break;
}
}
if (match) printf("Mid results match.\n\n");
}
// __global__ void assignResult2(int *prefixsum_result, int *d_ret_result, int start_pos, int end_pos, int current_length) {
// int idx = blockIdx.x * blockDim.x + threadIdx.x;
// if (start_pos + idx < end_pos) {
// if (!idx) {
// if (prefixsum_result[idx] == 1) {
// d_ret_result[current_length] = start_pos + idx;
// }
// } else {
// if (prefixsum_result[idx] - prefixsum_result[idx - 1]) {
// d_ret_result[current_length + prefixsum_result[idx] - 1] = start_pos + idx;
// }
// }
// }
// }
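// Stream compaction: given the inclusive prefix sum of the 0/1 flags, a thread whose prefix value
// increased over its left neighbour writes its global vector index (start_pos + idx) into
// compacted output slot prefixsum_result[idx] - 1.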
__global__ void assignResult(int *prefixsum_result, int *d_ret_result, int start_pos, int end_pos) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (start_pos + idx < end_pos) {
if (!idx) {
if (prefixsum_result[idx] == 1) {
d_ret_result[0] = start_pos + idx;
}
} else {
if (prefixsum_result[idx] - prefixsum_result[idx - 1]) {
d_ret_result[prefixsum_result[idx] - 1] = start_pos + idx;
}
}
}
}
__global__ void initResult(int *d_ret_result, int size_ret_result) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size_ret_result) d_ret_result[idx] = 0;
}
pq_float calOneIP(pq_float *arr, int vecs_idx, int query_idx, int num_dimen) {
pq_float temp_sum = 0;
int offset_vecs = vecs_idx * num_dimen, offset_query = query_idx * num_dimen;
for (int i = 0; i < num_dimen; ++i) {
temp_sum += arr[offset_vecs + i] * query[offset_query + i];
}
return temp_sum;
}
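// Inner products are negated in calIP so that std::nth_element (ascending order, used in main)
// places the top-k largest inner products in the first k slots.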
void calIP(int *idx, int length, std::pair<pq_float, int> *out, int num_dimen) {
for (int i = 0; i < length; ++i) {
out[i] = std::make_pair(-calOneIP(vecs, idx[i], query_idx, num_dimen), idx[i]);
}
}
bool cmpIPIndex(int lhs, int rhs) {
return norm_ret[lhs] > norm_ret[rhs];
}
int sameIndex(int *vecs1, int *vecs2, int number) {
bool *flag = new bool[number];
for (int i = 0; i < number; ++i) {
flag[i] = false;
}
int ret = 0;
for (int i = 0; i < number; ++i) {
for (int j = 0; j < number; ++j) {
if (!flag[j] && vecs1[i] == vecs2[j]) {
flag[j] = true;
ret++;
}
}
}
delete [] flag;
return ret;
}
int main(int argc, char *argv[]) {
// input configuration
dataset = string(argv[1]);
topk = atoi(argv[2]);
codebook = atoi(argv[3]);
Ks = atoi(argv[4]);
group = atoi(argv[5]);
batch_vecs = atoi(argv[6]);
batch_query = atoi(argv[7]);
threads_per_block = atoi(argv[8]);
ret_limit = atoi(argv[9]);
// init input data
printf("# Begin reading data\n");
string file_path = "../data/" + dataset + "/" + dataset + "_cuda_c.txt";
freopen(file_path.c_str(), "r", stdin);
vector<int>q_type;
pq_int q_map[100];
scanf("%d%d%d%d", &num_vecs, &num_dimen, &num_query, &top_norm);
num_q = codebook * group;
ret_limit = num_vecs * ret_limit / 100;
char temp_str[50];
scanf("%s", temp_str);
// printf("%s\n", temp_str);
int num_nq = 0, num_pq = 0;
for (int i = 0; i < num_q; ++i) {
if (temp_str[i] == 'N') {
q_type.push_back(0);
q_map[2 * i] = 0;
q_map[2 * i + 1] = num_nq;
num_nq++;
}
else if (temp_str[i] == 'P') {
q_type.push_back(1);
q_map[2 * i] = 1;
q_map[2 * i + 1] = num_pq;
num_pq++;
}
}
// may be larger than the limit of int
printf("# Begin allocating memory\n");
size_codeword_nq = num_nq * Ks;
size_codeword_pq = num_pq * Ks * num_dimen;
size_codebook = num_q * num_vecs;
size_query = num_query * num_dimen;
size_threshold = num_query;
size_device_query = num_query * num_dimen;
size_vecs = (long long)num_vecs * num_dimen;
size_norm_filter = num_vecs / batch_vecs;
size_q_map = num_q * 2;
size_lookup_table = num_q * Ks;
size_ret_result = num_vecs;
size_topnorm_vecs = top_norm * num_dimen;
vecs = (pq_float *)malloc(size_vecs * sizeof(pq_float));
if (!vecs) {
printf("\n------malloc failed!!------\n");
}
pq_float *codeword_nq = new pq_float[size_codeword_nq];
pq_float *codeword_pq = new pq_float[size_codeword_pq];
pq_int *codebook = new pq_int[size_codebook];
pq_float *query_norm = new pq_float[num_query];
pq_float *threshold = new pq_float[size_threshold];
// vecs = (pq_float *)malloc(size_vecs * sizeof(pq_float));
ret_result = new int[size_ret_result];
query = new pq_float[size_query];
norm_ret = new pq_float[size_ret_result];
ip_vecs = new std::pair<pq_float, int>[size_ret_result];
pq_float *norm_filter = new pq_float[size_norm_filter + 2];
pq_float total_time;
int **candidate_init = new int*[num_query];
for (int i = 0; i < num_query; ++i) candidate_init[i] = new int[topk];
int **answer = new int*[num_query];
for (int i = 0; i < num_query; ++i) answer[i] = new int[topk];
// load input data
printf("# Begin reading codeword+codebook\n");
int temp_nq = 0, temp_pq = 0;
for (int i = 0; i < num_q; ++i) {
if (!q_type[i]) {
for (int j = 0; j < Ks; ++j) scanf("%f", &codeword_nq[cal_2Dcoordinate(temp_nq, j, Ks)]);
for (int j = 0; j < num_vecs; ++j) scanf("%hhu", &codebook[cal_2Dcoordinate(temp_nq + temp_pq, j, num_vecs)]); // %hhu because codebook entries are uint8_t
temp_nq++;
} else if (q_type[i] == 1) {
for (int j = 0; j < Ks; ++j) {
for (int k = 0; k < num_dimen; ++k) scanf("%f", &codeword_pq[cal_3Dcoordinate(temp_pq, j, k, Ks, num_dimen)]);
}
for (int j = 0; j < num_vecs; ++j) scanf("%hhu", &codebook[cal_2Dcoordinate(temp_nq + temp_pq, j, num_vecs)]); // %hhu because codebook entries are uint8_t
temp_pq++;
}
}
pq_float temp_sum = 0, temp_value = 0;
printf("# Begin reading query\n");
for (int i = 0; i < num_query; ++i) {
temp_sum = 0;
for (int j = 0; j < num_dimen; ++j) {
scanf("%f", &temp_value);
temp_sum += temp_value * temp_value;
query[cal_2Dcoordinate(i, j, num_dimen)] = temp_value;
}
query_norm[i] = sqrt(temp_sum);
}
int sum_init_correct = 0;
for (int i = 0; i < num_query; ++i) {
scanf("%f", &threshold[i]);
// if (i % (num_query / 10) == 0) printf("# %dth threshold is %f\n", i, threshold[i]);
}
int query_cnt = 0;
for (int i = 0; i < num_query; ++i) {
for (int j = 0; j < topk; ++j) {
scanf("%d", &candidate_init[i][j]);
}
for (int j = 0; j < topk; ++j) {
scanf("%d", &answer[i][j]);
}
// if (query_cnt < 10000) {
sum_init_correct += sameIndex(candidate_init[i], answer[i], topk);
// query_cnt++;
// }
// if (i % (num_query / 10) == 0) {
// for (int j = 0; j < topk; ++j) printf("%d ", candidate_init[i][j]);
// for (int j = 0; j < topk; ++j) printf("%d ", answer[i][j]);
// printf("\n");
// }
}
scanf("%f", &total_time);
int index = 0;
bool flag = false;
printf("# Begin reading items\n");
for (int i = 0; i < num_vecs; ++i) {
if ((i - top_norm) % batch_vecs == 0 && i >= top_norm) {
//if (i % batch_vecs == top_norm) {
flag = true;
temp_sum = 0;
}
for (int j = 0; j < num_dimen; ++j) {
scanf("%f", &temp_value);
if (flag) temp_sum += temp_value * temp_value;
vecs[cal_2Dcoordinate(i, j, num_dimen)] = temp_value;
}
if (flag) {
flag = false;
norm_filter[index] = -sqrt(temp_sum);
index++;
}
}
norm_filter[index++] = 0;
// load data to GPU
printf("# Begin loading to GPU\n");
pq_float *device_codeword_pq, *device_codeword_nq, *device_query, *device_lookup_table, *device_topnorm_vecs;
int *device_ret_result, *device_prefixsum_result, *device_mid_result;
pq_int *device_codebook, *device_q_map;
hipMalloc((pq_float **)&device_codeword_nq, size_codeword_nq * sizeof(pq_float));
hipMalloc((pq_float **)&device_codeword_pq, size_codeword_pq * sizeof(pq_float));
hipMalloc((pq_int **)&device_codebook, size_codebook * sizeof(pq_int));
hipMalloc((pq_float **)&device_query, size_device_query * sizeof(pq_float));
hipMalloc((pq_int **)&device_q_map, size_q_map * sizeof(pq_int));
hipMalloc((pq_float **)&device_lookup_table, size_lookup_table * sizeof(pq_float));
hipMalloc((int **)&device_mid_result, size_ret_result * sizeof(int));
hipMalloc((int **)&device_prefixsum_result, size_ret_result * sizeof(int));
hipMalloc((int **)&device_ret_result, size_ret_result * sizeof(int));
hipMalloc((pq_float **)&device_topnorm_vecs, size_topnorm_vecs * sizeof(pq_float));
int *h_mid_result = new int[batch_vecs];
int *h_prefixsum_result = new int[batch_vecs];
int *h_ret_result = new int[batch_vecs];
pq_float *h_lookup_table = new pq_float[size_lookup_table];
pq_float *gpuref_lookup_table = new pq_float[size_lookup_table];
int *gpuref_mid_result = new int[batch_vecs];
printf("# Begin copying data\n");
hipMemcpy(device_codeword_nq, codeword_nq, size_codeword_nq * sizeof(pq_float), hipMemcpyHostToDevice);
hipMemcpy(device_codeword_pq, codeword_pq, size_codeword_pq * sizeof(pq_float), hipMemcpyHostToDevice);
hipMemcpy(device_codebook, codebook, size_codebook * sizeof(pq_int), hipMemcpyHostToDevice);
hipMemcpy(device_q_map, q_map, size_q_map * sizeof(pq_int), hipMemcpyHostToDevice);
hipMemcpy(device_topnorm_vecs, vecs, size_topnorm_vecs * sizeof(pq_float), hipMemcpyHostToDevice);
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
hipcub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, device_mid_result, device_prefixsum_result, size_ret_result);
hipMalloc(&d_temp_storage, temp_storage_bytes);
// test lookup table
// hipMemcpy(device_query, query, num_dimen * sizeof(pq_float), hipMemcpyHostToDevice);
// dim3 grid_lookup (num_q);
// dim3 block_lookup (Ks);
//hipLaunchKernelGGL(( calLookupOnGPU), dim3(grid_lookup), dim3(block_lookup), 0, 0, device_query, device_codeword_nq, device_codeword_pq, device_q_map, device_lookup_table,
// num_dimen);
// hipDeviceSynchronize();
// hipMemcpy(gpuref_lookup_table, device_lookup_table, size_lookup_table * sizeof(pq_float), hipMemcpyDeviceToHost);
// calLookupOnCPU(query, codeword_nq, codeword_pq, q_map, h_lookup_table, num_dimen, num_q, Ks);
// checkLookup(h_lookup_table, gpuref_lookup_table, size_lookup_table);
// int start_pos = 0, end_pos = batch_vecs;
// dim3 grid_prune (batch_vecs / threads_per_block);
// dim3 block_prune (threads_per_block);
//hipLaunchKernelGGL(( calApproxVecs), dim3(grid_prune), dim3(block_prune), size_lookup_table * sizeof(pq_float), 0, device_mid_result, device_codebook, device_lookup_table, device_q_map,
// num_q, Ks, num_vecs, start_pos, end_pos, threshold[0]);
// hipDeviceSynchronize();
// hipMemcpy(gpuref_mid_result, device_mid_result, batch_vecs * sizeof(pq_float), hipMemcpyDeviceToHost);
// calMidResultOnCPU(h_mid_result, codebook, h_lookup_table, q_map, num_q, Ks, num_vecs, start_pos, end_pos, threshold[0]);
// checkMidResult(h_mid_result, gpuref_mid_result, end_pos - start_pos);
// end testing lookup table
// int sum_batch_query = (num_query + batch_query - 1) / batch_query;
int sum_batch_vecs = (num_vecs - top_norm + batch_vecs - 1) / batch_vecs;
// vector<int> candidate;
int sum_final_correct = 0, temp_length[2], max_length = 0;
long long sum_final_length = 0;
long long sum_filter_length = 0;
int batch_print = 10000;
double cpu_time = 0;
double gpu_cal_time = 0;
double gpu_sum_time = 0;
double gpu_assign_time = 0;
double prepare_time = 0;
double copy_back_time = 0;
clock_t temp_time_cpu = 0;
clock_t temp_time_gpu = 0;
// clock_t end_time_cpu = 0;
printf("# Begin calculating\n");
query_cnt = 0;
clock_t start_time = clock();
hipMemcpy(device_query, query, num_query * num_dimen * sizeof(pq_float), hipMemcpyHostToDevice);
// while(1) {
for (int i = 0; i < num_query; ++i) {
// load the query into gpu
// temp_time_cpu = clock();
// calculate the lookup table
dim3 grid_lookup (num_q);
dim3 block_lookup (Ks);
hipLaunchKernelGGL(( calLookupOnGPU), dim3(grid_lookup), dim3(block_lookup), 0, 0, device_query, device_codeword_nq, device_codeword_pq, device_q_map,
device_lookup_table, num_dimen, i * num_dimen);
// calculate the approximate vecs
// dim3 grid_prune (batch_vecs / threads_per_block);
dim3 block_prune (threads_per_block);
int start_pos = top_norm, end_pos = top_norm + batch_vecs, current_length = 0;
double norm_threshold = -threshold[i] / query_norm[i];
// printf("# %dth query threshold is %f", i, norm_threshold);
// dim3 grid_ret (size_ret_result + threads_per_block - 1 / threads_per_block);
// dim3 block_ret (threads_per_block);
// initResult<<<grid_ret, block_ret>>>(device_ret_result, size_ret_result);
// binary search for the threshold
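// norm_filter holds -||x|| for each batch leader and norm_threshold = -threshold/||q||, so this
// lower-bound search finds the first batch whose leading norm gives ||x||*||q|| <= threshold;
// batches from index_vecs onward cannot reach the threshold (Cauchy-Schwarz, assuming decreasing
// norm order) and are skipped.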
int norm_index_left = 0, norm_index_right = sum_batch_vecs + 1, norm_index_mid = 0;
while (norm_index_left < norm_index_right) {
norm_index_mid = (norm_index_left + norm_index_right) >> 1;
if (norm_threshold <= norm_filter[norm_index_mid]) norm_index_right = norm_index_mid;
else norm_index_left = norm_index_mid + 1;
}
int index_vecs = norm_index_left;
// int index_vecs = std::lower_bound(norm_filter, norm_filter + sum_batch_vecs + 1, norm_threshold) - norm_filter;
hipDeviceSynchronize();
// prepare_time += (double)(clock() - temp_time_cpu) / CLOCKS_PER_SEC;
// if (i == 900) {
// printf("# %d\n", i);
// }
if (index_vecs > 0) {
end_pos = index_vecs * batch_vecs + top_norm;
if (end_pos > num_vecs) end_pos = num_vecs;
sum_filter_length += num_vecs - end_pos;
// temp_time_gpu = clock();
dim3 grid_prune ((end_pos - start_pos + threads_per_block - 1) / threads_per_block);
hipLaunchKernelGGL(( calApproxVecs), dim3(grid_prune), dim3(block_prune), size_lookup_table * sizeof(pq_float), 0, device_mid_result, device_codebook, device_lookup_table,
device_q_map, num_q, Ks, num_vecs, start_pos, end_pos, threshold[i]);
hipDeviceSynchronize();
// gpu_cal_time += (double)(clock() - temp_time_gpu) / CLOCKS_PER_SEC;
// temp_time_gpu = clock();
// hipMemcpy(h_mid_result, device_mid_result, batch_vecs * sizeof(int), hipMemcpyDeviceToHost);
hipcub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, device_mid_result, device_prefixsum_result, end_pos - start_pos);
hipDeviceSynchronize();
// gpu_sum_time += (double)(clock() - temp_time_gpu) / CLOCKS_PER_SEC;
// temp_time_gpu = clock();
// hipMemcpy(h_prefixsum_result, device_prefixsum_result, batch_vecs * sizeof(int), hipMemcpyDeviceToHost);
hipLaunchKernelGGL(( assignResult), dim3(grid_prune), dim3(block_prune), 0, 0, device_prefixsum_result, device_ret_result, start_pos, end_pos);
hipDeviceSynchronize();
// gpu_assign_time += (double)(clock() - temp_time_gpu) / CLOCKS_PER_SEC;
hipMemcpy(temp_length, device_prefixsum_result + end_pos - start_pos - 1, sizeof(int), hipMemcpyDeviceToHost);
// hipMemcpy(h_ret_result, device_prefixsum_result, batch_vecs * sizeof(int), hipMemcpyDeviceToHost);
current_length += min(temp_length[0], ret_limit);
// if (current_length >= num_vecs / 20) break;
// start_pos += batch_vecs;
// end_pos += batch_vecs;
// if (end_pos > num_vecs) end_pos = num_vecs;
}
// for (int j = 0; j < sum_batch_vecs; ++j) {
// if (norm_threshold > norm_filter[j]) {
// sum_filter_length += num_vecs - top_norm - j * batch_vecs;
// break;
// }
// temp_time_gpu = clock();
// hipLaunchKernelGGL(( calApproxVecs), dim3(grid_prune), dim3(block_prune), size_lookup_table * sizeof(pq_float), 0, device_mid_result, device_codebook, device_lookup_table,
// device_q_map, num_q, Ks, num_vecs, start_pos, end_pos, threshold[i]);
// hipDeviceSynchronize();
// gpu_cal_time += (double)(clock() - temp_time_gpu) / CLOCKS_PER_SEC;
// temp_time_gpu = clock();
// // hipMemcpy(h_mid_result, device_mid_result, batch_vecs * sizeof(int), hipMemcpyDeviceToHost);
// hipcub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, device_mid_result, device_prefixsum_result, batch_vecs);
// hipDeviceSynchronize();
// gpu_sum_time += (double)(clock() - temp_time_gpu) / CLOCKS_PER_SEC;
// temp_time_gpu = clock();
// // hipMemcpy(h_prefixsum_result, device_prefixsum_result, batch_vecs * sizeof(int), hipMemcpyDeviceToHost);
// assignResult2<<<grid_prune, block_prune>>>(device_prefixsum_result, device_ret_result, start_pos, end_pos, current_length);
// hipDeviceSynchronize();
// gpu_assign_time += (double)(clock() - temp_time_gpu) / CLOCKS_PER_SEC;
// hipMemcpy(temp_length, device_prefixsum_result + end_pos - start_pos - 1, sizeof(int), hipMemcpyDeviceToHost);
// // hipMemcpy(h_ret_result, device_prefixsum_result, batch_vecs * sizeof(int), hipMemcpyDeviceToHost);
// current_length += temp_length[0];
// if (current_length >= num_vecs / 20) break;
// start_pos += batch_vecs;
// end_pos += batch_vecs;
// if (end_pos > num_vecs) end_pos = num_vecs;
// }
// temp_time_gpu = clock();
hipMemcpy(ret_result, device_ret_result, current_length * sizeof(int), hipMemcpyDeviceToHost);
// temp_time_cpu = clock();
// copy_back_time += (double)(temp_time_cpu - temp_time_gpu) / CLOCKS_PER_SEC;
for (int j = 0; j < topk; ++j) ret_result[current_length + j] = candidate_init[i][j];
// bool flag = 0;
// for (int j = 0; j < current_length; ++j) {
// if (ret_result[j] >= num_vecs) {
// flag = 1;
// break;
// }
// }
// if (flag) {
// printf("# %dth query outputs wrong with threshold %f!\n", i, threshold[i]);
// for (int j = 0; j < current_length; ++j) {
// printf("%d ", ret_result[j]);
// }
// printf("\n");
// break;
// }
current_length += topk;
// if (current_length >= num_vecs / 2) {
// for (int j = 0; j < sum_batch_vecs; ++j) printf("# %dth norm filter is %f\n", j, norm_filter[j]);
// printf("# %dth query threshold is %f and query norm is %f\n", i, threshold[i], query_norm[i]);
// break;
// }
max_length = max(max_length, current_length);
sum_final_length += current_length;
query_idx = i;
calIP(ret_result, current_length, ip_vecs, num_dimen);
nth_element(ip_vecs, ip_vecs + topk, ip_vecs + current_length);
// bool flag = 0;
// for (int j = 0; j < topk - 1; ++j) {
// if (-ip_vecs[j].first < -ip_vecs[topk - 1].first) {
// flag = 1;
// break;
// }
// }
// if (flag) {
// printf("# %dth query is wrong\n", i);
// for (int j = 0; j < topk; ++j) {
// printf("# id: %d ip: %f\n", ip_vecs[j].second, ip_vecs[j].first);
// }
// break;
// }
// for (int j = 0; j < topk; ++j) {
// printf("# id: %d ip: %f\n", ip_vecs[j].second, ip_vecs[j].first);
// }
for (int j = 0; j < topk; ++j) ret_result[j] = ip_vecs[j].second;
// printf("# length = %d\n", current_length);
// std::sort(ret_result, ret_result + current_length, cmpIPIndex);
sum_final_correct += sameIndex(ret_result, answer[i], topk);
// end_time_cpu = clock();
// cpu_time += (double)(clock() - temp_time_cpu) / CLOCKS_PER_SEC;
// break;
// if (i % batch_print == batch_print - 1) printf("# %dth query has been processed\n", i);
// query_cnt++;
//if (query_cnt == 10000) break;
}
//if (query_cnt == 10000) break;
//}
clock_t end_time = clock();
double query_processing_time = (double)(end_time - start_time) / CLOCKS_PER_SEC + total_time;
printf("\n# time spend: %fs\n# top norm faiss time spend: %fs\n# cpu time: %fs\n# gpu cal time: %fs\n# gpu sum time: %fs\n# gpu assign time: %fs\n# prepare time: %fs\n# copy back time: %fs",
query_processing_time, total_time, cpu_time, gpu_cal_time, gpu_sum_time, gpu_assign_time, prepare_time, copy_back_time);
printf("\n# total query: %d\n# total vecs: %d\n# norm filter length: %f\n# max length: %d\n# final_length: %f",
num_query, num_vecs, sum_filter_length * 1.0 / num_query, max_length, sum_final_length * 1.0 / num_query);
printf("\n# recall: %f\n# init recall: %f\n", sum_final_correct * 1.0 / num_query / topk,
sum_init_correct * 1.0 / num_query / topk);
// delete all the data
// printf("# Begin delete gpu array 1!");
hipFree(device_codeword_nq);
// printf("# Begin delete gpu array 2!");
hipFree(device_codeword_pq);
// printf("# Begin delete gpu array 3!");
hipFree(device_query);
// printf("# Begin delete gpu array 4!");
hipFree(device_codebook);
// printf("# Begin delete gpu array 5!");
hipFree(device_q_map);
// printf("# Begin delete gpu array 6!");
hipFree(device_lookup_table);
// printf("# Begin delete gpu array 7!");
hipFree(device_mid_result);
// printf("# Begin delete gpu array 8!");
hipFree(device_prefixsum_result);
// printf("# Begin delete gpu array 9!");
hipFree(device_ret_result);
// printf("# Begin delete gpu array 10!");
hipFree(d_temp_storage);
// printf("# Begin delete cpu array!");
delete [] codeword_nq;
delete [] codeword_pq;
delete [] codebook;
delete [] query;
delete [] query_norm;
delete [] threshold;
free(vecs);
delete [] norm_filter;
delete [] ret_result;
delete [] h_mid_result;
delete [] h_prefixsum_result;
delete [] h_ret_result;
delete [] norm_ret;
for (int i = 0; i < num_query; ++i) delete [] candidate_init[i];
delete [] candidate_init;
for (int i = 0; i < num_query; ++i) delete [] answer[i];
delete [] answer;
delete [] ip_vecs;
return 0;
}
|
767eb5a41ff64707323c72ae4ce78fe487772343.cu
|
#include "header.cuh"
typedef float pq_float;
typedef uint8_t pq_int;
// configuration
int topk;
int codebook;
int Ks;
int group;
int batch_vecs;
int batch_query;
int threads_per_block;
int ret_limit;
// size of everything
int size_codeword_nq;
int size_codeword_pq;
int size_codebook;
int size_query;
int size_threshold;
int size_device_query;
long long size_vecs;
int size_norm_filter;
int size_q_map;
int size_lookup_table;
int size_ret_result;
int size_topnorm_vecs;
string dataset;
int num_vecs, num_dimen, num_query, num_q, top_norm;
pq_float *vecs;
pq_float *query;
pq_float *norm_ret;
std::pair<pq_float, int> *ip_vecs;
int *ret_result;
int query_idx;
long long cal_2Dcoordinate(int x, int y, int leny) {
return (long long)x * leny + y;
}
int cal_3Dcoordinate(int x, int y, int z, int leny, int lenz) {
return x * leny * lenz + y * lenz + z;
}
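// Builds the per-query lookup table: one block per sub-quantizer (grid = num_q, block = Ks).
// For a norm quantizer (q_type 0) the codeword value is copied directly; for a product
// quantizer (q_type 1) each thread computes the inner product of its codeword with the query
// (codewords here span the full query dimension).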
__global__ void calLookupOnGPU(pq_float *query, pq_float *codeword_nq, pq_float *codeword_pq, pq_int *q_map, pq_float *lookup_table,
int num_dimen, int offset) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int q_type = q_map[2 * blockIdx.x];
int idx_q = q_map[2 * blockIdx.x + 1];
if (q_type == 0) {
lookup_table[idx] = codeword_nq[idx_q * blockDim.x + threadIdx.x];
} else if (q_type == 1) {
pq_float temp_sum = 0;
for (int i = 0; i < num_dimen; ++i) {
temp_sum += codeword_pq[idx_q * blockDim.x * num_dimen + threadIdx.x * num_dimen + i] * query[offset + i];
}
lookup_table[idx] = temp_sum;
}
}
void calLookupOnCPU(pq_float *query, pq_float *codeword_nq, pq_float *codeword_pq, pq_int *q_map, pq_float *lookup_table,
int num_dimen, int num_q, int Ks) {
for (int i = 0; i < num_q; ++i) {
int q_type = q_map[2 * i];
int idx_q = q_map[2 * i + 1];
for (int j = 0; j < Ks; ++j) {
if (q_type == 0) {
lookup_table[cal_2Dcoordinate(i, j, Ks)] = codeword_nq[cal_2Dcoordinate(idx_q, j, Ks)];
} else if (q_type == 1) {
pq_float temp_sum = 0;
for (int k = 0; k < num_dimen; ++k) temp_sum += codeword_pq[cal_3Dcoordinate(idx_q, j, k, Ks, num_dimen)] * query[k];
lookup_table[cal_2Dcoordinate(i, j, Ks)] = temp_sum;
}
}
}
}
void checkLookup(pq_float *h_lookup_table, pq_float *gpuref_lookup_table, int length) {
double eps = 1e-7;
bool match = 1;
for (int i = 0; i < length; ++i) {
if (abs(h_lookup_table[i] - gpuref_lookup_table[i]) > eps) {
match = 0;
printf("Lookup tables do not match!\n");
printf("host %f gpu %f at current %d\n", h_lookup_table[i], gpuref_lookup_table[i], i);
}
}
if (match) printf("Lookup tables match.\n\n");
}
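// Threshold filter: the lookup table is first staged into shared memory, then each thread scores
// one database vector. A norm-quantizer entry (q_type 0) sets the coefficient that scales the
// following product-quantizer contributions (q_type 1); the thread writes a 0/1 flag indicating
// whether the approximate inner product reaches the threshold.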
__global__ void calApproxVecs(int *mid_result, pq_int *codebook, pq_float *lookup_table, pq_int *q_map, int num_q, int Ks, int num_vecs,
int start_vecs, int end_vecs, pq_float threshold) {
int average_assign = num_q * Ks / blockDim.x;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
extern __shared__ pq_float local_lookup_table [];
for (int i = 0; i < average_assign; ++i) local_lookup_table[threadIdx.x * average_assign + i] = lookup_table[threadIdx.x * average_assign + i];
__syncthreads();
// init
if (start_vecs + idx < end_vecs) {
int q_type = 0;
pq_float result = 0, coefficient = 1; //result_temp = 0;
for (int i = 0; i < num_q; ++i) {
q_type = q_map[2 * i];
if (q_type == 0) {
//result += result_temp * coefficient;
coefficient = local_lookup_table[i * Ks + codebook[i * num_vecs + (start_vecs + idx)]];
//result_temp = 0;
} else if (q_type == 1) {
result += coefficient * local_lookup_table[i * Ks + codebook[i * num_vecs + (start_vecs + idx)]];
//result_temp += local_lookup_table[i * Ks + codebook[i * num_vecs + (start_vecs + idx)]];
}
}
//result += coefficient * result_temp;
if (result >= threshold) mid_result[idx] = 1;
else mid_result[idx] = 0;
// mid_result[idx] = result;
}
}
void calMidResultOnCPU(int *mid_result, pq_int *codebook, pq_float *lookup_table, pq_int *q_map, int num_q, int Ks, int num_vecs,
int start_pos, int end_pos, pq_float threshold) {
for (int i = 0; i < end_pos - start_pos; ++i) {
pq_float result = 0, mid_coefficient = 1;
for (int j = 0; j < num_q; ++j) {
int q_type = q_map[2 * j], idx = j * Ks + codebook[j * num_vecs + (start_pos + i)];
if (q_type == 0) {
mid_coefficient = lookup_table[idx];
} else if (q_type == 1) {
result += mid_coefficient * lookup_table[idx];
}
}
mid_result[i] = result >= threshold;
}
}
void checkMidResult(int *h_mid_result, int *gpuref_mid_result, int length) {
bool match = 1;
// pq_float eps = 1e-7;
for (int i = 0; i < length; ++i) {
if (h_mid_result[i] != gpuref_mid_result[i]) {
// if (abs(h_mid_result[i] - gpuref_mid_result[i]) > eps) {
match = 0;
printf("Mid results do not match!\n");
printf("host %d gpu %d at current %d\n", h_mid_result[i], gpuref_mid_result[i], i);
// printf("host %f gpu %f at current %d\n", h_mid_result[i], gpuref_mid_result[i], i);
break;
}
}
if (match) printf("Mid results match.\n\n");
}
// __global__ void assignResult2(int *prefixsum_result, int *d_ret_result, int start_pos, int end_pos, int current_length) {
// int idx = blockIdx.x * blockDim.x + threadIdx.x;
// if (start_pos + idx < end_pos) {
// if (!idx) {
// if (prefixsum_result[idx] == 1) {
// d_ret_result[current_length] = start_pos + idx;
// }
// } else {
// if (prefixsum_result[idx] - prefixsum_result[idx - 1]) {
// d_ret_result[current_length + prefixsum_result[idx] - 1] = start_pos + idx;
// }
// }
// }
// }
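// Stream compaction: given the inclusive prefix sum of the 0/1 flags, a thread whose prefix value
// increased over its left neighbour writes its global vector index (start_pos + idx) into
// compacted output slot prefixsum_result[idx] - 1.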
__global__ void assignResult(int *prefixsum_result, int *d_ret_result, int start_pos, int end_pos) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (start_pos + idx < end_pos) {
if (!idx) {
if (prefixsum_result[idx] == 1) {
d_ret_result[0] = start_pos + idx;
}
} else {
if (prefixsum_result[idx] - prefixsum_result[idx - 1]) {
d_ret_result[prefixsum_result[idx] - 1] = start_pos + idx;
}
}
}
}
__global__ void initResult(int *d_ret_result, int size_ret_result) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size_ret_result) d_ret_result[idx] = 0;
}
pq_float calOneIP(pq_float *arr, int vecs_idx, int query_idx, int num_dimen) {
pq_float temp_sum = 0;
int offset_vecs = vecs_idx * num_dimen, offset_query = query_idx * num_dimen;
for (int i = 0; i < num_dimen; ++i) {
temp_sum += arr[offset_vecs + i] * query[offset_query + i];
}
return temp_sum;
}
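// Inner products are negated in calIP so that std::nth_element (ascending order, used in main)
// places the top-k largest inner products in the first k slots.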
void calIP(int *idx, int length, std::pair<pq_float, int> *out, int num_dimen) {
for (int i = 0; i < length; ++i) {
out[i] = std::make_pair(-calOneIP(vecs, idx[i], query_idx, num_dimen), idx[i]);
}
}
bool cmpIPIndex(int lhs, int rhs) {
return norm_ret[lhs] > norm_ret[rhs];
}
int sameIndex(int *vecs1, int *vecs2, int number) {
bool *flag = new bool[number];
for (int i = 0; i < number; ++i) {
flag[i] = false;
}
int ret = 0;
for (int i = 0; i < number; ++i) {
for (int j = 0; j < number; ++j) {
if (!flag[j] && vecs1[i] == vecs2[j]) {
flag[j] = true;
ret++;
}
}
}
delete [] flag;
return ret;
}
int main(int argc, char *argv[]) {
// input configuration
dataset = string(argv[1]);
topk = atoi(argv[2]);
codebook = atoi(argv[3]);
Ks = atoi(argv[4]);
group = atoi(argv[5]);
batch_vecs = atoi(argv[6]);
batch_query = atoi(argv[7]);
threads_per_block = atoi(argv[8]);
ret_limit = atoi(argv[9]);
// init input data
printf("# Begin reading data\n");
string file_path = "../data/" + dataset + "/" + dataset + "_cuda_c.txt";
freopen(file_path.c_str(), "r", stdin);
vector<int>q_type;
pq_int q_map[100];
scanf("%d%d%d%d", &num_vecs, &num_dimen, &num_query, &top_norm);
num_q = codebook * group;
ret_limit = num_vecs * ret_limit / 100;
char temp_str[50];
scanf("%s", temp_str);
// printf("%s\n", temp_str);
int num_nq = 0, num_pq = 0;
for (int i = 0; i < num_q; ++i) {
if (temp_str[i] == 'N') {
q_type.push_back(0);
q_map[2 * i] = 0;
q_map[2 * i + 1] = num_nq;
num_nq++;
}
else if (temp_str[i] == 'P') {
q_type.push_back(1);
q_map[2 * i] = 1;
q_map[2 * i + 1] = num_pq;
num_pq++;
}
}
// may be larger than the limit of int
printf("# Begin allocating memory\n");
size_codeword_nq = num_nq * Ks;
size_codeword_pq = num_pq * Ks * num_dimen;
size_codebook = num_q * num_vecs;
size_query = num_query * num_dimen;
size_threshold = num_query;
size_device_query = num_query * num_dimen;
size_vecs = (long long)num_vecs * num_dimen;
size_norm_filter = num_vecs / batch_vecs;
size_q_map = num_q * 2;
size_lookup_table = num_q * Ks;
size_ret_result = num_vecs;
size_topnorm_vecs = top_norm * num_dimen;
vecs = (pq_float *)malloc(size_vecs * sizeof(pq_float));
if (!vecs) {
printf("\n------malloc failed!!------\n");
}
pq_float *codeword_nq = new pq_float[size_codeword_nq];
pq_float *codeword_pq = new pq_float[size_codeword_pq];
pq_int *codebook = new pq_int[size_codebook];
pq_float *query_norm = new pq_float[num_query];
pq_float *threshold = new pq_float[size_threshold];
// vecs = (pq_float *)malloc(size_vecs * sizeof(pq_float));
ret_result = new int[size_ret_result];
query = new pq_float[size_query];
norm_ret = new pq_float[size_ret_result];
ip_vecs = new std::pair<pq_float, int>[size_ret_result];
pq_float *norm_filter = new pq_float[size_norm_filter + 2];
pq_float total_time;
int **candidate_init = new int*[num_query];
for (int i = 0; i < num_query; ++i) candidate_init[i] = new int[topk];
int **answer = new int*[num_query];
for (int i = 0; i < num_query; ++i) answer[i] = new int[topk];
// load input data
printf("# Begin reading codeword+codebook\n");
int temp_nq = 0, temp_pq = 0;
for (int i = 0; i < num_q; ++i) {
if (!q_type[i]) {
for (int j = 0; j < Ks; ++j) scanf("%f", &codeword_nq[cal_2Dcoordinate(temp_nq, j, Ks)]);
for (int j = 0; j < num_vecs; ++j) scanf("%hhu", &codebook[cal_2Dcoordinate(temp_nq + temp_pq, j, num_vecs)]); // %hhu because codebook entries are uint8_t
temp_nq++;
} else if (q_type[i] == 1) {
for (int j = 0; j < Ks; ++j) {
for (int k = 0; k < num_dimen; ++k) scanf("%f", &codeword_pq[cal_3Dcoordinate(temp_pq, j, k, Ks, num_dimen)]);
}
for (int j = 0; j < num_vecs; ++j) scanf("%hhu", &codebook[cal_2Dcoordinate(temp_nq + temp_pq, j, num_vecs)]); // %hhu because codebook entries are uint8_t
temp_pq++;
}
}
pq_float temp_sum = 0, temp_value = 0;
printf("# Begin reading query\n");
for (int i = 0; i < num_query; ++i) {
temp_sum = 0;
for (int j = 0; j < num_dimen; ++j) {
scanf("%f", &temp_value);
temp_sum += temp_value * temp_value;
query[cal_2Dcoordinate(i, j, num_dimen)] = temp_value;
}
query_norm[i] = sqrt(temp_sum);
}
int sum_init_correct = 0;
for (int i = 0; i < num_query; ++i) {
scanf("%f", &threshold[i]);
// if (i % (num_query / 10) == 0) printf("# %dth threshold is %f\n", i, threshold[i]);
}
int query_cnt = 0;
for (int i = 0; i < num_query; ++i) {
for (int j = 0; j < topk; ++j) {
scanf("%d", &candidate_init[i][j]);
}
for (int j = 0; j < topk; ++j) {
scanf("%d", &answer[i][j]);
}
// if (query_cnt < 10000) {
sum_init_correct += sameIndex(candidate_init[i], answer[i], topk);
// query_cnt++;
// }
// if (i % (num_query / 10) == 0) {
// for (int j = 0; j < topk; ++j) printf("%d ", candidate_init[i][j]);
// for (int j = 0; j < topk; ++j) printf("%d ", answer[i][j]);
// printf("\n");
// }
}
scanf("%f", &total_time);
int index = 0;
bool flag = false;
printf("# Begin reading items\n");
for (int i = 0; i < num_vecs; ++i) {
if ((i - top_norm) % batch_vecs == 0 && i >= top_norm) {
//if (i % batch_vecs == top_norm) {
flag = true;
temp_sum = 0;
}
for (int j = 0; j < num_dimen; ++j) {
scanf("%f", &temp_value);
if (flag) temp_sum += temp_value * temp_value;
vecs[cal_2Dcoordinate(i, j, num_dimen)] = temp_value;
}
if (flag) {
flag = false;
norm_filter[index] = -sqrt(temp_sum);
index++;
}
}
norm_filter[index++] = 0;
// load data to GPU
printf("# Begin loading to GPU\n");
pq_float *device_codeword_pq, *device_codeword_nq, *device_query, *device_lookup_table, *device_topnorm_vecs;
int *device_ret_result, *device_prefixsum_result, *device_mid_result;
pq_int *device_codebook, *device_q_map;
cudaMalloc((pq_float **)&device_codeword_nq, size_codeword_nq * sizeof(pq_float));
cudaMalloc((pq_float **)&device_codeword_pq, size_codeword_pq * sizeof(pq_float));
cudaMalloc((pq_int **)&device_codebook, size_codebook * sizeof(pq_int));
cudaMalloc((pq_float **)&device_query, size_device_query * sizeof(pq_float));
cudaMalloc((pq_int **)&device_q_map, size_q_map * sizeof(pq_int));
cudaMalloc((pq_float **)&device_lookup_table, size_lookup_table * sizeof(pq_float));
cudaMalloc((int **)&device_mid_result, size_ret_result * sizeof(int));
cudaMalloc((int **)&device_prefixsum_result, size_ret_result * sizeof(int));
cudaMalloc((int **)&device_ret_result, size_ret_result * sizeof(int));
cudaMalloc((pq_float **)&device_topnorm_vecs, size_topnorm_vecs * sizeof(pq_float));
int *h_mid_result = new int[batch_vecs];
int *h_prefixsum_result = new int[batch_vecs];
int *h_ret_result = new int[batch_vecs];
pq_float *h_lookup_table = new pq_float[size_lookup_table];
pq_float *gpuref_lookup_table = new pq_float[size_lookup_table];
int *gpuref_mid_result = new int[batch_vecs];
printf("# Begin copying data\n");
cudaMemcpy(device_codeword_nq, codeword_nq, size_codeword_nq * sizeof(pq_float), cudaMemcpyHostToDevice);
cudaMemcpy(device_codeword_pq, codeword_pq, size_codeword_pq * sizeof(pq_float), cudaMemcpyHostToDevice);
cudaMemcpy(device_codebook, codebook, size_codebook * sizeof(pq_int), cudaMemcpyHostToDevice);
cudaMemcpy(device_q_map, q_map, size_q_map * sizeof(pq_int), cudaMemcpyHostToDevice);
cudaMemcpy(device_topnorm_vecs, vecs, size_topnorm_vecs * sizeof(pq_float), cudaMemcpyHostToDevice);
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
cub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, device_mid_result, device_prefixsum_result, size_ret_result);
cudaMalloc(&d_temp_storage, temp_storage_bytes);
// test lookup table
// cudaMemcpy(device_query, query, num_dimen * sizeof(pq_float), cudaMemcpyHostToDevice);
// dim3 grid_lookup (num_q);
// dim3 block_lookup (Ks);
// calLookupOnGPU<<<grid_lookup, block_lookup>>>(device_query, device_codeword_nq, device_codeword_pq, device_q_map, device_lookup_table,
// num_dimen);
// cudaDeviceSynchronize();
// cudaMemcpy(gpuref_lookup_table, device_lookup_table, size_lookup_table * sizeof(pq_float), cudaMemcpyDeviceToHost);
// calLookupOnCPU(query, codeword_nq, codeword_pq, q_map, h_lookup_table, num_dimen, num_q, Ks);
// checkLookup(h_lookup_table, gpuref_lookup_table, size_lookup_table);
// int start_pos = 0, end_pos = batch_vecs;
// dim3 grid_prune (batch_vecs / threads_per_block);
// dim3 block_prune (threads_per_block);
// calApproxVecs<<<grid_prune, block_prune, size_lookup_table * sizeof(pq_float)>>>(device_mid_result, device_codebook, device_lookup_table, device_q_map,
// num_q, Ks, num_vecs, start_pos, end_pos, threshold[0]);
// cudaDeviceSynchronize();
// cudaMemcpy(gpuref_mid_result, device_mid_result, batch_vecs * sizeof(pq_float), cudaMemcpyDeviceToHost);
// calMidResultOnCPU(h_mid_result, codebook, h_lookup_table, q_map, num_q, Ks, num_vecs, start_pos, end_pos, threshold[0]);
// checkMidResult(h_mid_result, gpuref_mid_result, end_pos - start_pos);
// end testing lookup table
// int sum_batch_query = (num_query + batch_query - 1) / batch_query;
int sum_batch_vecs = (num_vecs - top_norm + batch_vecs - 1) / batch_vecs;
// vector<int> candidate;
int sum_final_correct = 0, temp_length[2], max_length = 0;
long long sum_final_length = 0;
long long sum_filter_length = 0;
int batch_print = 10000;
double cpu_time = 0;
double gpu_cal_time = 0;
double gpu_sum_time = 0;
double gpu_assign_time = 0;
double prepare_time = 0;
double copy_back_time = 0;
clock_t temp_time_cpu = 0;
clock_t temp_time_gpu = 0;
// clock_t end_time_cpu = 0;
printf("# Begin calculating\n");
query_cnt = 0;
clock_t start_time = clock();
cudaMemcpy(device_query, query, num_query * num_dimen * sizeof(pq_float), cudaMemcpyHostToDevice);
// while(1) {
for (int i = 0; i < num_query; ++i) {
// load the query into gpu
// temp_time_cpu = clock();
// calculate the lookup table
dim3 grid_lookup (num_q);
dim3 block_lookup (Ks);
calLookupOnGPU<<<grid_lookup, block_lookup>>>(device_query, device_codeword_nq, device_codeword_pq, device_q_map,
device_lookup_table, num_dimen, i * num_dimen);
// calculate the approximate vecs
// dim3 grid_prune (batch_vecs / threads_per_block);
dim3 block_prune (threads_per_block);
int start_pos = top_norm, end_pos = top_norm + batch_vecs, current_length = 0;
double norm_threshold = -threshold[i] / query_norm[i];
// printf("# %dth query threshold is %f", i, norm_threshold);
// dim3 grid_ret (size_ret_result + threads_per_block - 1 / threads_per_block);
// dim3 block_ret (threads_per_block);
// initResult<<<grid_ret, block_ret>>>(device_ret_result, size_ret_result);
// binary search for the threshold
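// norm_filter holds -||x|| for each batch leader and norm_threshold = -threshold/||q||, so this
// lower-bound search finds the first batch whose leading norm gives ||x||*||q|| <= threshold;
// batches from index_vecs onward cannot reach the threshold (Cauchy-Schwarz, assuming decreasing
// norm order) and are skipped.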
int norm_index_left = 0, norm_index_right = sum_batch_vecs + 1, norm_index_mid = 0;
while (norm_index_left < norm_index_right) {
norm_index_mid = (norm_index_left + norm_index_right) >> 1;
if (norm_threshold <= norm_filter[norm_index_mid]) norm_index_right = norm_index_mid;
else norm_index_left = norm_index_mid + 1;
}
int index_vecs = norm_index_left;
// int index_vecs = std::lower_bound(norm_filter, norm_filter + sum_batch_vecs + 1, norm_threshold) - norm_filter;
cudaDeviceSynchronize();
// prepare_time += (double)(clock() - temp_time_cpu) / CLOCKS_PER_SEC;
// if (i == 900) {
// printf("# %d\n", i);
// }
if (index_vecs > 0) {
end_pos = index_vecs * batch_vecs + top_norm;
if (end_pos > num_vecs) end_pos = num_vecs;
sum_filter_length += num_vecs - end_pos;
// temp_time_gpu = clock();
dim3 grid_prune ((end_pos - start_pos + threads_per_block - 1) / threads_per_block);
calApproxVecs<<<grid_prune, block_prune, size_lookup_table * sizeof(pq_float)>>>(device_mid_result, device_codebook, device_lookup_table,
device_q_map, num_q, Ks, num_vecs, start_pos, end_pos, threshold[i]);
cudaDeviceSynchronize();
// gpu_cal_time += (double)(clock() - temp_time_gpu) / CLOCKS_PER_SEC;
// temp_time_gpu = clock();
// cudaMemcpy(h_mid_result, device_mid_result, batch_vecs * sizeof(int), cudaMemcpyDeviceToHost);
cub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, device_mid_result, device_prefixsum_result, end_pos - start_pos);
cudaDeviceSynchronize();
// gpu_sum_time += (double)(clock() - temp_time_gpu) / CLOCKS_PER_SEC;
// temp_time_gpu = clock();
// cudaMemcpy(h_prefixsum_result, device_prefixsum_result, batch_vecs * sizeof(int), cudaMemcpyDeviceToHost);
assignResult<<<grid_prune, block_prune>>>(device_prefixsum_result, device_ret_result, start_pos, end_pos);
cudaDeviceSynchronize();
// gpu_assign_time += (double)(clock() - temp_time_gpu) / CLOCKS_PER_SEC;
cudaMemcpy(temp_length, device_prefixsum_result + end_pos - start_pos - 1, sizeof(int), cudaMemcpyDeviceToHost);
// cudaMemcpy(h_ret_result, device_prefixsum_result, batch_vecs * sizeof(int), cudaMemcpyDeviceToHost);
current_length += min(temp_length[0], ret_limit);
// if (current_length >= num_vecs / 20) break;
// start_pos += batch_vecs;
// end_pos += batch_vecs;
// if (end_pos > num_vecs) end_pos = num_vecs;
}
// for (int j = 0; j < sum_batch_vecs; ++j) {
// if (norm_threshold > norm_filter[j]) {
// sum_filter_length += num_vecs - top_norm - j * batch_vecs;
// break;
// }
// temp_time_gpu = clock();
// calApproxVecs<<<grid_prune, block_prune, size_lookup_table * sizeof(pq_float)>>>(device_mid_result, device_codebook, device_lookup_table,
// device_q_map, num_q, Ks, num_vecs, start_pos, end_pos, threshold[i]);
// cudaDeviceSynchronize();
// gpu_cal_time += (double)(clock() - temp_time_gpu) / CLOCKS_PER_SEC;
// temp_time_gpu = clock();
// // cudaMemcpy(h_mid_result, device_mid_result, batch_vecs * sizeof(int), cudaMemcpyDeviceToHost);
// cub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, device_mid_result, device_prefixsum_result, batch_vecs);
// cudaDeviceSynchronize();
// gpu_sum_time += (double)(clock() - temp_time_gpu) / CLOCKS_PER_SEC;
// temp_time_gpu = clock();
// // cudaMemcpy(h_prefixsum_result, device_prefixsum_result, batch_vecs * sizeof(int), cudaMemcpyDeviceToHost);
// assignResult2<<<grid_prune, block_prune>>>(device_prefixsum_result, device_ret_result, start_pos, end_pos, current_length);
// cudaDeviceSynchronize();
// gpu_assign_time += (double)(clock() - temp_time_gpu) / CLOCKS_PER_SEC;
// cudaMemcpy(temp_length, device_prefixsum_result + end_pos - start_pos - 1, sizeof(int), cudaMemcpyDeviceToHost);
// // cudaMemcpy(h_ret_result, device_prefixsum_result, batch_vecs * sizeof(int), cudaMemcpyDeviceToHost);
// current_length += temp_length[0];
// if (current_length >= num_vecs / 20) break;
// start_pos += batch_vecs;
// end_pos += batch_vecs;
// if (end_pos > num_vecs) end_pos = num_vecs;
// }
// temp_time_gpu = clock();
cudaMemcpy(ret_result, device_ret_result, current_length * sizeof(int), cudaMemcpyDeviceToHost);
// temp_time_cpu = clock();
// copy_back_time += (double)(temp_time_cpu - temp_time_gpu) / CLOCKS_PER_SEC;
for (int j = 0; j < topk; ++j) ret_result[current_length + j] = candidate_init[i][j];
// bool flag = 0;
// for (int j = 0; j < current_length; ++j) {
// if (ret_result[j] >= num_vecs) {
// flag = 1;
// break;
// }
// }
// if (flag) {
// printf("# %dth query outputs wrong with threshold %f!\n", i, threshold[i]);
// for (int j = 0; j < current_length; ++j) {
// printf("%d ", ret_result[j]);
// }
// printf("\n");
// break;
// }
current_length += topk;
// if (current_length >= num_vecs / 2) {
// for (int j = 0; j < sum_batch_vecs; ++j) printf("# %dth norm filter is %f\n", j, norm_filter[j]);
// printf("# %dth query threshold is %f and query norm is %f\n", i, threshold[i], query_norm[i]);
// break;
// }
max_length = max(max_length, current_length);
sum_final_length += current_length;
query_idx = i;
calIP(ret_result, current_length, ip_vecs, num_dimen);
nth_element(ip_vecs, ip_vecs + topk, ip_vecs + current_length);
// bool flag = 0;
// for (int j = 0; j < topk - 1; ++j) {
// if (-ip_vecs[j].first < -ip_vecs[topk - 1].first) {
// flag = 1;
// break;
// }
// }
// if (flag) {
// printf("# %dth query is wrong\n", i);
// for (int j = 0; j < topk; ++j) {
// printf("# id: %d ip: %f\n", ip_vecs[j].second, ip_vecs[j].first);
// }
// break;
// }
// for (int j = 0; j < topk; ++j) {
// printf("# id: %d ip: %f\n", ip_vecs[j].second, ip_vecs[j].first);
// }
for (int j = 0; j < topk; ++j) ret_result[j] = ip_vecs[j].second;
// printf("# length = %d\n", current_length);
// std::sort(ret_result, ret_result + current_length, cmpIPIndex);
sum_final_correct += sameIndex(ret_result, answer[i], topk);
// end_time_cpu = clock();
// cpu_time += (double)(clock() - temp_time_cpu) / CLOCKS_PER_SEC;
// break;
// if (i % batch_print == batch_print - 1) printf("# %dth query has been processed\n", i);
// query_cnt++;
//if (query_cnt == 10000) break;
}
//if (query_cnt == 10000) break;
//}
clock_t end_time = clock();
double query_processing_time = (double)(end_time - start_time) / CLOCKS_PER_SEC + total_time;
printf("\n# time spend: %fs\n# top norm faiss time spend: %fs\n# cpu time: %fs\n# gpu cal time: %fs\n# gpu sum time: %fs\n# gpu assign time: %fs\n# prepare time: %fs\n# copy back time: %fs",
query_processing_time, total_time, cpu_time, gpu_cal_time, gpu_sum_time, gpu_assign_time, prepare_time, copy_back_time);
printf("\n# total query: %d\n# total vecs: %d\n# norm filter length: %f\n# max length: %d\n# final_length: %f",
num_query, num_vecs, sum_filter_length * 1.0 / num_query, max_length, sum_final_length * 1.0 / num_query);
printf("\n# recall: %f\n# init recall: %f\n", sum_final_correct * 1.0 / num_query / topk,
sum_init_correct * 1.0 / num_query / topk);
// delete all the data
// printf("# Begin delete gpu array 1!");
cudaFree(device_codeword_nq);
// printf("# Begin delete gpu array 2!");
cudaFree(device_codeword_pq);
// printf("# Begin delete gpu array 3!");
cudaFree(device_query);
// printf("# Begin delete gpu array 4!");
cudaFree(device_codebook);
// printf("# Begin delete gpu array 5!");
cudaFree(device_q_map);
// printf("# Begin delete gpu array 6!");
cudaFree(device_lookup_table);
// printf("# Begin delete gpu array 7!");
cudaFree(device_mid_result);
// printf("# Begin delete gpu array 8!");
cudaFree(device_prefixsum_result);
// printf("# Begin delete gpu array 9!");
cudaFree(device_ret_result);
// printf("# Begin delete gpu array 10!");
cudaFree(d_temp_storage);
// printf("# Begin delete cpu array!");
delete [] codeword_nq;
delete [] codeword_pq;
delete [] codebook;
delete [] query;
delete [] query_norm;
delete [] threshold;
free(vecs);
delete [] norm_filter;
delete [] ret_result;
delete [] h_mid_result;
delete [] h_prefixsum_result;
delete [] h_ret_result;
delete norm_ret;
for (int i = 0; i < num_query; ++i) delete [] candidate_init[i];
delete [] candidate_init;
for (int i = 0; i < num_query; ++i) delete [] answer[i];
delete [] answer;
delete ip_vecs;
return 0;
}
|
c8a5244b3080f4abb9d74323698c2ca7a1e428e1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// MP 1
#include <wb.h>
__global__ void vecAdd(float *in1, float *in2, float *out, int len) {
//@@ Insert code to implement vector addition here
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < len) {
out[i] = in1[i] + in2[i];
}
}
int main(int argc, char **argv) {
wbArg_t args;
int inputLength;
float *hostInput1;
float *hostInput2;
float *hostOutput;
float *deviceInput1;
float *deviceInput2;
float *deviceOutput;
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput1 = (float *)wbImport(wbArg_getInputFile(args, 0), &inputLength);
hostInput2 = (float *)wbImport(wbArg_getInputFile(args, 1), &inputLength);
hostOutput = (float *)malloc(inputLength * sizeof(float));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The input length is ", inputLength);
wbTime_start(GPU, "Allocating GPU memory.");
//@@ Allocate GPU memory here
hipError_t err; //For fetching cuda errors
err = hipMalloc((void **)&deviceOutput, inputLength * sizeof(float));
if (err != hipSuccess) {
printf("%s at line %d\n", hipGetErrorString(err), __LINE__);
exit(EXIT_FAILURE);
}
err = hipMalloc((void **)&deviceInput1, inputLength * sizeof(float));
if (err != hipSuccess) {
printf("%s at line %d\n", hipGetErrorString(err), __LINE__);
exit(EXIT_FAILURE);
}
err = hipMalloc((void **)&deviceInput2, inputLength * sizeof(float));
if (err != hipSuccess) {
printf("%s at line %d\n", hipGetErrorString(err), __LINE__);
exit(EXIT_FAILURE);
}
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
//@@ Copy memory to the GPU here
err = hipMemcpy(deviceInput1, hostInput1, inputLength * sizeof(float), hipMemcpyHostToDevice);
if (err != hipSuccess) {
printf("%s at line %d\n", hipGetErrorString(err), __LINE__);
exit(EXIT_FAILURE);
}
err = hipMemcpy(deviceInput2, hostInput2, inputLength * sizeof(float), hipMemcpyHostToDevice);
if (err != hipSuccess) {
printf("%s at line %d\n", hipGetErrorString(err), __LINE__);
exit(EXIT_FAILURE);
}
wbTime_stop(GPU, "Copying input memory to the GPU.");
//@@ Initialize the grid and block dimensions here
dim3 DimGrid(ceil(inputLength / 256.0), 1, 1);
dim3 DimBlock(256, 1, 1);
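  // Note (added for illustration): ceil(inputLength / 256.0) rounds the grid size up so a partial
  // final block is still launched; e.g. inputLength = 1000 gives 4 blocks of 256 threads, and the
  // i < len guard in vecAdd masks off the 24 surplus threads.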
wbTime_start(Compute, "Performing CUDA computation");
//@@ Launch the GPU Kernel here
vecAdd << <DimGrid, DimBlock >> >(deviceInput1, deviceInput2, deviceOutput, inputLength);
hipDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
//@@ Copy the GPU memory back to the CPU here
  err = hipMemcpy(hostOutput, deviceOutput, inputLength * sizeof(float), hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
printf("%s at line %d\n", hipGetErrorString(err), __LINE__);
exit(EXIT_FAILURE);
}
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
//@@ Free the GPU memory here
hipFree(deviceInput1);
hipFree(deviceInput2);
hipFree(deviceOutput);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostOutput, inputLength);
free(hostInput1);
free(hostInput2);
free(hostOutput);
return 0;
}
|
c8a5244b3080f4abb9d74323698c2ca7a1e428e1.cu
|
// MP 1
#include <wb.h>
__global__ void vecAdd(float *in1, float *in2, float *out, int len) {
//@@ Insert code to implement vector addition here
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < len) {
out[i] = in1[i] + in2[i];
}
}
int main(int argc, char **argv) {
wbArg_t args;
int inputLength;
float *hostInput1;
float *hostInput2;
float *hostOutput;
float *deviceInput1;
float *deviceInput2;
float *deviceOutput;
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput1 = (float *)wbImport(wbArg_getInputFile(args, 0), &inputLength);
hostInput2 = (float *)wbImport(wbArg_getInputFile(args, 1), &inputLength);
hostOutput = (float *)malloc(inputLength * sizeof(float));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The input length is ", inputLength);
wbTime_start(GPU, "Allocating GPU memory.");
//@@ Allocate GPU memory here
cudaError_t err; //For fetching cuda errors
err = cudaMalloc((void **)&deviceOutput, inputLength * sizeof(float));
if (err != cudaSuccess) {
printf("%s at line %d\n", cudaGetErrorString(err), __LINE__);
exit(EXIT_FAILURE);
}
err = cudaMalloc((void **)&deviceInput1, inputLength * sizeof(float));
if (err != cudaSuccess) {
printf("%s at line %d\n", cudaGetErrorString(err), __LINE__);
exit(EXIT_FAILURE);
}
err = cudaMalloc((void **)&deviceInput2, inputLength * sizeof(float));
if (err != cudaSuccess) {
printf("%s at line %d\n", cudaGetErrorString(err), __LINE__);
exit(EXIT_FAILURE);
}
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
//@@ Copy memory to the GPU here
err = cudaMemcpy(deviceInput1, hostInput1, inputLength * sizeof(float), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
printf("%s at line %d\n", cudaGetErrorString(err), __LINE__);
exit(EXIT_FAILURE);
}
err = cudaMemcpy(deviceInput2, hostInput2, inputLength * sizeof(float), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
printf("%s at line %d\n", cudaGetErrorString(err), __LINE__);
exit(EXIT_FAILURE);
}
wbTime_stop(GPU, "Copying input memory to the GPU.");
//@@ Initialize the grid and block dimensions here
dim3 DimGrid(ceil(inputLength / 256.0), 1, 1);
dim3 DimBlock(256, 1, 1);
wbTime_start(Compute, "Performing CUDA computation");
//@@ Launch the GPU Kernel here
vecAdd << <DimGrid, DimBlock >> >(deviceInput1, deviceInput2, deviceOutput, inputLength);
cudaDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
//@@ Copy the GPU memory back to the CPU here
  err = cudaMemcpy(hostOutput, deviceOutput, inputLength * sizeof(float), cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
printf("%s at line %d\n", cudaGetErrorString(err), __LINE__);
exit(EXIT_FAILURE);
}
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
//@@ Free the GPU memory here
cudaFree(deviceInput1);
cudaFree(deviceInput2);
cudaFree(deviceOutput);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostOutput, inputLength);
free(hostInput1);
free(hostInput2);
free(hostOutput);
return 0;
}
|
17bf81eb885293b35dc7a7c506777e0d8aa54943.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/concatenate.hpp>
#include <cudf/detail/indexalator.cuh>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/search.hpp>
#include <cudf/detail/stream_compaction.hpp>
#include <cudf/detail/valid_if.cuh>
#include <cudf/dictionary/detail/encode.hpp>
#include <cudf/dictionary/dictionary_column_view.hpp>
#include <cudf/dictionary/dictionary_factories.hpp>
#include <cudf/stream_compaction.hpp>
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/binary_search.h>
namespace cudf {
namespace dictionary {
namespace detail {
namespace {
/**
* @brief Type-dispatch functor for remapping the old indices to new values based on the new
* key-set.
*
* The dispatch is based on the key type.
* The output column is the new indices column for the new dictionary column.
*/
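// Worked example (illustrative only, not taken from the cudf sources): if the old dictionary keys
// are {"b","d"} and the indices column is {0,1,0}, the permutation iterator below yields the key
// values {"b","d","b"}; searching them against a new key-set {"a","b","c","d"} with
// thrust::lower_bound produces the remapped indices {1,3,1}.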
struct dispatch_compute_indices {
template <typename Element>
typename std::enable_if_t<cudf::is_relationally_comparable<Element, Element>(),
std::unique_ptr<column>>
operator()(dictionary_column_view const& input,
column_view const& new_keys,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
auto dictionary_view = column_device_view::create(input.parent(), stream);
auto d_dictionary = *dictionary_view;
auto keys_view = column_device_view::create(input.keys(), stream);
auto dictionary_itr = thrust::make_permutation_iterator(
keys_view->begin<Element>(),
thrust::make_transform_iterator(
thrust::make_counting_iterator<size_type>(0), [d_dictionary] __device__(size_type idx) {
if (d_dictionary.is_null(idx)) return 0;
return static_cast<size_type>(d_dictionary.element<dictionary32>(idx));
}));
auto new_keys_view = column_device_view::create(new_keys, stream);
// create output indices column
auto result = make_numeric_column(get_indices_type_for_size(new_keys.size()),
input.size(),
mask_state::UNALLOCATED,
stream,
mr);
auto result_itr =
cudf::detail::indexalator_factory::make_output_iterator(result->mutable_view());
thrust::lower_bound(rmm::exec_policy(stream)->on(stream),
new_keys_view->begin<Element>(),
new_keys_view->end<Element>(),
dictionary_itr,
dictionary_itr + input.size(),
result_itr,
thrust::less<Element>());
result->set_null_count(0);
return result;
}
template <typename Element>
typename std::enable_if_t<!cudf::is_relationally_comparable<Element, Element>(),
std::unique_ptr<column>>
operator()(dictionary_column_view const& input,
column_view const& new_keys,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
CUDF_FAIL("list_view dictionary set_keys not supported yet");
}
};
} // namespace
//
std::unique_ptr<column> set_keys(
dictionary_column_view const& dictionary_column,
column_view const& new_keys,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource(),
hipStream_t stream = 0)
{
CUDF_EXPECTS(!new_keys.has_nulls(), "keys parameter must not have nulls");
auto keys = dictionary_column.keys();
CUDF_EXPECTS(keys.type() == new_keys.type(), "keys types must match");
// copy the keys -- use drop_duplicates to make sure they are sorted and unique
auto table_keys = cudf::detail::drop_duplicates(table_view{{new_keys}},
std::vector<size_type>{0},
duplicate_keep_option::KEEP_FIRST,
null_equality::EQUAL,
mr,
stream)
->release();
std::unique_ptr<column> keys_column(std::move(table_keys.front()));
// compute the new nulls
auto matches = cudf::detail::contains(keys, keys_column->view(), mr, stream);
auto d_matches = matches->view().data<bool>();
auto indices_itr =
cudf::detail::indexalator_factory::make_input_iterator(dictionary_column.indices());
auto d_null_mask = dictionary_column.null_mask();
auto new_nulls = cudf::detail::valid_if(
thrust::make_counting_iterator<size_type>(dictionary_column.offset()),
thrust::make_counting_iterator<size_type>(dictionary_column.offset() +
dictionary_column.size()),
[d_null_mask, indices_itr, d_matches] __device__(size_type idx) {
if (d_null_mask && !bit_is_set(d_null_mask, idx)) return false;
return d_matches[indices_itr[idx]];
},
stream,
mr);
// compute the new indices
auto indices_column = type_dispatcher(keys_column->type(),
dispatch_compute_indices{},
dictionary_column,
keys_column->view(),
mr,
stream);
// create column with keys_column and indices_column
return make_dictionary_column(std::move(keys_column),
std::move(indices_column),
std::move(new_nulls.first),
new_nulls.second);
}
std::vector<std::unique_ptr<column>> match_dictionaries(std::vector<dictionary_column_view> input,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
std::vector<column_view> keys(input.size());
std::transform(input.begin(), input.end(), keys.begin(), [](auto& col) { return col.keys(); });
auto new_keys = cudf::detail::concatenate(keys, rmm::mr::get_current_device_resource(), stream);
auto keys_view = new_keys->view();
std::vector<std::unique_ptr<column>> result(input.size());
std::transform(input.begin(), input.end(), result.begin(), [keys_view, mr, stream](auto& col) {
return set_keys(col, keys_view, mr, stream);
});
return result;
}
} // namespace detail
// external API
std::unique_ptr<column> set_keys(dictionary_column_view const& dictionary_column,
column_view const& keys,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::set_keys(dictionary_column, keys, mr);
}
} // namespace dictionary
} // namespace cudf
|
17bf81eb885293b35dc7a7c506777e0d8aa54943.cu
|
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/concatenate.hpp>
#include <cudf/detail/indexalator.cuh>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/search.hpp>
#include <cudf/detail/stream_compaction.hpp>
#include <cudf/detail/valid_if.cuh>
#include <cudf/dictionary/detail/encode.hpp>
#include <cudf/dictionary/dictionary_column_view.hpp>
#include <cudf/dictionary/dictionary_factories.hpp>
#include <cudf/stream_compaction.hpp>
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/binary_search.h>
namespace cudf {
namespace dictionary {
namespace detail {
namespace {
/**
* @brief Type-dispatch functor for remapping the old indices to new values based on the new
* key-set.
*
* The dispatch is based on the key type.
* The output column is the new indices column for the new dictionary column.
*/
struct dispatch_compute_indices {
template <typename Element>
typename std::enable_if_t<cudf::is_relationally_comparable<Element, Element>(),
std::unique_ptr<column>>
operator()(dictionary_column_view const& input,
column_view const& new_keys,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
auto dictionary_view = column_device_view::create(input.parent(), stream);
auto d_dictionary = *dictionary_view;
auto keys_view = column_device_view::create(input.keys(), stream);
auto dictionary_itr = thrust::make_permutation_iterator(
keys_view->begin<Element>(),
thrust::make_transform_iterator(
thrust::make_counting_iterator<size_type>(0), [d_dictionary] __device__(size_type idx) {
if (d_dictionary.is_null(idx)) return 0;
return static_cast<size_type>(d_dictionary.element<dictionary32>(idx));
}));
auto new_keys_view = column_device_view::create(new_keys, stream);
// create output indices column
auto result = make_numeric_column(get_indices_type_for_size(new_keys.size()),
input.size(),
mask_state::UNALLOCATED,
stream,
mr);
auto result_itr =
cudf::detail::indexalator_factory::make_output_iterator(result->mutable_view());
thrust::lower_bound(rmm::exec_policy(stream)->on(stream),
new_keys_view->begin<Element>(),
new_keys_view->end<Element>(),
dictionary_itr,
dictionary_itr + input.size(),
result_itr,
thrust::less<Element>());
result->set_null_count(0);
return result;
}
template <typename Element>
typename std::enable_if_t<!cudf::is_relationally_comparable<Element, Element>(),
std::unique_ptr<column>>
operator()(dictionary_column_view const& input,
column_view const& new_keys,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
CUDF_FAIL("list_view dictionary set_keys not supported yet");
}
};
} // namespace
//
std::unique_ptr<column> set_keys(
dictionary_column_view const& dictionary_column,
column_view const& new_keys,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource(),
cudaStream_t stream = 0)
{
CUDF_EXPECTS(!new_keys.has_nulls(), "keys parameter must not have nulls");
auto keys = dictionary_column.keys();
CUDF_EXPECTS(keys.type() == new_keys.type(), "keys types must match");
// copy the keys -- use drop_duplicates to make sure they are sorted and unique
auto table_keys = cudf::detail::drop_duplicates(table_view{{new_keys}},
std::vector<size_type>{0},
duplicate_keep_option::KEEP_FIRST,
null_equality::EQUAL,
mr,
stream)
->release();
std::unique_ptr<column> keys_column(std::move(table_keys.front()));
// compute the new nulls
auto matches = cudf::detail::contains(keys, keys_column->view(), mr, stream);
auto d_matches = matches->view().data<bool>();
auto indices_itr =
cudf::detail::indexalator_factory::make_input_iterator(dictionary_column.indices());
auto d_null_mask = dictionary_column.null_mask();
auto new_nulls = cudf::detail::valid_if(
thrust::make_counting_iterator<size_type>(dictionary_column.offset()),
thrust::make_counting_iterator<size_type>(dictionary_column.offset() +
dictionary_column.size()),
[d_null_mask, indices_itr, d_matches] __device__(size_type idx) {
if (d_null_mask && !bit_is_set(d_null_mask, idx)) return false;
return d_matches[indices_itr[idx]];
},
stream,
mr);
// compute the new indices
auto indices_column = type_dispatcher(keys_column->type(),
dispatch_compute_indices{},
dictionary_column,
keys_column->view(),
mr,
stream);
// create column with keys_column and indices_column
return make_dictionary_column(std::move(keys_column),
std::move(indices_column),
std::move(new_nulls.first),
new_nulls.second);
}
std::vector<std::unique_ptr<column>> match_dictionaries(std::vector<dictionary_column_view> input,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
std::vector<column_view> keys(input.size());
std::transform(input.begin(), input.end(), keys.begin(), [](auto& col) { return col.keys(); });
auto new_keys = cudf::detail::concatenate(keys, rmm::mr::get_current_device_resource(), stream);
auto keys_view = new_keys->view();
std::vector<std::unique_ptr<column>> result(input.size());
std::transform(input.begin(), input.end(), result.begin(), [keys_view, mr, stream](auto& col) {
return set_keys(col, keys_view, mr, stream);
});
return result;
}
} // namespace detail
// external API
std::unique_ptr<column> set_keys(dictionary_column_view const& dictionary_column,
column_view const& keys,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::set_keys(dictionary_column, keys, mr);
}
} // namespace dictionary
} // namespace cudf
|
4b7d61b73143986d47af9ce56067f948c76affe4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/hip/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, true, false, 64, 64, 32>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, true, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k32_dropout_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, true, false, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 500
#if __CUDA_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, true, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k32_dropout_sm50` is for sm50-sm70, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 64, 64, 32>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k32_dropout_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k32_dropout_sm70` is for sm70-sm75, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 64, 64, 32>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k32_dropout_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k32_dropout_sm75` is for sm75-sm80, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, true, 64, 64, 32>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, true, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k32_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, true, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, true, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k32_dropout_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
|
4b7d61b73143986d47af9ce56067f948c76affe4.cu
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, true, false, 64, 64, 32>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, true, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k32_dropout_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, true, false, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 500
#if __CUDA_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, true, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k32_dropout_sm50` is for sm50-sm70, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 64, 64, 32>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k32_dropout_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k32_dropout_sm70` is for sm70-sm75, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 64, 64, 32>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k32_dropout_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k32_dropout_sm75` is for sm75-sm80, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, true, 64, 64, 32>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, true, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k32_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, true, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, true, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k32_dropout_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
|
6d66cbd4ef97c42208e01ef52b513bb21be96b12.hip
|
// !!! This is a file automatically generated by hipify!!!
/* Lab 4 Exercise 3 Program
In this exercise we write a CUDA program to implement a matrix addition kernel, using a GPU for parallel computation.
The kernel performs the addition of two `N` by `M` matrices filled with random integer entries.
This program has been adapted from the code written for exercise 2 in which random vectors are added.
We implement matrices as one-dimensional arrays using row-major vectorization to reduce the amount of modification required.
See the following for further reference:
https://en.wikipedia.org/wiki/Row-_and_column-major_order
https://en.wikipedia.org/wiki/Vectorization_(mathematics)
http://www.cplusplus.com/doc/tutorial/arrays/
See the "Multidimensional arrays" section of the last link for discussion of the subtle differences */
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#define N 2048 // The number of matrix rows (i.e. matrix height)
#define M 1000 // The number of matrix columns (i.e. matrix width)
/* We are required to use a 2D grid of thread blocks with 256 threads per block to perform the matrix addition
We choose to arrange the 256 threads within each block into a 16 by 16 square.
Blocks are split into 'warps' (groups of 32 threads) by the hardware, so threads per block should always be a multiple of 32.
More consideration needed on optimal block dimensions to minimise expected leftover threads over typical matrix shapes */
#define BLOCK_WIDTH 16
#define BLOCK_HEIGHT 16
#define THREADS_PER_BLOCK BLOCK_HEIGHT*BLOCK_WIDTH // 16 * 16 = 256, which is divisible by 32
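/* Illustrative sizing under these settings (a sketch that mirrors the launch code in main): for
   N = 2048 and M = 1000 the grid is ceil(1000/16) x ceil(2048/16) = 63 x 128 blocks, so each row
   of blocks carries 63*16 - 1000 = 8 leftover threads that the bounds check in matrixAdd skips. */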
void checkCUDAError(const char*);
void random_ints(int *a);
/* 3.3 Rename your CPU implementation to `matrixAddCPU` and update it and the `validate` function accordingly.
Implements matrix addition using a CPU, taking inputs `a`, `b`, and writing the output to `c`. */
void matrixAddCPU(int *a, int *b, int *c) {
for (unsigned int i = 0; i < N; i++) {
for (unsigned int j = 0; j < M; j++) {
unsigned int idx = i * M + j;
c[idx] = a[idx] + b[idx];
}
}
}
/* The `validate` function compares the GPU result to the CPU result.
It prints an error for each value which is incorrect and returns a value indicating the total number of errors. */
int validate(int *c_1, int *c_2) {
int errors = 0;
for (unsigned int i = 0; i < N; i++) {
for (unsigned int j = 0; j < M; j++) {
unsigned int idx = i * M + j;
if (c_1[idx] != c_2[idx]) {
errors++;
fprintf(stderr, "Error at index (%d,%d): GPU result %d does not match CPU value %d\n", i, j, c_2[idx], c_1[idx]);
}
}
}
return errors;
}
/* 3.4 Change your launch parameters to launch a 2D grid of thread blocks with 256 threads per block.
Create a new kernel `matrixAdd` to perform the matrix addition. */
__global__ void matrixAdd(int *a, int *b, int *c, unsigned int height, unsigned int width) {
/* `blockDim.x` and `blockDim.y` are set by preprocessor definitions `BLOCK_WIDTH` and `BLOCK_HEIGHT`, respectively. */
unsigned int i = blockIdx.y * blockDim.y + threadIdx.y; // Height/row index
unsigned int j = blockIdx.x * blockDim.x + threadIdx.x; // Width/column index
// To avoid out-of-bounds memory errors with leftover threads in end blocks
if ((i < height) && (j < width)) {
unsigned int idx = i * width + j;
c[idx] = a[idx] + b[idx];
}
}
int main(void) {
int *a, *b, *c, *c_ref; // Host copies of a, b, c
int *d_a, *d_b, *d_c; // Device copies of a, b, c
int errors; // Count the number of errors/differences between GPU output and CPU output
/* 3.1 Modify the value of `size` so that enough memory is allocated and subsequently moved between host and device
for an `N` by `M` matrix of `int` values. */
unsigned int size = N * M * sizeof(int); // Memory size of the matrix data
// Allocate space for device copies of a, b, c
hipMalloc((void **)&d_a, size);
hipMalloc((void **)&d_b, size);
hipMalloc((void **)&d_c, size);
checkCUDAError("CUDA malloc");
// Allocate space for host copies of a, b, c and setup input values
a = (int *)malloc(size); random_ints(a);
b = (int *)malloc(size); random_ints(b);
c = (int *)malloc(size);
c_ref = (int *)malloc(size);
// Copy inputs to device
hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);
checkCUDAError("CUDA Memcpy Host to Device");
/* 3.4 Change your launch parameters to launch a 2D grid of thread blocks with 256 threads per block.
Create a new kernel `matrixAdd` to perform the matrix addition.*/
// Launch the `matrixAdd` kernel on the GPU device
unsigned int grid_width = (unsigned int) ceil((double) M / BLOCK_WIDTH);
unsigned int grid_height = (unsigned int) ceil((double) N / BLOCK_HEIGHT);
dim3 blocksPerGrid(grid_width, grid_height, 1);
dim3 threadsPerBlock(BLOCK_WIDTH, BLOCK_HEIGHT, 1);
matrixAdd << <blocksPerGrid, threadsPerBlock >> >(d_a, d_b, d_c, N, M);
checkCUDAError("CUDA kernel");
// Run CPU version of the matrix addition function
matrixAddCPU(a, b, c_ref);
// Copy result back to host
hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost);
checkCUDAError("CUDA Memcpy Device to Host");
// Validate the GPU result
errors = validate(c_ref, c);
printf("CUDA GPU result has %d errors.\n", errors);
// Cleanup
hipFree(d_a); hipFree(d_b); hipFree(d_c);
checkCUDAError("CUDA cleanup");
free(a); free(b); free(c);
return 0;
}
void checkCUDAError(const char *msg) {
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "CUDA ERROR: %s: %s.\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/* 3.2 Modify the `random_ints` function to generate a matrix (rather than a vector) of random `int` values.
We use (row-major) vectorization to implement matrices in this exercise, so could merge the two loops into one anyway. */
void random_ints(int *a) {
for (unsigned int i = 0; i < N; i++) {
for (unsigned int j = 0; j < M; j++) {
a[i * M + j] = rand();
}
}
}
|
6d66cbd4ef97c42208e01ef52b513bb21be96b12.cu
|
/* Lab 4 Exercise 3 Program
In this exercise we write a CUDA program to implement a matrix addition kernel, using a GPU for parallel computation.
The kernel performs the addition of two `N` by `M` matrices filled with random integer entries.
This program has been adapted from the code written for exercise 2 in which random vectors are added.
We implement matrices as one-dimensional arrays using row-major vectorization to reduce the amount of modification required.
See the following for further reference:
https://en.wikipedia.org/wiki/Row-_and_column-major_order
https://en.wikipedia.org/wiki/Vectorization_(mathematics)
http://www.cplusplus.com/doc/tutorial/arrays/
See the "Multidimensional arrays" section of the last link for discussion of the subtle differences */
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define N 2048 // The number of matrix rows (i.e. matrix height)
#define M 1000 // The number of matrix columns (i.e. matrix width)
/* We are required to use a 2D grid of thread blocks with 256 threads per block to perform the matrix addition
We choose to arrange the 256 threads within each block into a 16 by 16 square.
Blocks are split into 'warps' (groups of 32 threads) by the hardware, so threads per block should always be a multiple of 32.
More consideration needed on optimal block dimensions to minimise expected leftover threads over typical matrix shapes */
#define BLOCK_WIDTH 16
#define BLOCK_HEIGHT 16
#define THREADS_PER_BLOCK BLOCK_HEIGHT*BLOCK_WIDTH // 16 * 16 = 256, which is divisible by 32
void checkCUDAError(const char*);
void random_ints(int *a);
/* 3.3 Rename your CPU implementation to `matrixAddCPU` and update it and the `validate` function accordingly.
Implements matrix addition using a CPU, taking inputs `a`, `b`, and writing the output to `c`. */
void matrixAddCPU(int *a, int *b, int *c) {
for (unsigned int i = 0; i < N; i++) {
for (unsigned int j = 0; j < M; j++) {
unsigned int idx = i * M + j;
c[idx] = a[idx] + b[idx];
}
}
}
/* The `validate` function compares the GPU result to the CPU result.
It prints an error for each value which is incorrect and returns a value indicating the total number of errors. */
int validate(int *c_1, int *c_2) {
int errors = 0;
for (unsigned int i = 0; i < N; i++) {
for (unsigned int j = 0; j < M; j++) {
unsigned int idx = i * M + j;
if (c_1[idx] != c_2[idx]) {
errors++;
fprintf(stderr, "Error at index (%d,%d): GPU result %d does not match CPU value %d\n", i, j, c_2[idx], c_1[idx]);
}
}
}
return errors;
}
/* 3.4 Change your launch parameters to launch a 2D grid of thread blocks with 256 threads per block.
Create a new kernel `matrixAdd` to perform the matrix addition. */
__global__ void matrixAdd(int *a, int *b, int *c, unsigned int height, unsigned int width) {
/* `blockDim.x` and `blockDim.y` are set by preprocessor definitions `BLOCK_WIDTH` and `BLOCK_HEIGHT`, respectively. */
unsigned int i = blockIdx.y * blockDim.y + threadIdx.y; // Height/row index
unsigned int j = blockIdx.x * blockDim.x + threadIdx.x; // Width/column index
// To avoid out-of-bounds memory errors with leftover threads in end blocks
if ((i < height) && (j < width)) {
unsigned int idx = i * width + j;
c[idx] = a[idx] + b[idx];
}
}
int main(void) {
int *a, *b, *c, *c_ref; // Host copies of a, b, c
int *d_a, *d_b, *d_c; // Device copies of a, b, c
int errors; // Count the number of errors/differences between GPU output and CPU output
/* 3.1 Modify the value of `size` so that enough memory is allocated and subsequently moved between host and device
for an `N` by `M` matrix of `int` values. */
unsigned int size = N * M * sizeof(int); // Memory size of the matrix data
// Allocate space for device copies of a, b, c
cudaMalloc((void **)&d_a, size);
cudaMalloc((void **)&d_b, size);
cudaMalloc((void **)&d_c, size);
checkCUDAError("CUDA malloc");
// Allocate space for host copies of a, b, c and setup input values
a = (int *)malloc(size); random_ints(a);
b = (int *)malloc(size); random_ints(b);
c = (int *)malloc(size);
c_ref = (int *)malloc(size);
// Copy inputs to device
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
checkCUDAError("CUDA Memcpy Host to Device");
/* 3.4 Change your launch parameters to launch a 2D grid of thread blocks with 256 threads per block.
Create a new kernel `matrixAdd` to perform the matrix addition.*/
// Launch the `matrixAdd` kernel on the GPU device
unsigned int grid_width = (unsigned int) ceil((double) M / BLOCK_WIDTH);
unsigned int grid_height = (unsigned int) ceil((double) N / BLOCK_HEIGHT);
dim3 blocksPerGrid(grid_width, grid_height, 1);
dim3 threadsPerBlock(BLOCK_WIDTH, BLOCK_HEIGHT, 1);
matrixAdd << <blocksPerGrid, threadsPerBlock >> >(d_a, d_b, d_c, N, M);
checkCUDAError("CUDA kernel");
// Run CPU version of the matrix addition function
matrixAddCPU(a, b, c_ref);
// Copy result back to host
cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
checkCUDAError("CUDA Memcpy Device to Host");
// Validate the GPU result
errors = validate(c_ref, c);
printf("CUDA GPU result has %d errors.\n", errors);
// Cleanup
cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
checkCUDAError("CUDA cleanup");
free(a); free(b); free(c);
return 0;
}
void checkCUDAError(const char *msg) {
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "CUDA ERROR: %s: %s.\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/* 3.2 Modify the `random_ints` function to generate a matrix (rather than a vector) of random `int` values.
We use (row-major) vectorization to implement matrices in this exercise, so could merge the two loops into one anyway. */
void random_ints(int *a) {
for (unsigned int i = 0; i < N; i++) {
for (unsigned int j = 0; j < M; j++) {
a[i * M + j] = rand();
}
}
}
|
57cbda5c2426677713cb540a580a7200c1ac9b2e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
hipError_t err = hipGetLastError();
if (hipSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
namespace StreamCompaction {
namespace Common {
/**
* Maps an array to an array of 0s and 1s for stream compaction. Elements
* which map to 0 will be removed, and elements which map to 1 will be kept.
*/
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
            int thrId = threadIdx.x + (blockIdx.x * blockDim.x);
            if (thrId >= n) {
                return; // skip threads past the end of the input, matching the guard in kernScatter
            }
            if (idata[thrId] == 0) {
                bools[thrId] = 0;
            }
            else {
                bools[thrId] = 1;
            }
}
/**
* Performs scatter on an array. That is, for each element in idata,
* if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
*/
__global__ void kernScatter(int n, int *odata,
const int *idata, const int *bools, const int *indices) {
int thrId = threadIdx.x + (blockIdx.x * blockDim.x);
if (thrId < n) {
if (bools[thrId] == 1) {
odata[indices[thrId]] = idata[thrId];
}
}
}
}
}
|
57cbda5c2426677713cb540a580a7200c1ac9b2e.cu
|
#include "common.h"
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
namespace StreamCompaction {
namespace Common {
/**
* Maps an array to an array of 0s and 1s for stream compaction. Elements
* which map to 0 will be removed, and elements which map to 1 will be kept.
*/
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
            int thrId = threadIdx.x + (blockIdx.x * blockDim.x);
            if (thrId >= n) {
                return; // skip threads past the end of the input, matching the guard in kernScatter
            }
            if (idata[thrId] == 0) {
                bools[thrId] = 0;
            }
            else {
                bools[thrId] = 1;
            }
}
/**
* Performs scatter on an array. That is, for each element in idata,
* if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
*/
__global__ void kernScatter(int n, int *odata,
const int *idata, const int *bools, const int *indices) {
int thrId = threadIdx.x + (blockIdx.x * blockDim.x);
if (thrId < n) {
if (bools[thrId] == 1) {
odata[indices[thrId]] = idata[thrId];
}
}
}
}
}
|
e5e927941256a94d5d09a7e4c0271139e5fc2a78.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include <thrust/device_ptr.h>
#define SIZE int(pow(2,10))
#define RANGE int(pow(2,5)) /* Numbers are generated from 0 to RANGE-1*/
#define BLOCKSIZE 1024
#define NUMBLOCKS SIZE/BLOCKSIZE
#define nBITS int(log(RANGE)/log(2)) /* log(n)+1 bits to represent n */
#define OFFSET 2 /* Number of bits for sorting in each pass */
#define HISTO_SIZE 4
#define digit(n,exp) (n/exp)%HISTO_SIZE
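/* Worked example (added for illustration): with OFFSET = 2 the digit() macro extracts base-4
   digits, e.g. for n = 27 (binary 11011): digit(27,1) = 3, digit(27,4) = 2, digit(27,16) = 1. */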
__global__ void RadixSort(int*,int,int*,int*);
__global__ void BlockWiseSort(int*,int,int*,int*);
void CheckSolution(int*);
int main()
{
int in=0,out=1;
int** array=new int*[2];
for(int i=0;i<2;i++)
array[i]=new int[SIZE];
for(int i=0;i<SIZE;i++)
array[in][i]=rand()%RANGE;
int* array_d;
hipMalloc((void**)&array_d,SIZE*sizeof(int));
int host_histo[HISTO_SIZE];
int* device_histo;
hipMalloc((void**)&device_histo,HISTO_SIZE*sizeof(int));
int BlockWiseHistograms[NUMBLOCKS*HISTO_SIZE];
int* BlockWiseHistograms_d;
hipMalloc((void**)&BlockWiseHistograms_d,NUMBLOCKS*HISTO_SIZE*sizeof(int));
int exp,rank;
for(int i=0;i<nBITS;i+=OFFSET)
{
hipMemcpy(array_d,array[in],SIZE*sizeof(int),hipMemcpyHostToDevice);
exp=pow(2,i);
/* Perform block wise histogram computation, add them up to the global histogram */
hipLaunchKernelGGL(( RadixSort), dim3(NUMBLOCKS),dim3(BLOCKSIZE),HISTO_SIZE*sizeof(int), 0, array_d,exp,device_histo,BlockWiseHistograms_d);
hipMemcpy(BlockWiseHistograms,BlockWiseHistograms_d,NUMBLOCKS*HISTO_SIZE*sizeof(int),hipMemcpyDeviceToHost);
/* Scan each of the block wise histograms */
thrust::device_vector<int> ThrustBlockWiseHistograms(BlockWiseHistograms,BlockWiseHistograms+NUMBLOCKS*HISTO_SIZE);
for(int j=0;j<NUMBLOCKS;j++)
thrust::inclusive_scan(ThrustBlockWiseHistograms.begin()+j*HISTO_SIZE,   // each block-wise histogram spans HISTO_SIZE entries
ThrustBlockWiseHistograms.begin()+(j+1)*HISTO_SIZE,
ThrustBlockWiseHistograms.begin()+j*HISTO_SIZE
);
thrust::copy(ThrustBlockWiseHistograms.begin(), ThrustBlockWiseHistograms.end(), thrust::device_pointer_cast(BlockWiseHistograms));
hipMemcpy(BlockWiseHistograms_d,BlockWiseHistograms,NUMBLOCKS*HISTO_SIZE*sizeof(int),hipMemcpyHostToDevice);
int* BlockWiseSortedArray_d;
hipMalloc((void**)&BlockWiseSortedArray_d,SIZE*sizeof(int));
/* Sort the array blockwise based on the scanned histograms */
hipLaunchKernelGGL(( BlockWiseSort), dim3(NUMBLOCKS),dim3(BLOCKSIZE),HISTO_SIZE, 0, array_d,exp,BlockWiseSortedArray_d,BlockWiseHistograms_d);
hipMemcpy(host_histo,device_histo,HISTO_SIZE*sizeof(int),hipMemcpyDeviceToHost);
thrust::device_vector<int> GlobalHistogram(host_histo,host_histo+HISTO_SIZE);
thrust::inclusive_scan(GlobalHistogram.begin(),GlobalHistogram.end(),GlobalHistogram.begin());
thrust::copy(GlobalHistogram.begin(), GlobalHistogram.end(), thrust::device_pointer_cast(host_histo));
/* Sort the array using the global histogram */
for(int j=SIZE-1;j>=0;j--)
{
rank=host_histo[digit(array[in][j],exp)]-1;
array[out][rank]=array[in][j];
host_histo[digit(array[in][j],exp)]--;
}
in=1-in;
out=1-out;
}
CheckSolution(array[in]);
}
__global__ void RadixSort(int* array_d, int exp, int* device_histo,int* BlockWiseHistograms_d)
{
int tx=threadIdx.x,bx=blockIdx.x;
int inx=bx*blockDim.x+tx;
extern __shared__ int shared_histo[];
if(inx==0)
{
for(int i=0;i<HISTO_SIZE;i++)
device_histo[i]=0;
}
if(tx==0)
{
for(int i=0;i<HISTO_SIZE;i++)
shared_histo[i]=0;
}
__syncthreads();
atomicAdd(&shared_histo[digit(array_d[inx],exp)],1);
__syncthreads();
if(tx==0)
{
for(int i=0;i<HISTO_SIZE;i++)
{
atomicAdd(&device_histo[i],shared_histo[i]);
BlockWiseHistograms_d[bx*HISTO_SIZE+i]=shared_histo[i];
}
}
}
__global__ void BlockWiseSort(int* array_d,int exp,int* BlockWiseSortedArray_d, int* BlockWiseHistograms_d)
{
int tx=threadIdx.x,bx=blockIdx.x,inx=bx*blockDim.x+tx;
int rank;
if(tx==0)
{
for(int j=inx+BLOCKSIZE-1;j>=inx;j--)
{
rank=BlockWiseHistograms_d[digit(array_d[j],exp)]-1;
BlockWiseSortedArray_d[rank+inx]=array_d[j];
BlockWiseHistograms_d[digit(array_d[j],exp)]--;
}
}
__syncthreads();
array_d[inx]=BlockWiseSortedArray_d[inx];
}
void CheckSolution(int* array)
{
int i;
for(i=0;i<SIZE-1;i++)
if(array[i]>array[i+1])
{
printf("Solution is wrong!\n");
break;
}
if(i==SIZE-1)
printf("Solution is right!\n");
}
|
e5e927941256a94d5d09a7e4c0271139e5fc2a78.cu
|
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include <thrust/device_ptr.h>
#define SIZE int(pow(2,10))
#define RANGE int(pow(2,5)) /* Numbers are generated from 0 to RANGE-1*/
#define BLOCKSIZE 1024
#define NUMBLOCKS SIZE/BLOCKSIZE
#define nBITS int(log(RANGE)/log(2)) /* log(n)+1 bits to represent n */
#define OFFSET 2 /* Number of bits for sorting in each pass */
#define HISTO_SIZE 4
#define digit(n,exp) (n/exp)%HISTO_SIZE
__global__ void RadixSort(int*,int,int*,int*);
__global__ void BlockWiseSort(int*,int,int*,int*);
void CheckSolution(int*);
int main()
{
int in=0,out=1;
int** array=new int*[2];
for(int i=0;i<2;i++)
array[i]=new int[SIZE];
for(int i=0;i<SIZE;i++)
array[in][i]=rand()%RANGE;
int* array_d;
cudaMalloc((void**)&array_d,SIZE*sizeof(int));
int host_histo[HISTO_SIZE];
int* device_histo;
cudaMalloc((void**)&device_histo,HISTO_SIZE*sizeof(int));
int BlockWiseHistograms[NUMBLOCKS*HISTO_SIZE];
int* BlockWiseHistograms_d;
cudaMalloc((void**)&BlockWiseHistograms_d,NUMBLOCKS*HISTO_SIZE*sizeof(int));
int exp,rank;
for(int i=0;i<nBITS;i+=OFFSET)
{
cudaMemcpy(array_d,array[in],SIZE*sizeof(int),cudaMemcpyHostToDevice);
exp=pow(2,i);
/* Perform block wise histogram computation, add them up to the global histogram */
RadixSort<<<NUMBLOCKS,BLOCKSIZE,HISTO_SIZE*sizeof(int)>>>(array_d,exp,device_histo,BlockWiseHistograms_d);
cudaMemcpy(BlockWiseHistograms,BlockWiseHistograms_d,NUMBLOCKS*HISTO_SIZE*sizeof(int),cudaMemcpyDeviceToHost);
/* Scan each of the block wise histograms */
thrust::device_vector<int> ThrustBlockWiseHistograms(BlockWiseHistograms,BlockWiseHistograms+NUMBLOCKS*HISTO_SIZE);
for(int j=0;j<NUMBLOCKS;j++)
thrust::inclusive_scan(ThrustBlockWiseHistograms.begin()+j*HISTO_SIZE,   // each block-wise histogram spans HISTO_SIZE entries
ThrustBlockWiseHistograms.begin()+(j+1)*HISTO_SIZE,
ThrustBlockWiseHistograms.begin()+j*HISTO_SIZE
);
thrust::copy(ThrustBlockWiseHistograms.begin(), ThrustBlockWiseHistograms.end(), thrust::device_pointer_cast(BlockWiseHistograms));
cudaMemcpy(BlockWiseHistograms_d,BlockWiseHistograms,NUMBLOCKS*HISTO_SIZE*sizeof(int),cudaMemcpyHostToDevice);
int* BlockWiseSortedArray_d;
cudaMalloc((void**)&BlockWiseSortedArray_d,SIZE*sizeof(int));
/* Sort the array blockwise based on the scanned histograms */
BlockWiseSort<<<NUMBLOCKS,BLOCKSIZE,HISTO_SIZE>>>(array_d,exp,BlockWiseSortedArray_d,BlockWiseHistograms_d);
cudaMemcpy(host_histo,device_histo,HISTO_SIZE*sizeof(int),cudaMemcpyDeviceToHost);
thrust::device_vector<int> GlobalHistogram(host_histo,host_histo+HISTO_SIZE);
thrust::inclusive_scan(GlobalHistogram.begin(),GlobalHistogram.end(),GlobalHistogram.begin());
thrust::copy(GlobalHistogram.begin(), GlobalHistogram.end(), thrust::device_pointer_cast(host_histo));
/* Sort the array using the global histogram */
for(int j=SIZE-1;j>=0;j--)
{
rank=host_histo[digit(array[in][j],exp)]-1;
array[out][rank]=array[in][j];
host_histo[digit(array[in][j],exp)]--;
}
in=1-in;
out=1-out;
}
CheckSolution(array[in]);
}
__global__ void RadixSort(int* array_d, int exp, int* device_histo,int* BlockWiseHistograms_d)
{
int tx=threadIdx.x,bx=blockIdx.x;
int inx=bx*blockDim.x+tx;
extern __shared__ int shared_histo[];
if(inx==0)
{
for(int i=0;i<HISTO_SIZE;i++)
device_histo[i]=0;
}
if(tx==0)
{
for(int i=0;i<HISTO_SIZE;i++)
shared_histo[i]=0;
}
__syncthreads();
atomicAdd(&shared_histo[digit(array_d[inx],exp)],1);
__syncthreads();
if(tx==0)
{
for(int i=0;i<HISTO_SIZE;i++)
{
atomicAdd(&device_histo[i],shared_histo[i]);
BlockWiseHistograms_d[bx*HISTO_SIZE+i]=shared_histo[i];
}
}
}
__global__ void BlockWiseSort(int* array_d,int exp,int* BlockWiseSortedArray_d, int* BlockWiseHistograms_d)
{
int tx=threadIdx.x,bx=blockIdx.x,inx=bx*blockDim.x+tx;
int rank;
if(tx==0)
{
for(int j=inx+BLOCKSIZE-1;j>=inx;j--)
{
rank=BlockWiseHistograms_d[digit(array_d[j],exp)]-1;
BlockWiseSortedArray_d[rank+inx]=array_d[j];
BlockWiseHistograms_d[digit(array_d[j],exp)]--;
}
}
__syncthreads();
array_d[inx]=BlockWiseSortedArray_d[inx];
}
void CheckSolution(int* array)
{
int i;
for(i=0;i<SIZE-1;i++)
if(array[i]>array[i+1])
{
printf("Solution is wrong!\n");
break;
}
if(i==SIZE-1)
printf("Solution is right!\n");
}
|
0149554208251813b7d24a1d825b344913182d95.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#include <hip/hip_runtime.h>
#define N 100
#define DIM 2
char le_entrada();
char inicializa_parametros();
double * aloca_matriz(int, int);
void cal_cond_robin();
char parametro_independentes();
char copia_dados_para_gpu();
void copia_dados_para_cpu();
char calcula_pressao_velocidade(int, int, int, int, int);
char atualiza_mult_lagrange(int tid);
static void HandleError( hipError_t err,
const char *file,
int line ) {
if (err != hipSuccess) {
printf( "%s in %s at line %d\n", hipGetErrorString( err ),
file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
#define HANDLE_NULL( a ) {if (a == NULL) { \
printf( "Host memory failed in %s at line %d\n", \
__FILE__, __LINE__ ); \
exit( EXIT_FAILURE );}}
//- - - - - - - - - - - - - - GLOBALS - - - - - - - - - - - - - - //
/* - - - - - - - External Inputs - - - - - - - */
int tam_mat_interna = 3, tam_mat_real = 3 + 2, max_interacoes = 1000, op_contorno = 1;
double tam_regiao = 20000.00, erro_max = 1e-5, valor_contor = 2.00;
double h = 20000.00 / 3; // HEIGHT H = TAM_REGIAO / TAM_MAT_INTERNA
double *mat_perm = NULL, *mat_font = NULL, *mat_epsilon = NULL;
/* - - - - - - - End of External Inputs - - - - - - - */
/* - - - - - - - CPU pointers - - - - - - - */
double *q_R = NULL, *q_L = NULL, *q_U = NULL, *q_D = NULL;
double *q_R_old = NULL, *q_L_old = NULL, *q_U_old = NULL, *q_D_old = NULL;
double *l_R = NULL, *l_L = NULL, *l_U = NULL, *l_D = NULL;
double *l_R_old = NULL, *l_L_old = NULL, *l_U_old = NULL, *l_D_old = NULL;
double *b_R = NULL, *b_L = NULL, *b_U = NULL, *b_D = NULL;
double *b_R_old = NULL, *b_L_old = NULL, *b_U_old = NULL, *b_D_old = NULL;
double *pressao = NULL, *pressao_old = NULL;
/* - - - - - - - GPU pointers - - - - - - - */
double *dev_mat_perm = NULL, *dev_mat_font = NULL, *dev_mat_epsilon = NULL;
double *dev_q_R = NULL, *dev_q_L = NULL, *dev_q_U = NULL, *dev_q_D = NULL;
double *dev_q_R_old = NULL, *dev_q_L_old = NULL, *dev_q_U_old = NULL, *dev_q_D_old = NULL;
double *dev_l_R = NULL, *dev_l_L = NULL, *dev_l_U = NULL, *dev_l_D = NULL;
double *dev_l_R_old = NULL, *dev_l_L_old = NULL, *dev_l_U_old = NULL, *dev_l_D_old = NULL;
double *dev_b_R = NULL, *dev_b_L = NULL, *dev_b_U = NULL, *dev_b_D = NULL;
double *dev_b_R_old = NULL, *dev_b_L_old = NULL, *dev_b_U_old = NULL, *dev_b_D_old = NULL;
double *dev_pressao = NULL, *dev_pressao_old = NULL;
double *dev_aux = NULL, *dev_erro = NULL, *dev_media = NULL, *dev_sum1 = NULL, *dev_sum2 = NULL;
//- - - - - - - - - - - - - - FIM - GLOBAIS - - - - - - - - - - - - - - //
__global__ void escoamento_monofasico(
double *dev_mat_perm, double *dev_mat_font, double *dev_mat_epsilon,
double *dev_q_R, double *dev_q_L, double *dev_q_U, double *dev_q_D,
double *dev_q_R_old, double *dev_q_L_old, double *dev_q_U_old, double *dev_q_D_old,
double *dev_l_R, double *dev_l_L, double *dev_l_U, double *dev_l_D,
double *dev_l_R_old, double *dev_l_L_old, double *dev_l_U_old, double *dev_l_D_old,
double *dev_b_R, double *dev_b_L, double *dev_b_U, double *dev_b_D,
double *dev_b_R_old, double *dev_b_L_old, double *dev_b_U_old, double *dev_b_D_old,
double *dev_pressao, double *dev_pressao_old,
double *dev_aux, double dev_erro, double dev_media, double dev_sum1, double dev_sum2
){
/*int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
a[offset] = offset;*/
/* verify the boundary conditions */
int flag_thread_centrais = 1;
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
/*int offset = (blockDim.x * gridDim.x) + 1; // offset into the padded region (padded size = n + 2)
*/
int tid = x + y * blockDim.x * gridDim.x;
//check this offset so it does not cause problems (apply the offset only when storing)
//int tid = (x + y * blockDim.x * gridDim.x) + offset; // tid gives the index into the vector
int dimensao_x = blockDim.x * gridDim.x;
int dimensao_y = blockDim.y * gridDim.y;
int eq_tid_cant_sup_dir = blockDim.x * gridDim.x - 1; // index of the top-right corner
int eq_tid_cant_inf_dir = ((gridDim.x * blockDim.x) * (gridDim.y * blockDim.y)) - 1; // index of the bottom-right corner
int eq_tid_cant_inf_esq = (gridDim.x * blockDim.x) * (gridDim.y * blockDim.y - 1); // index of the bottom-left corner
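/* classify this thread by its position in the logical grid: the four corners, the four
boundary strips and the interior are handled below with different flag combinations,
where each flag passed to calcula_pressao_velocidade(tid, uper, right, down, left)
enables the contribution of the corresponding neighbouring cell */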
if(tid == 0){//top-left corner
/*CHECK THE BOUNDARY CONDITIONS*/
calcula_pressao_velocidade(tid, 0, 1, 1, 0);
atualiza_mult_lagrange(tid);
flag_thread_centrais = 0;
}
if(tid == eq_tid_cant_sup_dir){//top-right corner
/*CHECK THE BOUNDARY CONDITIONS*/
calcula_pressao_velocidade(tid, 0, 0, 1, 1);
atualiza_mult_lagrange(tid);
flag_thread_centrais = 0;
}
if(tid == eq_tid_cant_inf_esq){//bottom-left corner
/*CHECK THE BOUNDARY CONDITIONS*/
calcula_pressao_velocidade(tid, 0, 1, 1, 0);
atualiza_mult_lagrange(tid);
flag_thread_centrais = 0;
}
if(tid == eq_tid_cant_inf_dir){//bottom-right corner
/*CHECK THE BOUNDARY CONDITIONS*/
calcula_pressao_velocidade(tid, 1, 0, 0, 1);
atualiza_mult_lagrange(tid);
flag_thread_centrais = 0;
}
if((tid > 0) && (tid < eq_tid_cant_sup_dir)){//top boundary
/*CHECK THE BOUNDARY CONDITIONS*/
calcula_pressao_velocidade(tid, 0, 1, 1, 1);
atualiza_mult_lagrange(tid);
flag_thread_centrais = 0;
}
if((tid > eq_tid_cant_sup_dir) && (tid < eq_tid_cant_inf_dir) && (tid % dimensao_x == eq_tid_cant_sup_dir)){ //right boundary
/*CHECK THE BOUNDARY CONDITIONS*/
calcula_pressao_velocidade(tid, 1, 0, 1, 1);
atualiza_mult_lagrange(tid);
flag_thread_centrais = 0;
}
if((tid > eq_tid_cant_inf_esq) && (tid < eq_tid_cant_inf_dir)){ //bottom boundary
/*CHECK THE BOUNDARY CONDITIONS*/
calcula_pressao_velocidade(tid, 1, 1, 0, 1);
atualiza_mult_lagrange(tid);
flag_thread_centrais = 0;
}
if((tid > 0) && (tid < eq_tid_cant_inf_dir) && (tid < eq_tid_cant_inf_esq) && (tid % dimensao_y == 0)){//left boundary
/*CHECK THE BOUNDARY CONDITIONS*/
calcula_pressao_velocidade(tid, 1, 1, 1, 0);
atualiza_mult_lagrange(tid);
flag_thread_centrais = 0;
}
if(flag_thread_centrais){
/*CHECK THE BOUNDARY CONDITIONS*/
calcula_pressao_velocidade(tid, 1, 1, 1, 1);
atualiza_mult_lagrange(tid);
}
/*
*
*SYNCHRONIZE
*COMMENTS
*ALLOCATE the variable aux with the size of "tids"
*CHECK ATOMICITY FOR FLOATING-POINT VALUES
*CHECK THE ALLOCATION OF THE GLOBAL MEMORY BUFFERS
*allocate memory for erro
*allocate double media = 0.0, sum1 = 0.0, sum2 = 0.0;
*/
__syncthreads();
if(tid == eq_tid_cant_inf_dir){
int i = 0;
for(i = 0; i <= eq_tid_cant_inf_dir; i++){
dev_media = dev_media + dev_pressao[i];
}
dev_media = dev_media / (eq_tid_cant_inf_dir + 1);
}
__syncthreads();
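// subtract the mean from the pressure field and from the Lagrange multipliers,
// imposing a zero-mean pressure distribution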
dev_pressao[tid] -= dev_media;
dev_l_D[tid] -= dev_media;
dev_l_U[tid] -= dev_media;
dev_l_L[tid] -= dev_media;
dev_l_R[tid] -= dev_media;
//evaluating the convergence criterion
dev_aux[tid] = dev_pressao[tid] - dev_pressao_old[tid];
__syncthreads();
if(tid == eq_tid_cant_inf_dir){
int i = 0;
for(i = 0; i <= eq_tid_cant_inf_dir; i++){
dev_sum1 += dev_aux[i] * dev_aux[i];
dev_sum2 += dev_pressao[i] * dev_pressao[i];
}
dev_erro = sqrt(dev_sum1/dev_sum2);
}
__syncthreads();
if (dev_erro < 1e-5)
return; // converged: there is no enclosing loop here, so `break` would not compile
dev_pressao_old[tid] = dev_pressao[tid];
dev_q_U_old[tid] = dev_q_U[tid];
dev_q_R_old[tid] = dev_q_R[tid];
dev_q_L_old[tid] = dev_q_L[tid];
dev_q_D_old[tid] = dev_q_D[tid];
dev_l_D_old[tid] = dev_l_D[tid];
dev_l_U_old[tid] = dev_l_U[tid];
dev_l_L_old[tid] = dev_l_L[tid];
dev_l_R_old[tid] = dev_l_R[tid];
/*
* Imposing zero mean on the pressure distribution
* Computation of the mean
*/
/*
atomicAdd( &media, dev_pressao[tid] );
//atomicSub( &aux[tid], dev_pressao[tid] - dev_pressao_old[tid] );
__syncthreads();
dev_pressao[tid] -= M;
dev_l_D[tid] -= M;
dev_l_U[tid] -= M;
dev_l_L[tid] -= M;
dev_l_R[tid] -= M;
//avaliando criterio de convergencia
aux[tid] = dev_pressao[tid] - dev_b_D_old[tid];
__syncthreads();
atomicAdd( &sum1, aux[tid] * aux[tid] );
atomicAdd( &sum2, dev_pressao[tid] * dev_pressao[tid] );
__syncthreads();
if(tid == 0)
erro = sqrt(sum1/sum2);
if (erro < 1e-5) return 0;
p_old[j][k] = p[j][k];
dev_pressao_old[tid] = dev_pressao_old[tid];
dev_q_U_old[tid] = dev_q_U[tid];
dev_q_R_old[tid] = dev_q_R[tid];
dev_q_L_old[tid] = dev_q_L[tid];
dev_q_D_old[tid] = dev_q_D[tid];
dev_l_D_old[tid] = dev_l_D[tid];
dev_l_U_old[tid] = dev_l_U[tid];
dev_l_L_old[tid] = dev_l_L[tid];
dev_l_R_old[tid] = dev_l_R[tid];*/
}
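/*
* Note: dev_erro, dev_media, dev_sum1 and dev_sum2 are received by this kernel by value,
* so the reductions computed above are never visible to the host or to later launches; the
* double* buffers of the same names allocated in inicializa_parametros() suggest they were
* meant to be passed as device pointers. A minimal host-side driver sketch, assuming that
* signature change (and therefore not the exact interface used here), could be:
*
* dim3 threads(tam_mat_interna, tam_mat_interna);
* double erro = 1.0;
* for (int it = 0; it < max_interacoes && erro >= erro_max; it++) {
* escoamento_monofasico<<<1, threads>>>(dev_mat_perm, dev_mat_font, dev_mat_epsilon,
* dev_q_R, dev_q_L, dev_q_U, dev_q_D, dev_q_R_old, dev_q_L_old, dev_q_U_old, dev_q_D_old,
* dev_l_R, dev_l_L, dev_l_U, dev_l_D, dev_l_R_old, dev_l_L_old, dev_l_U_old, dev_l_D_old,
* dev_b_R, dev_b_L, dev_b_U, dev_b_D, dev_b_R_old, dev_b_L_old, dev_b_U_old, dev_b_D_old,
* dev_pressao, dev_pressao_old, dev_aux, dev_erro, dev_media, dev_sum1, dev_sum2);
* HANDLE_ERROR( hipMemcpy(&erro, dev_erro, sizeof(double), hipMemcpyDeviceToHost) );
* }
*
* This is only a sketch of one possible driver loop for the iteration implied above.
*/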
__device__ char atualiza_mult_lagrange(int tid){
int index_mem_central = 0, index_mem_down = 0, index_mem_uper = 0;
int index_mem_left = 0, index_mem_right = 0;
int comprimento_kernel = blockDim.x * gridDim.x;
int offset = (blockDim.x * gridDim.x) + 1;
index_mem_central = tid + ((tid/comprimento_kernel)*2) + offset;
index_mem_uper = index_mem_central - (offset -1); // (offset -1) = width of the kernel grid
index_mem_down = index_mem_central + (offset -1);
index_mem_left = index_mem_central - 1;
index_mem_right = index_mem_central + 1;
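// index_mem_central is meant to convert the kernel-local tid (interior grid of width
// comprimento_kernel) into an index into the padded tam_mat_real x tam_mat_real matrices;
// the four derived indices then address the up/down/left/right neighbours. Each Lagrange
// multiplier is rebuilt from the neighbour's flux and multiplier of the previous iteration.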
dev_l_U[index_mem_central] = dev_b_U[index_mem_central] * (dev_q_U[index_mem_central] + dev_q_D_old[index_mem_uper]) + dev_l_D_old[index_mem_uper];
dev_l_D[index_mem_central] = dev_b_D[index_mem_central] * (dev_q_D[index_mem_central] + dev_q_U_old[index_mem_down]) + dev_l_U_old[index_mem_down];
dev_l_R[index_mem_central] = dev_b_R[index_mem_central] * (dev_q_R[index_mem_central] + dev_q_L_old[index_mem_right]) + dev_l_L_old[index_mem_right];
dev_l_L[index_mem_central] = dev_b_L[index_mem_central] * (dev_q_L[index_mem_central] + dev_q_R_old[index_mem_left]) + dev_l_R_old[index_mem_left];
return 0;
}
__device__ char calcula_pressao_velocidade(int tid, int uper, int right, int down, int left){
double auxU = 0.0, auxD = 0.0, auxR = 0.0, auxL = 0.0, DU = 0.0, DD = 0.0, DR = 0.0, DL = 0.0;
int index_mem_central = 0, index_mem_down = 0, index_mem_uper = 0;
int index_mem_left = 0, index_mem_right = 0;
int comprimento_kernel = blockDim.x * gridDim.x;
int offset = (blockDim.x * gridDim.x) + 1;
index_mem_central = tid + ((tid/comprimento_kernel)*2) + offset;
index_mem_uper = index_mem_central - (offset -1); // (offset -1) = width of the kernel grid
index_mem_down = index_mem_central + (offset -1);
index_mem_left = index_mem_central - 1;
index_mem_right = index_mem_central + 1;
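// uper/right/down/left select which interfaces contribute (0 on an edge that lies on the
// physical boundary, 1 otherwise); auxX is the interface coefficient built from epsilon
// and the Robin beta b_X, and DX gathers the neighbour's previous flux and multiplier.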
if(uper == 1){
auxU = dev_mat_epsilon[index_mem_central] / (1 + dev_b_U[index_mem_central] * dev_mat_epsilon[index_mem_central]);
DU = auxU * (dev_b_U[index_mem_central] * dev_q_D_old[index_mem_uper] + dev_l_D_old[index_mem_uper]);
}
if(right == 1){
auxR = dev_mat_epsilon[index_mem_central] / (1 + dev_b_R[index_mem_central] * dev_mat_epsilon[index_mem_central]);
DR = auxR * (dev_b_R[index_mem_central] * dev_q_L_old[index_mem_right] + dev_l_L_old[index_mem_right]);
}
if(down == 1){
auxD = dev_mat_epsilon[index_mem_central] / (1 + dev_b_D[index_mem_central] * dev_mat_epsilon[index_mem_central]);
DD = auxD * (dev_b_D[index_mem_central] * dev_q_U_old[index_mem_down] + dev_l_U_old[index_mem_down]);
}
if(left == 1){
auxL = dev_mat_epsilon[index_mem_central] / (1 + dev_b_L[index_mem_central] * dev_mat_epsilon[index_mem_central]);
DL = auxL * (dev_b_L[index_mem_central] * dev_q_R_old[index_mem_left] + dev_l_R_old[index_mem_left]);
}
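// cell pressure from the source term plus the active directional contributions,
// then the four face fluxes are recovered from the new pressure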
dev_pressao[index_mem_central] = (dev_mat_font[index_mem_central] + DU + DR + DD + DL) / (auxU + auxR + auxD + auxL);
dev_q_L[index_mem_central] = auxL * dev_pressao[index_mem_central] - DL;
dev_q_R[index_mem_central] = auxR * dev_pressao[index_mem_central] - DR;
dev_q_U[index_mem_central] = auxU * dev_pressao[index_mem_central] - DU;
dev_q_D[index_mem_central] = auxD * dev_pressao[index_mem_central] - DD;
return 0;
}
int main(void){
le_entrada();
inicializa_parametros();
cal_cond_robin();
parametro_independentes();
int i = 0, j = 0;
/*
printf("\ntam_mat_interna = %d\n", tam_mat_interna);
printf("tam_mat_real = %d\n", tam_mat_real);
printf("max_interacoes = %d\n", max_interacoes);
printf("op_contorno = %d\n", op_contorno);
printf("tam_regiao = %lf\n", tam_regiao);
printf("erro_max = %lf\n", erro_max);
printf("valor_contor = %lf\n", valor_contor);
printf("\n\nmat_font:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%1.e ", mat_font[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\nmat_perm:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%1.e ", mat_perm[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\nmat_epsilon:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%1.e ", mat_epsilon[i*tam_mat_real + j]);
printf("\n");
}
*/
printf("\n\nb_U:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%1.e ", b_U[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\nb_R:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%1.e ", b_R[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\nb_D:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%1.e ", b_D[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\nb_L:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%1.e ", b_L[i*tam_mat_real + j]);
printf("\n");
}
system("pause");
return 0;
}
char le_entrada(){
printf("\n\n\t\t - - CARREGANDO ENTRADA - - \n\n");
FILE *arq = NULL;
arq = fopen("../dir_entrada/parametro_entrada.txt", "r");
if(arq == NULL){
printf("Erro ao abrir aquivo: 'parametro_entrada.txt'\n\t\tCertifique-se que o arquivo exite.\n");
exit(1);
}
else{
printf("\t\t - - LENDO ARQUIVO DE ENTRADA - -\n");
/*char c[2], dados[255], buffer[255];*/
char buffer[255];
int cont = 1;
while(cont < 9){
fscanf(arq, "%s", buffer);
//puts(buffer);
int i = 0, j = 0;
switch(strlen(buffer)){
case 8: //erro_maximo
fscanf(arq, "%lf", &erro_max);
break;
case 10: //tam_regiao
fscanf(arq, "%lf", &tam_regiao);
break;
case 11: //opcao_contorno
fscanf(arq, "%d", &op_contorno);
break;
case 12: //valor_contor
fscanf(arq, "%lf", &valor_contor);
break;
case 14: //max_interacoes
fscanf(arq, "%d", &max_interacoes);
break;
case 15: //tam_mat_interna
fscanf(arq, "%d", &tam_mat_interna);
break;
case 16: //matriz_de_fontes
//use (tam_mat_interna + 2) because 'tam_mat_real' has not been initialized yet
mat_font = aloca_matriz(tam_mat_interna + 2, tam_mat_interna + 2);
for(i = 1; i < (tam_mat_interna + 2) - 1; i ++)
for(j = 1; j < (tam_mat_interna + 2) - 1 ; j++)
fscanf(arq, "%lf", &mat_font[i*(tam_mat_interna+2) + j]);
break;
case 18: //matriz_permeabilidade
mat_perm = aloca_matriz(tam_mat_interna + 2, tam_mat_interna + 2);
mat_epsilon = aloca_matriz(tam_mat_interna + 2, tam_mat_interna + 2);
for(i = 1; i < (tam_mat_interna + 2) - 1; i ++)
for(j = 1; j < (tam_mat_interna + 2) - 1 ; j++)
fscanf(arq, "%lf", &mat_perm[i*(tam_mat_interna+2) + j]);
break;
default:
printf("\n\n\t\tHouve algum erro no aquivo de entrada!\n\n");
return 0;
}
//int tam = strlen(buffer);
cont++;
}
printf("\t\t - - ARQUIVO DE ENTRADA CARREGADO - -\n");
}
printf("\n\n\t\t - - ENTRADA CARREGA - - \n\n");
return 1;
}
char inicializa_parametros(){
printf("\n\n\t\t- - INICIALIZANDO PARAMETROS - - \n\n\n");
tam_mat_real = tam_mat_interna + 2;
h = tam_regiao / tam_mat_interna;
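// Note: aloca_matriz() exits the program when allocation fails, so the host pointers below
// are never NULL at this point; the guarded hipMalloc() calls are therefore never executed
// and the dev_* buffers stay unallocated before copia_dados_para_gpu() is called.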
q_R = aloca_matriz(tam_mat_real, tam_mat_real);
if(q_R == NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_q_R, tam_mat_real * tam_mat_real * sizeof(double) ) );
q_L = aloca_matriz(tam_mat_real, tam_mat_real);
if(q_L == NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_q_L, tam_mat_real * tam_mat_real * sizeof(double) ) );
q_U = aloca_matriz(tam_mat_real, tam_mat_real);
if(q_U == NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_q_U, tam_mat_real * tam_mat_real * sizeof(double) ) );
q_D = aloca_matriz(tam_mat_real, tam_mat_real);
if(q_D == NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_q_D, tam_mat_real * tam_mat_real * sizeof(double) ) );
q_R_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(q_R_old == NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_q_R_old, tam_mat_real * tam_mat_real * sizeof(double) ) );
q_L_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(q_L_old == NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_q_L_old, tam_mat_real * tam_mat_real * sizeof(double) ) );
q_U_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(q_U_old == NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_q_U_old, tam_mat_real * tam_mat_real * sizeof(double) ) );
q_D_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(q_D_old == NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_q_D_old, tam_mat_real * tam_mat_real * sizeof(double) ) );
l_R = aloca_matriz(tam_mat_real, tam_mat_real);
if(l_R == NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_l_R, tam_mat_real * tam_mat_real * sizeof(double) ) );
l_L = aloca_matriz(tam_mat_real, tam_mat_real);
if(l_L == NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_l_L, tam_mat_real * tam_mat_real * sizeof(double) ) );
l_U = aloca_matriz(tam_mat_real, tam_mat_real);
if(l_U == NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_l_U, tam_mat_real * tam_mat_real * sizeof(double) ) );
l_D = aloca_matriz(tam_mat_real, tam_mat_real);
if(l_D == NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_l_D, tam_mat_real * tam_mat_real * sizeof(double) ) );
l_R_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(l_R_old == NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_l_R_old, tam_mat_real * tam_mat_real * sizeof(double) ) );
l_L_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(l_L_old == NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_l_L_old, tam_mat_real * tam_mat_real * sizeof(double) ) );
l_U_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(l_U_old == NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_l_U_old, tam_mat_real * tam_mat_real * sizeof(double) ) );
l_D_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(l_D_old == NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_l_D_old, tam_mat_real * tam_mat_real * sizeof(double) ) );
b_R = aloca_matriz(tam_mat_real, tam_mat_real);
if(b_R == NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_b_R, tam_mat_real * tam_mat_real * sizeof(double) ) );
b_L = aloca_matriz(tam_mat_real, tam_mat_real);
if(b_L == NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_b_L, tam_mat_real * tam_mat_real * sizeof(double) ) );
b_U = aloca_matriz(tam_mat_real, tam_mat_real);
if(b_U == NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_b_U, tam_mat_real * tam_mat_real * sizeof(double) ) );
b_D = aloca_matriz(tam_mat_real, tam_mat_real);
if(b_D == NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_b_D, tam_mat_real * tam_mat_real * sizeof(double) ) );
b_R_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(b_R_old == NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_b_R_old, tam_mat_real * tam_mat_real * sizeof(double) ) );
b_L_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(b_L_old == NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_b_L_old, tam_mat_real * tam_mat_real * sizeof(double) ) );
b_U_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(b_U_old == NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_b_U_old, tam_mat_real * tam_mat_real * sizeof(double) ) );
b_D_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(b_D_old == NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_b_D_old, tam_mat_real * tam_mat_real * sizeof(double) ) );
pressao = aloca_matriz(tam_mat_real, tam_mat_real);
if(pressao == NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_pressao, tam_mat_real * tam_mat_real * sizeof(double) ) );
pressao_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(pressao_old == NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_pressao_old, tam_mat_real * tam_mat_real * sizeof(double) ) );
HANDLE_ERROR( hipMalloc( (void**)&dev_aux, tam_mat_real * tam_mat_real * sizeof(double) ) );
HANDLE_ERROR( hipMemset( dev_aux, 0, tam_mat_real * tam_mat_real * sizeof(double) ) );
HANDLE_ERROR( hipMalloc( (void**)&dev_erro, sizeof(double) ) );
HANDLE_ERROR( hipMemset( dev_erro, 0, sizeof(double) ) );
HANDLE_ERROR( hipMalloc( (void**)&dev_media, sizeof(double) ) );
HANDLE_ERROR( hipMemset( dev_media, 0, sizeof(double) ) );
HANDLE_ERROR( hipMalloc( (void**)&dev_sum1, sizeof(double) ) );
HANDLE_ERROR( hipMemset( dev_sum1, 0, sizeof(double) ) );
HANDLE_ERROR( hipMalloc( (void**)&dev_sum2, sizeof(double) ) );
HANDLE_ERROR( hipMemset( dev_sum2, 0, sizeof(double) ) );
int i = 0;
switch(op_contorno){
case 1: //Inicializa contorno superior
for(i = 0; i < tam_mat_real -1; i++){
q_D[i] = valor_contor;
q_D_old[i] = valor_contor;
}
break;
case 2://Inicializa contorno esquerdo
for(i = 0; i < tam_mat_real; i++){
q_R[i*tam_mat_real] = valor_contor;
q_R_old[i*tam_mat_real] = valor_contor;
}
break;
case 3://Inicializa contorno direito
for(i = 0; i < tam_mat_real; i++){
q_L[i*tam_mat_real + (tam_mat_real - 1)] = valor_contor;
q_L_old[i*tam_mat_real + (tam_mat_real - 1)] = valor_contor;
}
break;
case 4://Inicializa contorno inferior
for(i = 0; i < tam_mat_real; i++){
q_L[(tam_mat_real-1)*tam_mat_real + i] = valor_contor;
q_L_old[(tam_mat_real-1)*tam_mat_real + i] = valor_contor;
}
break;
default:
printf("\n\n\t\t - - OCORREU ALGUM ERRO NA OPCAO DE CONTORNO - - \n\n");
break;
}
printf("\n\n\t\t- - FIM DA INICIALIZACAO PARAMETROS - - \n\n\n");
return 1;
}
double * aloca_matriz(int L, int C){
double *aux = NULL;
aux = (double *) calloc(L * C, sizeof(double));
if(aux == NULL){
printf("\n\n\t\tErro ao alocar memoria\n\n");
exit(1);
}else{
return aux;
}
return NULL;
}
/*
*
*CHECK THE RETURN VALUE
*
*/
void cal_cond_robin(){
double keff = 0.0, numerador = 0.0, denominador = 0.0;
double C = 1.0; // dimensionless constant adjusted experimentally, C = 1.0
//Top-left corner
numerador = ( 2 * mat_perm[tam_mat_real + 1] * mat_perm[tam_mat_real + 2] );
denominador = ( mat_perm[tam_mat_real + 1] + mat_perm[tam_mat_real + 2] );
keff = numerador / denominador;
b_R[tam_mat_real + 1] = C*h/keff;
numerador = (2 * mat_perm[tam_mat_real + 1] * mat_perm[(2*tam_mat_real) + 1]);
denominador = ( mat_perm[tam_mat_real + 1] + mat_perm[(2*tam_mat_real) + 1]);
keff = numerador / denominador;
b_D[tam_mat_real + 1] = C*h/keff;
//Top-right corner
numerador = ( 2 * mat_perm[tam_mat_real + tam_mat_interna] * mat_perm[tam_mat_real + (tam_mat_interna - 1)] );
denominador = ( mat_perm[tam_mat_real + tam_mat_interna] + mat_perm[tam_mat_real + (tam_mat_interna - 1)] );
keff = numerador / denominador;
b_L[tam_mat_real + tam_mat_interna] = C*h/keff;
numerador = ( 2 * mat_perm[tam_mat_real + tam_mat_interna] * mat_perm[(2 * tam_mat_real) + tam_mat_interna] );
denominador = ( mat_perm[tam_mat_real + tam_mat_interna] + mat_perm[(2 * tam_mat_real) + tam_mat_interna] );
keff = numerador / denominador;
b_D[tam_mat_real + tam_mat_interna] = C*h/keff;
//Bottom-left corner
numerador = ( 2 * mat_perm[(tam_mat_real * tam_mat_interna) + 1] * mat_perm[(tam_mat_real * (tam_mat_interna - 1)) + 1] );
denominador = ( mat_perm[(tam_mat_real * tam_mat_interna) + 1] + mat_perm[(tam_mat_real * (tam_mat_interna - 1)) + 1] );
keff = numerador / denominador;
b_U[(tam_mat_real * tam_mat_interna) + 1] = C*h/keff;
numerador = ( 2 * mat_perm[(tam_mat_real * tam_mat_interna) + 1] * mat_perm[(tam_mat_real * tam_mat_interna) + 2] );
denominador = ( mat_perm[(tam_mat_real * tam_mat_interna) + 1] + mat_perm[(tam_mat_real * tam_mat_interna) + 2] );
keff = numerador / denominador;
b_R[(tam_mat_real * tam_mat_interna) + 1] = C*h/keff;
//Bottom-right corner
numerador = ( 2 * mat_perm[(tam_mat_real * tam_mat_interna) + tam_mat_interna] * mat_perm[(tam_mat_real * (tam_mat_interna - 1)) + tam_mat_interna] );
denominador = ( mat_perm[(tam_mat_real * tam_mat_interna) + tam_mat_interna] + mat_perm[(tam_mat_real * (tam_mat_interna - 1)) + tam_mat_interna] );
keff = numerador / denominador;
b_U[(tam_mat_real * tam_mat_interna) + tam_mat_interna] = C*h/keff;
numerador = ( 2 * mat_perm[(tam_mat_real * tam_mat_interna) + tam_mat_interna] * mat_perm[(tam_mat_real * tam_mat_interna) + (tam_mat_interna - 1)] );
denominador = ( mat_perm[(tam_mat_real * tam_mat_interna) + tam_mat_interna] + mat_perm[(tam_mat_real * tam_mat_interna) + (tam_mat_interna - 1)] );
keff = numerador / denominador;
b_L[(tam_mat_real * tam_mat_interna) + tam_mat_interna] = C*h/keff;
//Compute the betas on the boundaries and in the interior region
int i = 0;
for(i = 2; i < tam_mat_interna; i ++){
//Compute the top boundary
numerador = ( 2 * mat_perm[tam_mat_real + i] * mat_perm[tam_mat_real + (i-1)] );
denominador = ( mat_perm[tam_mat_real + i] + mat_perm[tam_mat_real + (i-1)] );
keff = numerador / denominador;
b_L[tam_mat_real + i] = C*h/keff;
numerador = ( 2 * mat_perm[tam_mat_real + i] * mat_perm[tam_mat_real + (i+1)] );
denominador = ( mat_perm[tam_mat_real + i] + mat_perm[tam_mat_real + (i+1)] );
keff = numerador / denominador;
b_R[tam_mat_real + i] = C*h/keff;
numerador = ( 2 * mat_perm[tam_mat_real + i] * mat_perm[(2 * tam_mat_real) + i] );
denominador = ( mat_perm[tam_mat_real + i] + mat_perm[(2 * tam_mat_real) + i] );
keff = numerador / denominador;
b_D[tam_mat_real + i] = C*h/keff;
//Compute the left boundary
numerador = ( 2 * mat_perm[(i * tam_mat_real) + 1] * mat_perm[((i - 1) * tam_mat_real) + 1] );
denominador = ( mat_perm[(i * tam_mat_real) + 1] + mat_perm[((i - 1) * tam_mat_real) + 1] );
keff = numerador / denominador;
b_U[(i * tam_mat_real) + 1] = C*h/keff;
numerador = ( 2 * mat_perm[(i * tam_mat_real) + 1] * mat_perm[(i * tam_mat_real) + 2] );
denominador = ( mat_perm[(i * tam_mat_real) + 1] + mat_perm[(i * tam_mat_real) + 2] );
keff = numerador / denominador;
b_R[(i * tam_mat_real) + 1] = C*h/keff;
numerador = ( 2 * mat_perm[(i * tam_mat_real) + 1] * mat_perm[((i + 1) * tam_mat_real) + 1] );
denominador = ( mat_perm[(i * tam_mat_real) + 1] + mat_perm[((i + 1) * tam_mat_real) + 1] );
keff = numerador / denominador;
b_D[(i * tam_mat_real) + 1] = C*h/keff;
//Compute the bottom boundary
numerador = ( 2 * mat_perm[(tam_mat_interna * tam_mat_real) + i] * mat_perm[(tam_mat_interna * tam_mat_real) + (i - 1)] );
denominador = ( mat_perm[(tam_mat_interna * tam_mat_real) + i] + mat_perm[(tam_mat_interna * tam_mat_real) + (i - 1)] );
keff = numerador / denominador;
b_L[(tam_mat_interna * tam_mat_real) + i] = C*h/keff;
numerador = ( 2 * mat_perm[(tam_mat_interna * tam_mat_real) + i] * mat_perm[((tam_mat_interna - 1) * tam_mat_real) + i] );
denominador = ( mat_perm[(tam_mat_interna * tam_mat_real) + i] + mat_perm[((tam_mat_interna - 1) * tam_mat_real) + i] );
keff = numerador / denominador;
b_U[(tam_mat_interna * tam_mat_real) + i] = C*h/keff;
numerador = ( 2 * mat_perm[(tam_mat_interna * tam_mat_real) + i] * mat_perm[(tam_mat_interna * tam_mat_real) + (i + 1)] );
denominador = ( mat_perm[(tam_mat_interna * tam_mat_real) + i] + mat_perm[(tam_mat_interna * tam_mat_real) + (i + 1)] );
keff = numerador / denominador;
b_R[(tam_mat_interna * tam_mat_real) + i] = C*h/keff;
//Compute the right boundary
numerador = ( 2 * mat_perm[(i * tam_mat_real) + tam_mat_interna] * mat_perm[((i-1) * tam_mat_real) + tam_mat_interna] );
denominador = ( mat_perm[(i * tam_mat_real) + tam_mat_interna] + mat_perm[((i-1) * tam_mat_real) + tam_mat_interna] );
keff = numerador / denominador;
b_U[(i * tam_mat_real) + tam_mat_interna] = C*h/keff;
numerador = ( 2 * mat_perm[(i * tam_mat_real) + tam_mat_interna] * mat_perm[(i * tam_mat_real) + (tam_mat_interna - 1)] );
denominador = ( mat_perm[(i * tam_mat_real) + tam_mat_interna] + mat_perm[(i * tam_mat_real) + (tam_mat_interna - 1)] );
keff = numerador / denominador;
b_L[(i * tam_mat_real) + tam_mat_interna] = C*h/keff;
numerador = ( 2 * mat_perm[(i * tam_mat_real) + tam_mat_interna] * mat_perm[((i+1) * tam_mat_real) + tam_mat_interna] );
denominador = ( mat_perm[(i * tam_mat_real) + tam_mat_interna] + mat_perm[((i+1) * tam_mat_real) + tam_mat_interna] );
keff = numerador / denominador;
b_D[(i * tam_mat_real) + tam_mat_interna] = C*h/keff;
//Compute the interior cells
int j = 0;
for(j = 2; j < tam_mat_interna; j ++){
numerador = ( 2 * mat_perm[(i * tam_mat_real) + j] * mat_perm[(i * tam_mat_real) + (j - 1)] );
denominador = ( mat_perm[(i * tam_mat_real) + j] + mat_perm[(i * tam_mat_real) + (j - 1)] );
keff = numerador / denominador;
b_L[(i * tam_mat_real) + j] = C*h/keff;
numerador = ( 2 * mat_perm[(i * tam_mat_real) + j] * mat_perm[(i * tam_mat_real) + (j + 1)] );
denominador = ( mat_perm[(i * tam_mat_real) + j] + mat_perm[(i * tam_mat_real) + (j + 1)] );
keff = numerador / denominador;
b_R[(i * tam_mat_real) + j] = C*h/keff;
numerador = ( 2 * mat_perm[(i * tam_mat_real) + j] * mat_perm[((i - 1) * tam_mat_real) + j] );
denominador = ( mat_perm[(i * tam_mat_real) + j] + mat_perm[((i - 1) * tam_mat_real) + j] );
keff = numerador / denominador;
b_U[(i * tam_mat_real) + j] = C*h/keff;
numerador = ( 2 * mat_perm[(i * tam_mat_real) + j] * mat_perm[((i + 1) * tam_mat_real) + j] );
denominador = ( mat_perm[(i * tam_mat_real) + j] + mat_perm[((i + 1) * tam_mat_real) + j] );
keff = numerador / denominador;
b_D[(i * tam_mat_real) + j] = C*h/keff;
}
}
}
/*
*
*CHECK THE RETURN VALUE
*
*/
char parametro_independentes(){
int i = 0, j = 0;
double constante = 2/h;
for(i = 0; i < tam_mat_real; i ++)
for(j = 0; j < tam_mat_real; j++){
mat_epsilon[i*tam_mat_real + j] = constante * mat_perm[i*tam_mat_real + j];
mat_font[i*tam_mat_real + j] *= h;
}
return 0;
}
char copia_dados_para_gpu(){
HANDLE_ERROR( hipMemcpy( dev_q_R, q_R, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_q_L, q_L, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_q_U, q_U, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_q_D, q_D, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_q_R_old, q_R_old, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_q_L_old, q_L_old, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_q_U_old, q_U_old, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_q_D_old, q_D_old, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_l_R, l_R, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_l_L, l_L, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_l_U, l_U, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_l_D, l_D, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_l_R_old, l_R_old, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_l_L_old, l_L_old, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_l_U_old, l_U_old, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_l_D_old, l_D_old, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_b_R, b_R, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_b_L, b_L, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_b_U, b_U, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_b_D, b_D, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_b_R_old, b_R_old, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_b_L_old, b_L_old, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_b_U_old, b_U_old, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_b_D_old, b_D_old, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_pressao, pressao, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_pressao_old, pressao_old, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_mat_font, mat_font, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_mat_perm, mat_perm, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_mat_epsilon, mat_epsilon, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
return 0;
}
void copia_dados_para_cpu(){
HANDLE_ERROR( hipMemcpy( q_R, dev_q_R, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( q_L, dev_q_L, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( q_U, dev_q_U, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( q_D, dev_q_D, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( q_R_old, dev_q_R_old, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( q_L_old, dev_q_L_old, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( q_U_old, dev_q_U_old, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( q_D_old, dev_q_D_old, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( l_R, dev_l_R, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( l_L, dev_l_L, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( l_U, dev_l_U, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( l_D, dev_l_D, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( l_R_old, dev_l_R_old, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( l_L_old, dev_l_L_old, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( l_U_old, dev_l_U_old, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( l_D_old, dev_l_D_old, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( b_R, dev_b_R, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( b_L, dev_b_L, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( b_U, dev_b_U, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( b_D, dev_b_D, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( b_R_old, dev_b_R_old, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( b_L_old, dev_b_L_old, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( b_U_old, dev_b_U_old, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( b_D_old, dev_b_D_old, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( mat_font, dev_mat_font, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( mat_perm, dev_mat_perm, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( mat_epsilon, dev_mat_epsilon, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
}
|
0149554208251813b7d24a1d825b344913182d95.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cuda.h>
#include <curand_kernel.h>
#include <cuda_runtime.h>
#define N 100
#define DIM 2
char le_entrada();
char inicializa_parametros();
double * aloca_matriz(int, int);
void cal_cond_robin();
char parametro_independentes();
char copia_dados_para_gpu();
void copia_dados_para_cpu();
__device__ char calcula_pressao_velocidade(int, int, int, int, int);
__device__ char atualiza_mult_lagrange(int tid);
static void HandleError( cudaError_t err,
const char *file,
int line ) {
if (err != cudaSuccess) {
printf( "%s in %s at line %d\n", cudaGetErrorString( err ),
file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
#define HANDLE_NULL( a ) {if (a == NULL) { \
printf( "Host memory failed in %s at line %d\n", \
__FILE__, __LINE__ ); \
exit( EXIT_FAILURE );}}
//- - - - - - - - - - - - - - GLOBAIS - - - - - - - - - - - - - - //
/* - - - - - - - Entradas Externas - - - - - - - */
int tam_mat_interna = 3, tam_mat_real = 3 + 2, max_interacoes = 1000, op_contorno = 1;
double tam_regiao = 20000.00, erro_max = 1e-5, valor_contor = 2.00;
double h = 20000.00 / 3; // ALTURA H = TAM_REGIAO / TAM_MAT_INTERNA
double *mat_perm = NULL, *mat_font = NULL, *mat_epsilon = NULL;
/* - - - - - - - Fim das Entradas Externas - - - - - - - */
/* - - - - - - - Ponteiros para CPU - - - - - - - */
double *q_R = NULL, *q_L = NULL, *q_U = NULL, *q_D = NULL;
double *q_R_old = NULL, *q_L_old = NULL, *q_U_old = NULL, *q_D_old = NULL;
double *l_R = NULL, *l_L = NULL, *l_U = NULL, *l_D = NULL;
double *l_R_old = NULL, *l_L_old = NULL, *l_U_old = NULL, *l_D_old = NULL;
double *b_R = NULL, *b_L = NULL, *b_U = NULL, *b_D = NULL;
double *b_R_old = NULL, *b_L_old = NULL, *b_U_old = NULL, *b_D_old = NULL;
double *pressao = NULL, *pressao_old = NULL;
/* - - - - - - - Ponteiros para GPU - - - - - - - */
double *dev_mat_perm = NULL, *dev_mat_font = NULL, *dev_mat_epsilon = NULL;
double *dev_q_R = NULL, *dev_q_L = NULL, *dev_q_U = NULL, *dev_q_D = NULL;
double *dev_q_R_old = NULL, *dev_q_L_old = NULL, *dev_q_U_old = NULL, *dev_q_D_old = NULL;
double *dev_l_R = NULL, *dev_l_L = NULL, *dev_l_U = NULL, *dev_l_D = NULL;
double *dev_l_R_old = NULL, *dev_l_L_old = NULL, *dev_l_U_old = NULL, *dev_l_D_old = NULL;
double *dev_b_R = NULL, *dev_b_L = NULL, *dev_b_U = NULL, *dev_b_D = NULL;
double *dev_b_R_old = NULL, *dev_b_L_old = NULL, *dev_b_U_old = NULL, *dev_b_D_old = NULL;
double *dev_pressao = NULL, *dev_pressao_old = NULL;
double *dev_aux = NULL, *dev_erro = NULL, *dev_media = NULL, *dev_sum1 = NULL, *dev_sum2 = NULL;
//- - - - - - - - - - - - - - FIM - GLOBAIS - - - - - - - - - - - - - - //
__global__ void escoamento_monofasico(
double *dev_mat_perm, double *dev_mat_font, double *dev_mat_epsilon,
double *dev_q_R, double *dev_q_L, double *dev_q_U, double *dev_q_D,
double *dev_q_R_old, double *dev_q_L_old, double *dev_q_U_old, double *dev_q_D_old,
double *dev_l_R, double *dev_l_L, double *dev_l_U, double *dev_l_D,
double *dev_l_R_old, double *dev_l_L_old, double *dev_l_U_old, double *dev_l_D_old,
double *dev_b_R, double *dev_b_L, double *dev_b_U, double *dev_b_D,
double *dev_b_R_old, double *dev_b_L_old, double *dev_b_U_old, double *dev_b_D_old,
double *dev_pressao, double *dev_pressao_old,
double *dev_aux, double dev_erro, double dev_media, double dev_sum1, double dev_sum2
){
/*int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
a[offset] = offset;*/
/*verificar as condições de contorno*/
int flag_thread_centrais = 1;
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
/*int offset = (blockDim.x * gridDim.x) + 1; // deslocamento para o tamanho da região (tam_regiao = n + 2)
*/
int tid = x + y * blockDim.x * gridDim.x;
//verificar esse deslocamento para n causar problema (somente na hora de armazenar utilizar o deslocamento)
//int tid = (x + y * blockDim.x * gridDim.x) + offset; // tid fornece o indice do vetor
int dimensao_x = blockDim.x * gridDim.x;
int dimensao_y = blockDim.y * gridDim.y;
int eq_tid_cant_sup_dir = blockDim.x * gridDim.x - 1; // posição extremo sup direito
int eq_tid_cant_inf_dir = ((gridDim.x * blockDim.x) * (gridDim.y * blockDim.y)) - 1; // posição extremo inf direito
int eq_tid_cant_inf_esq = (gridDim.x * blockDim.x) * (gridDim.y * blockDim.y - 1); // posição extremo inf esquerdo
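/* the checks below classify tid as one of the four corners, one of the four boundary
strips, or an interior cell; the 0/1 arguments to calcula_pressao_velocidade enable only
the neighbours that actually exist for that position */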
if(tid == 0){//canto superior esquerdo
/*VERIFICAR AS CONDIÇÕES DE CONTORNO*/
calcula_pressao_velocidade(tid, 0, 1, 1, 0);
atualiza_mult_lagrange(tid);
flag_thread_centrais = 0;
}
if(tid == eq_tid_cant_sup_dir){//canto superior direito
/*VERIFICAR AS CONDIÇÕES DE CONTORNO*/
calcula_pressao_velocidade(tid, 0, 0, 1, 1);
atualiza_mult_lagrange(tid);
flag_thread_centrais = 0;
}
if(tid == eq_tid_cant_inf_esq){//canto inferior esquerdo
/*VERIFICAR AS CONDIÇÕES DE CONTORNO*/
calcula_pressao_velocidade(tid, 0, 1, 1, 0);
atualiza_mult_lagrange(tid);
flag_thread_centrais = 0;
}
if(tid == eq_tid_cant_inf_dir){//canto inferior direito
/*VERIFICAR AS CONDIÇÕES DE CONTORNO*/
calcula_pressao_velocidade(tid, 1, 0, 0, 1);
atualiza_mult_lagrange(tid);
flag_thread_centrais = 0;
}
if((tid > 0) && (tid < eq_tid_cant_sup_dir)){//fronteira superior
/*VERIFICAR AS CONDIÇÕES DE CONTORNO*/
calcula_pressao_velocidade(tid, 0, 1, 1, 1);
atualiza_mult_lagrange(tid);
flag_thread_centrais = 0;
}
if((tid > eq_tid_cant_sup_dir) && (tid < eq_tid_cant_inf_dir) && (tid % dimensao_x == eq_tid_cant_sup_dir)){ //fronteira direita
/*VERIFICAR AS CONDIÇÕES DE CONTORNO*/
calcula_pressao_velocidade(tid, 1, 0, 1, 1);
atualiza_mult_lagrange(tid);
flag_thread_centrais = 0;
}
if((tid > eq_tid_cant_inf_esq) && (tid < eq_tid_cant_inf_dir)){ //fronteira inferior
/*VERIFICAR AS CONDIÇÕES DE CONTORNO*/
calcula_pressao_velocidade(tid, 1, 1, 0, 1);
atualiza_mult_lagrange(tid);
flag_thread_centrais = 0;
}
if((tid > 0) && (tid < eq_tid_cant_inf_dir) && (tid < eq_tid_cant_inf_esq) && (tid % dimensao_y == 0)){//fronteira esquerda
/*VERIFICAR AS CONDIÇÕES DE CONTORNO*/
calcula_pressao_velocidade(tid, 1, 1, 1, 0);
atualiza_mult_lagrange(tid);
flag_thread_centrais = 0;
}
if(flag_thread_centrais){
/*VERIFICAR AS CONDIÇÕES DE CONTORNO*/
calcula_pressao_velocidade(tid, 1, 1, 1, 1);
atualiza_mult_lagrange(tid);
}
/*
*
*SINCRONIZA
*COMENTARIOS
*ALOCAR VARIÁVEL aux com o tamanho de "tids"
*VERIFICAR ATOMICIDADE PRA VALORES FLOAT
*VERIFICAR ALOCAÇÃO DAS MEMÓRIAS GLOBAIS
*alocar memória erro
*alocar double media = 0.0, sum1 = 0.0, sum2 = 0.0;
*/
__syncthreads();
if(tid == eq_tid_cant_inf_dir){
int i = 0;
for(i = 0; i <= eq_tid_cant_inf_dir; i++){
dev_media = dev_media + dev_pressao[i];
}
dev_media = dev_media / (eq_tid_cant_inf_dir + 1);
}
__syncthreads();
dev_pressao[tid] -= dev_media;
dev_l_D[tid] -= dev_media;
dev_l_U[tid] -= dev_media;
dev_l_L[tid] -= dev_media;
dev_l_R[tid] -= dev_media;
//avaliando criterio de convergencia
dev_aux[tid] = dev_pressao[tid] - dev_pressao_old[tid];
__syncthreads();
if(tid == eq_tid_cant_inf_dir){
int i = 0;
for(i = 0; i <= eq_tid_cant_inf_dir; i++){
dev_sum1 += dev_aux[i] * dev_aux[i];
dev_sum2 += dev_pressao[i] * dev_pressao[i];
}
dev_erro = sqrt(dev_sum1/dev_sum2);
}
__syncthreads();
if (dev_erro < 1e-5)
return; // converged: there is no enclosing loop here, so `break` would not compile
dev_pressao_old[tid] = dev_pressao[tid];
dev_q_U_old[tid] = dev_q_U[tid];
dev_q_R_old[tid] = dev_q_R[tid];
dev_q_L_old[tid] = dev_q_L[tid];
dev_q_D_old[tid] = dev_q_D[tid];
dev_l_D_old[tid] = dev_l_D[tid];
dev_l_U_old[tid] = dev_l_U[tid];
dev_l_L_old[tid] = dev_l_L[tid];
dev_l_R_old[tid] = dev_l_R[tid];
/*
* Imponiendo a media cero na distribuicao de presiones
* Calculo de la media
*/
/*
atomicAdd( &media, dev_pressao[tid] );
//atomicSub( &aux[tid], dev_pressao[tid] - dev_pressao_old[tid] );
__syncthreads();
dev_pressao[tid] -= M;
dev_l_D[tid] -= M;
dev_l_U[tid] -= M;
dev_l_L[tid] -= M;
dev_l_R[tid] -= M;
//avaliando criterio de convergencia
aux[tid] = dev_pressao[tid] - dev_b_D_old[tid];
__syncthreads();
atomicAdd( &sum1, aux[tid] * aux[tid] );
atomicAdd( &sum2, dev_pressao[tid] * dev_pressao[tid] );
__syncthreads();
if(tid == 0)
erro = sqrt(sum1/sum2);
if (erro < 1e-5) return 0;
p_old[j][k] = p[j][k];
dev_pressao_old[tid] = dev_pressao_old[tid];
dev_q_U_old[tid] = dev_q_U[tid];
dev_q_R_old[tid] = dev_q_R[tid];
dev_q_L_old[tid] = dev_q_L[tid];
dev_q_D_old[tid] = dev_q_D[tid];
dev_l_D_old[tid] = dev_l_D[tid];
dev_l_U_old[tid] = dev_l_U[tid];
dev_l_L_old[tid] = dev_l_L[tid];
dev_l_R_old[tid] = dev_l_R[tid];*/
}
__device__ char atualiza_mult_lagrange(int tid){
int index_mem_central = 0, index_mem_down = 0, index_mem_uper = 0;
int index_mem_left = 0, index_mem_right = 0;
int comprimento_kernel = blockDim.x * gridDim.x;
int offset = (blockDim.x * gridDim.x) + 1;
index_mem_central = tid + ((tid/comprimento_kernel)*2) + offset;
index_mem_uper = index_mem_central - (offset -1); // (offset -1) = comprimento do kernel
index_mem_down = index_mem_central + (offset -1);
index_mem_left = index_mem_central - 1;
index_mem_right = index_mem_central + 1;
dev_l_U[index_mem_central] = dev_b_U[index_mem_central] * (dev_q_U[index_mem_central] + dev_q_D_old[index_mem_uper]) + dev_l_D_old[index_mem_uper];
dev_l_D[index_mem_central] = dev_b_D[index_mem_central] * (dev_q_D[index_mem_central] + dev_q_U_old[index_mem_down]) + dev_l_U_old[index_mem_down];
dev_l_R[index_mem_central] = dev_b_R[index_mem_central] * (dev_q_R[index_mem_central] + dev_q_L_old[index_mem_right]) + dev_l_L_old[index_mem_right];
dev_l_L[index_mem_central] = dev_b_L[index_mem_central] * (dev_q_L[index_mem_central] + dev_q_R_old[index_mem_left]) + dev_l_R_old[index_mem_left];
return 0;
}
__device__ char calcula_pressao_velocidade(int tid, int uper, int right, int down, int left){
double auxU = 0.0, auxD = 0.0, auxR = 0.0, auxL = 0.0, DU = 0.0, DD = 0.0, DR = 0.0, DL = 0.0;
int index_mem_central = 0, index_mem_down = 0, index_mem_uper = 0;
int index_mem_left = 0, index_mem_right = 0;
int comprimento_kernel = blockDim.x * gridDim.x;
int offset = (blockDim.x * gridDim.x) + 1;
index_mem_central = tid + ((tid/comprimento_kernel)*2) + offset;
index_mem_uper = index_mem_central - (offset -1); // (offset -1) = comprimento do kernel
index_mem_down = index_mem_central + (offset -1);
index_mem_left = index_mem_central - 1;
index_mem_right = index_mem_central + 1;
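// auxX is the interface coefficient epsilon/(1 + b_X*epsilon) and DX carries the
// neighbour's flux and Lagrange multiplier from the previous iteration; disabled
// directions (flag 0) simply contribute nothing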
if(uper == 1){
auxU = dev_mat_epsilon[index_mem_central] / (1 + dev_b_U[index_mem_central] * dev_mat_epsilon[index_mem_central]);
DU = auxU * (dev_b_U[index_mem_central] * dev_q_D_old[index_mem_uper] + dev_l_D_old[index_mem_uper]);
}
if(right == 1){
auxR = dev_mat_epsilon[index_mem_central] / (1 + dev_b_R[index_mem_central] * dev_mat_epsilon[index_mem_central]);
DR = auxR * (dev_b_R[index_mem_central] * dev_q_L_old[index_mem_right] + dev_l_L_old[index_mem_right]);
}
if(down == 1){
auxD = dev_mat_epsilon[index_mem_central] / (1 + dev_b_D[index_mem_central] * dev_mat_epsilon[index_mem_central]);
DD = auxD * (dev_b_D[index_mem_central] * dev_q_U_old[index_mem_down] + dev_l_U_old[index_mem_down]);
}
if(left == 1){
auxL = dev_mat_epsilon[index_mem_central] / (1 + dev_b_L[index_mem_central] * dev_mat_epsilon[index_mem_central]);
DL = auxL * (dev_b_L[index_mem_central] * dev_q_R_old[index_mem_left] + dev_l_R_old[index_mem_left]);
}
dev_pressao[index_mem_central] = (dev_mat_font[index_mem_central] + DU + DR + DD + DL) / (auxU + auxR + auxD + auxL);
dev_q_L[index_mem_central] = auxL * dev_pressao[index_mem_central] - DL;
dev_q_R[index_mem_central] = auxR * dev_pressao[index_mem_central] - DR;
dev_q_U[index_mem_central] = auxU * dev_pressao[index_mem_central] - DU;
dev_q_D[index_mem_central] = auxD * dev_pressao[index_mem_central] - DD;
return 0;
}
int main(void){
le_entrada();
inicializa_parametros();
cal_cond_robin();
parametro_independentes();
int i = 0, j = 0;
/*
printf("\ntam_mat_interna = %d\n", tam_mat_interna);
printf("tam_mat_real = %d\n", tam_mat_real);
printf("max_interacoes = %d\n", max_interacoes);
printf("op_contorno = %d\n", op_contorno);
printf("tam_regiao = %lf\n", tam_regiao);
printf("erro_max = %lf\n", erro_max);
printf("valor_contor = %lf\n", valor_contor);
printf("\n\nmat_font:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%1.e ", mat_font[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\nmat_perm:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%1.e ", mat_perm[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\nmat_epsilon:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%1.e ", mat_epsilon[i*tam_mat_real + j]);
printf("\n");
}
*/
printf("\n\nb_U:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%1.e ", b_U[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\nb_R:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%1.e ", b_R[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\nb_D:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%1.e ", b_D[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\nb_L:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%1.e ", b_L[i*tam_mat_real + j]);
printf("\n");
}
system("pause");
return 0;
}
char le_entrada(){
printf("\n\n\t\t - - CARREGANDO ENTRADA - - \n\n");
FILE *arq = NULL;
arq = fopen("../dir_entrada/parametro_entrada.txt", "r");
if(arq == NULL){
printf("Erro ao abrir aquivo: 'parametro_entrada.txt'\n\t\tCertifique-se que o arquivo exite.\n");
exit(1);
}
else{
printf("\t\t - - LENDO ARQUIVO DE ENTRADA - -\n");
/*char c[2], dados[255], buffer[255];*/
char buffer[255];
int cont = 1;
while(cont < 9){
fscanf(arq, "%s", buffer);
//puts(buffer);
int i = 0, j = 0;
switch(strlen(buffer)){
case 8: //erro_maximo
fscanf(arq, "%lf", &erro_max);
break;
case 10: //tam_regiao
fscanf(arq, "%lf", &tam_regiao);
break;
case 11: //opcao_contorno
fscanf(arq, "%d", &op_contorno);
break;
case 12: //valor_contor
fscanf(arq, "%lf", &valor_contor);
break;
case 14: //max_interacoes
fscanf(arq, "%d", &max_interacoes);
break;
case 15: //tam_mat_interna
fscanf(arq, "%d", &tam_mat_interna);
break;
case 16: //matriz_de_fontes
//uso (tam_mat_interna + 2) - pois ainda não inicializei 'tam_mat_real'
mat_font = aloca_matriz(tam_mat_interna + 2, tam_mat_interna + 2);
for(i = 1; i < (tam_mat_interna + 2) - 1; i ++)
for(j = 1; j < (tam_mat_interna + 2) - 1 ; j++)
fscanf(arq, "%lf", &mat_font[i*(tam_mat_interna+2) + j]);
break;
case 18: //matriz_permeabilidade
mat_perm = aloca_matriz(tam_mat_interna + 2, tam_mat_interna + 2);
mat_epsilon = aloca_matriz(tam_mat_interna + 2, tam_mat_interna + 2);
for(i = 1; i < (tam_mat_interna + 2) - 1; i ++)
for(j = 1; j < (tam_mat_interna + 2) - 1 ; j++)
fscanf(arq, "%lf", &mat_perm[i*(tam_mat_interna+2) + j]);
break;
default:
printf("\n\n\t\tHouve algum erro no aquivo de entrada!\n\n");
return 0;
}
//int tam = strlen(buffer);
cont++;
}
printf("\t\t - - ARQUIVO DE ENTRADA CARREGADO - -\n");
}
printf("\n\n\t\t - - ENTRADA CARREGA - - \n\n");
return 1;
}
char inicializa_parametros(){
printf("\n\n\t\t- - INICIALIZANDO PARAMETROS - - \n\n\n");
tam_mat_real = tam_mat_interna + 2;
h = tam_regiao / tam_mat_interna;
q_R = aloca_matriz(tam_mat_real, tam_mat_real);
if(q_R == NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_q_R, tam_mat_real * tam_mat_real * sizeof(double) ) );
q_L = aloca_matriz(tam_mat_real, tam_mat_real);
if(q_L == NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_q_L, tam_mat_real * tam_mat_real * sizeof(double) ) );
q_U = aloca_matriz(tam_mat_real, tam_mat_real);
if(q_U == NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_q_U, tam_mat_real * tam_mat_real * sizeof(double) ) );
q_D = aloca_matriz(tam_mat_real, tam_mat_real);
if(q_D == NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_q_D, tam_mat_real * tam_mat_real * sizeof(double) ) );
q_R_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(q_R_old == NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_q_R_old, tam_mat_real * tam_mat_real * sizeof(double) ) );
q_L_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(q_L_old == NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_q_L_old, tam_mat_real * tam_mat_real * sizeof(double) ) );
q_U_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(q_U_old == NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_q_U_old, tam_mat_real * tam_mat_real * sizeof(double) ) );
q_D_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(q_D_old == NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_q_D_old, tam_mat_real * tam_mat_real * sizeof(double) ) );
l_R = aloca_matriz(tam_mat_real, tam_mat_real);
if(l_R == NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_l_R, tam_mat_real * tam_mat_real * sizeof(double) ) );
l_L = aloca_matriz(tam_mat_real, tam_mat_real);
if(l_L == NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_l_L, tam_mat_real * tam_mat_real * sizeof(double) ) );
l_U = aloca_matriz(tam_mat_real, tam_mat_real);
if(l_U == NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_l_U, tam_mat_real * tam_mat_real * sizeof(double) ) );
l_D = aloca_matriz(tam_mat_real, tam_mat_real);
if(l_D == NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_l_D, tam_mat_real * tam_mat_real * sizeof(double) ) );
l_R_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(l_R_old == NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_l_R_old, tam_mat_real * tam_mat_real * sizeof(double) ) );
l_L_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(l_L_old == NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_l_L_old, tam_mat_real * tam_mat_real * sizeof(double) ) );
l_U_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(l_U_old == NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_l_U_old, tam_mat_real * tam_mat_real * sizeof(double) ) );
l_D_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(l_D_old == NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_l_D_old, tam_mat_real * tam_mat_real * sizeof(double) ) );
b_R = aloca_matriz(tam_mat_real, tam_mat_real);
if(b_R == NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_b_R, tam_mat_real * tam_mat_real * sizeof(double) ) );
b_L = aloca_matriz(tam_mat_real, tam_mat_real);
if(b_L == NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_b_L, tam_mat_real * tam_mat_real * sizeof(double) ) );
b_U = aloca_matriz(tam_mat_real, tam_mat_real);
if(b_U == NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_b_U, tam_mat_real * tam_mat_real * sizeof(double) ) );
b_D = aloca_matriz(tam_mat_real, tam_mat_real);
if(b_D == NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_b_D, tam_mat_real * tam_mat_real * sizeof(double) ) );
b_R_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(b_R_old == NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_b_R_old, tam_mat_real * tam_mat_real * sizeof(double) ) );
b_L_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(b_L_old == NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_b_L_old, tam_mat_real * tam_mat_real * sizeof(double) ) );
b_U_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(b_U_old == NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_b_U_old, tam_mat_real * tam_mat_real * sizeof(double) ) );
b_D_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(b_D_old == NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_b_D_old, tam_mat_real * tam_mat_real * sizeof(double) ) );
pressao = aloca_matriz(tam_mat_real, tam_mat_real);
if(pressao == NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_pressao, tam_mat_real * tam_mat_real * sizeof(double) ) );
pressao_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(pressao_old == NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_pressao_old, tam_mat_real * tam_mat_real * sizeof(double) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_aux, tam_mat_real * tam_mat_real * sizeof(double) ) );
HANDLE_ERROR( cudaMemset( dev_aux, 0, tam_mat_real * tam_mat_real * sizeof(double) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_erro, sizeof(double) ) );
HANDLE_ERROR( cudaMemset( dev_erro, 0, sizeof(double) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_media, sizeof(double) ) );
HANDLE_ERROR( cudaMemset( dev_media, 0, sizeof(double) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_sum1, sizeof(double) ) );
HANDLE_ERROR( cudaMemset( dev_sum1, 0, sizeof(double) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_sum2, sizeof(double) ) );
HANDLE_ERROR( cudaMemset( dev_sum2, 0, sizeof(double) ) );
int i = 0;
switch(op_contorno){
case 1: //Inicializa contorno superior
for(i = 0; i < tam_mat_real -1; i++){
q_D[i] = valor_contor;
q_D_old[i] = valor_contor;
}
break;
case 2://Inicializa contorno esquerdo
for(i = 0; i < tam_mat_real; i++){
q_R[i*tam_mat_real] = valor_contor;
q_R_old[i*tam_mat_real] = valor_contor;
}
break;
case 3://Inicializa contorno direito
for(i = 0; i < tam_mat_real; i++){
q_L[i*tam_mat_real + (tam_mat_real - 1)] = valor_contor;
q_L_old[i*tam_mat_real + (tam_mat_real - 1)] = valor_contor;
}
break;
case 4://Inicializa contorno inferior
for(i = 0; i < tam_mat_real; i++){
q_L[(tam_mat_real-1)*tam_mat_real + i] = valor_contor;
q_L_old[(tam_mat_real-1)*tam_mat_real + i] = valor_contor;
}
break;
default:
printf("\n\n\t\t - - OCORREU ALGUM ERRO NA OPCAO DE CONTORNO - - \n\n");
break;
}
printf("\n\n\t\t- - FIM DA INICIALIZACAO PARAMETROS - - \n\n\n");
return 1;
}
double * aloca_matriz(int L, int C){
double *aux = NULL;
aux = (double *) calloc(L * C, sizeof(double));
if(aux == NULL){
printf("\n\n\t\tErro ao alocar memoria\n\n");
exit(1);
}else{
return aux;
}
return NULL;
}
/*
*
*VERIFICAR RETORNO
*
*/
void cal_cond_robin(){
double keff = 0.0, numerador = 0.0, denominador = 0.0;
double C = 1.0; // Cte adimensional que se ajusta experimentalmente C = 1.0
//Canto superior esquerdo
numerador = ( 2 * mat_perm[tam_mat_real + 1] * mat_perm[tam_mat_real + 2] );
denominador = ( mat_perm[tam_mat_real + 1] + mat_perm[tam_mat_real + 2] );
keff = numerador / denominador;
b_R[tam_mat_real + 1] = C*h/keff;
numerador = (2 * mat_perm[tam_mat_real + 1] * mat_perm[(2*tam_mat_real) + 1]);
denominador = ( mat_perm[tam_mat_real + 1] + mat_perm[(2*tam_mat_real) + 1]);
keff = numerador / denominador;
b_D[tam_mat_real + 1] = C*h/keff;
//Canto superior direito
numerador = ( 2 * mat_perm[tam_mat_real + tam_mat_interna] * mat_perm[tam_mat_real + (tam_mat_interna - 1)] );
denominador = ( mat_perm[tam_mat_real + tam_mat_interna] + mat_perm[tam_mat_real + (tam_mat_interna - 1)] );
keff = numerador / denominador;
b_L[tam_mat_real + tam_mat_interna] = C*h/keff;
numerador = ( 2 * mat_perm[tam_mat_real + tam_mat_interna] * mat_perm[(2 * tam_mat_real) + tam_mat_interna] );
denominador = ( mat_perm[tam_mat_real + tam_mat_interna] + mat_perm[(2 * tam_mat_real) + tam_mat_interna] );
keff = numerador / denominador;
b_D[tam_mat_real + tam_mat_interna] = C*h/keff;
//Canto inferior esquerdo
numerador = ( 2 * mat_perm[(tam_mat_real * tam_mat_interna) + 1] * mat_perm[(tam_mat_real * (tam_mat_interna - 1)) + 1] );
denominador = ( mat_perm[(tam_mat_real * tam_mat_interna) + 1] + mat_perm[(tam_mat_real * (tam_mat_interna - 1)) + 1] );
keff = numerador / denominador;
b_U[(tam_mat_real * tam_mat_interna) + 1] = C*h/keff;
numerador = ( 2 * mat_perm[(tam_mat_real * tam_mat_interna) + 1] * mat_perm[(tam_mat_real * tam_mat_interna) + 2] );
denominador = ( mat_perm[(tam_mat_real * tam_mat_interna) + 1] + mat_perm[(tam_mat_real * tam_mat_interna) + 2] );
keff = numerador / denominador;
b_R[(tam_mat_real * tam_mat_interna) + 1] = C*h/keff;
//Canto inferior direito
numerador = ( 2 * mat_perm[(tam_mat_real * tam_mat_interna) + tam_mat_interna] * mat_perm[(tam_mat_real * (tam_mat_interna - 1)) + tam_mat_interna] );
denominador = ( mat_perm[(tam_mat_real * tam_mat_interna) + tam_mat_interna] + mat_perm[(tam_mat_real * (tam_mat_interna - 1)) + tam_mat_interna] );
keff = numerador / denominador;
b_U[(tam_mat_real * tam_mat_interna) + tam_mat_interna] = C*h/keff;
numerador = ( 2 * mat_perm[(tam_mat_real * tam_mat_interna) + tam_mat_interna] * mat_perm[(tam_mat_real * tam_mat_interna) + (tam_mat_interna - 1)] );
denominador = ( mat_perm[(tam_mat_real * tam_mat_interna) + tam_mat_interna] + mat_perm[(tam_mat_real * tam_mat_interna) + (tam_mat_interna - 1)] );
keff = numerador / denominador;
b_L[(tam_mat_real * tam_mat_interna) + tam_mat_interna] = C*h/keff;
	//Compute the betas along the boundaries and in the interior region
int i = 0;
for(i = 2; i < tam_mat_interna; i ++){
		//Compute the top boundary
numerador = ( 2 * mat_perm[tam_mat_real + i] * mat_perm[tam_mat_real + (i-1)] );
denominador = ( mat_perm[tam_mat_real + i] + mat_perm[tam_mat_real + (i-1)] );
keff = numerador / denominador;
b_L[tam_mat_real + i] = C*h/keff;
numerador = ( 2 * mat_perm[tam_mat_real + i] * mat_perm[tam_mat_real + (i+1)] );
denominador = ( mat_perm[tam_mat_real + i] + mat_perm[tam_mat_real + (i+1)] );
keff = numerador / denominador;
b_R[tam_mat_real + i] = C*h/keff;
numerador = ( 2 * mat_perm[tam_mat_real + i] * mat_perm[(2 * tam_mat_real) + i] );
denominador = ( mat_perm[tam_mat_real + i] + mat_perm[(2 * tam_mat_real) + i] );
keff = numerador / denominador;
b_D[tam_mat_real + i] = C*h/keff;
		//Compute the left boundary
numerador = ( 2 * mat_perm[(i * tam_mat_real) + 1] * mat_perm[((i - 1) * tam_mat_real) + 1] );
denominador = ( mat_perm[(i * tam_mat_real) + 1] + mat_perm[((i - 1) * tam_mat_real) + 1] );
keff = numerador / denominador;
b_U[(i * tam_mat_real) + 1] = C*h/keff;
numerador = ( 2 * mat_perm[(i * tam_mat_real) + 1] * mat_perm[(i * tam_mat_real) + 2] );
denominador = ( mat_perm[(i * tam_mat_real) + 1] + mat_perm[(i * tam_mat_real) + 2] );
keff = numerador / denominador;
b_R[(i * tam_mat_real) + 1] = C*h/keff;
numerador = ( 2 * mat_perm[(i * tam_mat_real) + 1] * mat_perm[((i + 1) * tam_mat_real) + 1] );
denominador = ( mat_perm[(i * tam_mat_real) + 1] + mat_perm[((i + 1) * tam_mat_real) + 1] );
keff = numerador / denominador;
b_D[(i * tam_mat_real) + 1] = C*h/keff;
		//Compute the bottom boundary
numerador = ( 2 * mat_perm[(tam_mat_interna * tam_mat_real) + i] * mat_perm[(tam_mat_interna * tam_mat_real) + (i - 1)] );
denominador = ( mat_perm[(tam_mat_interna * tam_mat_real) + i] + mat_perm[(tam_mat_interna * tam_mat_real) + (i - 1)] );
keff = numerador / denominador;
b_L[(tam_mat_interna * tam_mat_real) + i] = C*h/keff;
numerador = ( 2 * mat_perm[(tam_mat_interna * tam_mat_real) + i] * mat_perm[((tam_mat_interna - 1) * tam_mat_real) + i] );
denominador = ( mat_perm[(tam_mat_interna * tam_mat_real) + i] + mat_perm[((tam_mat_interna - 1) * tam_mat_real) + i] );
keff = numerador / denominador;
b_U[(tam_mat_interna * tam_mat_real) + i] = C*h/keff;
numerador = ( 2 * mat_perm[(tam_mat_interna * tam_mat_real) + i] * mat_perm[(tam_mat_interna * tam_mat_real) + (i + 1)] );
denominador = ( mat_perm[(tam_mat_interna * tam_mat_real) + i] + mat_perm[(tam_mat_interna * tam_mat_real) + (i + 1)] );
keff = numerador / denominador;
b_R[(tam_mat_interna * tam_mat_real) + i] = C*h/keff;
		//Compute the right boundary
numerador = ( 2 * mat_perm[(i * tam_mat_real) + tam_mat_interna] * mat_perm[((i-1) * tam_mat_real) + tam_mat_interna] );
denominador = ( mat_perm[(i * tam_mat_real) + tam_mat_interna] + mat_perm[((i-1) * tam_mat_real) + tam_mat_interna] );
keff = numerador / denominador;
b_U[(i * tam_mat_real) + tam_mat_interna] = C*h/keff;
numerador = ( 2 * mat_perm[(i * tam_mat_real) + tam_mat_interna] * mat_perm[(i * tam_mat_real) + (tam_mat_interna - 1)] );
denominador = ( mat_perm[(i * tam_mat_real) + tam_mat_interna] + mat_perm[(i * tam_mat_real) + (tam_mat_interna - 1)] );
keff = numerador / denominador;
b_L[(i * tam_mat_real) + tam_mat_interna] = C*h/keff;
numerador = ( 2 * mat_perm[(i * tam_mat_real) + tam_mat_interna] * mat_perm[((i+1) * tam_mat_real) + tam_mat_interna] );
denominador = ( mat_perm[(i * tam_mat_real) + tam_mat_interna] + mat_perm[((i+1) * tam_mat_real) + tam_mat_interna] );
keff = numerador / denominador;
b_D[(i * tam_mat_real) + tam_mat_interna] = C*h/keff;
		//Compute the interior cells
int j = 0;
for(j = 2; j < tam_mat_interna; j ++){
numerador = ( 2 * mat_perm[(i * tam_mat_real) + j] * mat_perm[(i * tam_mat_real) + (j - 1)] );
denominador = ( mat_perm[(i * tam_mat_real) + j] + mat_perm[(i * tam_mat_real) + (j - 1)] );
keff = numerador / denominador;
b_L[(i * tam_mat_real) + j] = C*h/keff;
numerador = ( 2 * mat_perm[(i * tam_mat_real) + j] * mat_perm[(i * tam_mat_real) + (j + 1)] );
denominador = ( mat_perm[(i * tam_mat_real) + j] + mat_perm[(i * tam_mat_real) + (j + 1)] );
keff = numerador / denominador;
b_R[(i * tam_mat_real) + j] = C*h/keff;
numerador = ( 2 * mat_perm[(i * tam_mat_real) + j] * mat_perm[((i - 1) * tam_mat_real) + j] );
denominador = ( mat_perm[(i * tam_mat_real) + j] + mat_perm[((i - 1) * tam_mat_real) + j] );
keff = numerador / denominador;
b_U[(i * tam_mat_real) + j] = C*h/keff;
numerador = ( 2 * mat_perm[(i * tam_mat_real) + j] * mat_perm[((i + 1) * tam_mat_real) + j] );
denominador = ( mat_perm[(i * tam_mat_real) + j] + mat_perm[((i + 1) * tam_mat_real) + j] );
keff = numerador / denominador;
b_D[(i * tam_mat_real) + j] = C*h/keff;
}
}
}
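/*
 * Every assignment in cal_cond_robin() repeats the same two-step pattern: the
 * effective permeability between a cell and one neighbour is the harmonic mean
 * keff = 2*k1*k2 / (k1 + k2), and the stored Robin coefficient is beta = C*h/keff.
 * The helper below is NOT part of the original code; it is a hypothetical sketch
 * (the name beta_robin is invented) showing how each assignment could be written once.
 */
static double beta_robin(double k_cell, double k_neigh, double C, double h){
	double keff = (2.0 * k_cell * k_neigh) / (k_cell + k_neigh); /* harmonic mean of the two permeabilities */
	return C * h / keff;                                         /* Robin boundary coefficient */
}
/* Hypothetical usage: the top-left corner above would reduce to
 *	b_R[tam_mat_real + 1] = beta_robin(mat_perm[tam_mat_real + 1], mat_perm[tam_mat_real + 2], C, h);
 *	b_D[tam_mat_real + 1] = beta_robin(mat_perm[tam_mat_real + 1], mat_perm[(2*tam_mat_real) + 1], C, h);
 */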
/*
 *
 * VERIFY THE RETURN VALUE
 *
 */
char parametro_independentes(){
int i = 0, j = 0;
double constante = 2/h;
for(i = 0; i < tam_mat_real; i ++)
for(j = 0; j < tam_mat_real; j++){
mat_epsilon[i*tam_mat_real + j] = constante * mat_perm[i*tam_mat_real + j];
mat_font[i*tam_mat_real + j] *= h;
}
return 0;
}
char copia_dados_para_gpu(){
HANDLE_ERROR( cudaMemcpy( dev_q_R, q_R, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_q_L, q_L, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_q_U, q_U, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_q_D, q_D, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_q_R_old, q_R_old, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_q_L_old, q_L_old, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_q_U_old, q_U_old, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_q_D_old, q_D_old, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_l_R, l_R, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_l_L, l_L, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_l_U, l_U, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_l_D, l_D, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_l_R_old, l_R_old, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_l_L_old, l_L_old, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_l_U_old, l_U_old, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_l_D_old, l_D_old, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_b_R, b_R, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_b_L, b_L, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_b_U, b_U, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_b_D, b_D, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_b_R_old, b_R_old, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_b_L_old, b_L_old, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_b_U_old, b_U_old, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_b_D_old, b_D_old, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_pressao, pressao, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_pressao_old, pressao_old, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_mat_font, mat_font, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_mat_perm, mat_perm, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_mat_epsilon, mat_epsilon, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
return 0;
}
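/*
 * Illustration only (not in the original source): the 29 copies above all move
 * tam_mat_real * tam_mat_real doubles in the same direction, so they could be
 * driven from a table of pointer pairs. The names HostDevPair and
 * copia_todos_para_gpu below are hypothetical; error handling reuses the same
 * HANDLE_ERROR macro as the surrounding code.
 */
typedef struct { double *host; double *dev; } HostDevPair;

static void copia_todos_para_gpu(const HostDevPair *pares, int n, size_t bytes){
	int i;
	for(i = 0; i < n; i++){
		HANDLE_ERROR( cudaMemcpy( pares[i].dev, pares[i].host, bytes,
						cudaMemcpyHostToDevice ) );
	}
}
/* Hypothetical usage:
 *	HostDevPair pares[] = { {q_R, dev_q_R}, {q_L, dev_q_L}, {q_U, dev_q_U}, ... };
 *	copia_todos_para_gpu(pares, sizeof(pares)/sizeof(pares[0]),
 *						tam_mat_real * tam_mat_real * sizeof(double));
 */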
void copia_dados_para_cpu(){
HANDLE_ERROR( cudaMemcpy( q_R, dev_q_R, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( q_L, dev_q_L, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( q_U, dev_q_U, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( q_D, dev_q_D, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( q_R_old, dev_q_R_old, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( q_L_old, dev_q_L_old, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( q_U_old, dev_q_U_old, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( q_D_old, dev_q_D_old, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( l_R, dev_l_R, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( l_L, dev_l_L, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( l_U, dev_l_U, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( l_D, dev_l_D, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( l_R_old, dev_l_R_old, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( l_L_old, dev_l_L_old, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( l_U_old, dev_l_U_old, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( l_D_old, dev_l_D_old, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( b_R, dev_b_R, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( b_L, dev_b_L, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( b_U, dev_b_U, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( b_D, dev_b_D, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( b_R_old, dev_b_R_old, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( b_L_old, dev_b_L_old, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( b_U_old, dev_b_U_old, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( b_D_old, dev_b_D_old, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyDeviceToHost ) );
	HANDLE_ERROR( cudaMemcpy( mat_font, dev_mat_font, tam_mat_real * tam_mat_real * sizeof(double),
							cudaMemcpyDeviceToHost ) );
	HANDLE_ERROR( cudaMemcpy( mat_perm, dev_mat_perm, tam_mat_real * tam_mat_real * sizeof(double),
							cudaMemcpyDeviceToHost ) );
	HANDLE_ERROR( cudaMemcpy( mat_epsilon, dev_mat_epsilon, tam_mat_real * tam_mat_real * sizeof(double),
							cudaMemcpyDeviceToHost ) );
}
|
f38befd81e66dac3c6b000d029a0239e6b3ed801.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "common/book.h"
#include "lock.h"
#include <hip/hip_runtime.h>
#include <cuda_device_runtime_api.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#define SIZE (100 * 1024 * 1024)
#define ELEMENTS (SIZE / sizeof(unsigned int))
#define HASH_ENTRIES 1024
struct Entry {
unsigned int key;
void *value;
Entry *next;
};
struct Table {
size_t count;
Entry **entries;
Entry *pool;
};
void copy_table_to_host(Table const &table, Table &host_table) {
host_table.count = table.count;
host_table.entries = (Entry **)calloc(table.count, sizeof(Entry *));
host_table.pool = (Entry *)malloc(ELEMENTS * sizeof(Entry));
HANDLE_ERROR(hipMemcpy(host_table.entries, table.entries,
table.count * sizeof(Entry *),
hipMemcpyDeviceToHost));
HANDLE_ERROR(hipMemcpy(host_table.pool, table.pool, ELEMENTS * sizeof(Entry),
hipMemcpyDeviceToHost));
for (int i = 0; i < table.count; i++) {
if (host_table.entries[i] != NULL) {
host_table.entries[i] =
(Entry *)((size_t)host_table.entries[i] - (size_t)table.pool +
(size_t)host_table.pool);
}
}
for (int i = 0; i < ELEMENTS; i++) {
if (host_table.pool[i].next != NULL) {
host_table.pool[i].next =
(Entry *)((size_t)host_table.pool[i].next - (size_t)table.pool +
(size_t)host_table.pool);
}
}
}
void initialize_table(Table &table, int const entries, int const elements) {
table.count = entries;
HANDLE_ERROR(hipMalloc((void **)&table.entries, entries * sizeof(Entry *)));
HANDLE_ERROR(hipMemset(table.entries, 0, entries * sizeof(Entry *)));
HANDLE_ERROR(hipMalloc((void **)&table.pool, elements * sizeof(Entry)));
}
void free_table(Table &table) {
HANDLE_ERROR(hipFree(table.entries));
HANDLE_ERROR(hipFree(table.pool));
}
__device__ __host__ size_t hash(unsigned int key, size_t count) {
return key % count;
}
__global__ void add_to_table(unsigned int *keys, void **values, Table table,
                             Lock *lock) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int const stride = blockDim.x * gridDim.x;
while (tid < ELEMENTS) {
unsigned int const key = keys[tid];
size_t const hash_value = hash(key, table.count);
for (int i = 0; i < 32; ++i) {
if (tid % 32 == i) {
Entry *location = &table.pool[tid];
location->key = key;
/*location->value = values[tid];*/
/*lock[hash_value].lock();*/
/*location->next = table.entries[hash_value];*/
/*table.entries[hash_value] = location;*/
/*lock[hash_value].unlock();*/
}
}
tid += stride;
}
}
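/*
 * Note on the "for (int i = 0; i < 32; ++i) if (tid % 32 == i)" idiom above: it
 * serializes the 32 threads of a warp so that only one thread at a time attempts
 * to take a bucket lock; letting a whole warp spin on the same Lock can deadlock.
 * The commented-out lines correspond to the locked insertion, roughly (sketch,
 * assuming Lock::lock()/unlock() from lock.h behave as in the book's version):
 *
 *   lock[hash_value].lock();
 *   location->value = values[tid];
 *   location->next = table.entries[hash_value];
 *   table.entries[hash_value] = location;
 *   lock[hash_value].unlock();
 */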
void verify_table(Table const &dev_table) {
Table table;
copy_table_to_host(dev_table, table);
int count = 0;
for (size_t i = 0; i < table.count; ++i) {
Entry *current = table.entries[i];
while (current != NULL) {
++count;
if (hash(current->key, table.count) != i) {
printf("%d hashed to %ld, but located at %ld.\n", current->key,
hash(current->key, table.count), i);
}
current = current->next;
}
}
if (count != ELEMENTS) {
printf("%d elements found in the hash table. Should be %ld.\n", count,
ELEMENTS);
} else {
printf("All %d elements found in the hash table.\n", count);
}
free(table.pool);
free(table.entries);
}
int main(void) {
unsigned int *buffer = (unsigned int *)big_random_block(SIZE);
/*hipEvent_t start, stop;*/
/*HANDLE_ERROR(hipEventCreate(&start));*/
/*HANDLE_ERROR(hipEventCreate(&stop));*/
/*HANDLE_ERROR(hipEventRecord(start, 0));*/
unsigned int *dev_keys;
void **dev_values;
HANDLE_ERROR(hipMalloc((void **)&dev_keys, SIZE));
HANDLE_ERROR(hipMalloc((void **)&dev_values, SIZE));
HANDLE_ERROR(hipMemcpy(dev_keys, buffer, SIZE, hipMemcpyHostToDevice));
Table table;
initialize_table(table, HASH_ENTRIES, ELEMENTS);
Lock lock[HASH_ENTRIES];
Lock *dev_lock;
HANDLE_ERROR(hipMalloc((void **)&dev_lock, HASH_ENTRIES * sizeof(Lock)));
HANDLE_ERROR(hipMemcpy(dev_lock, lock, HASH_ENTRIES * sizeof(Lock),
hipMemcpyHostToDevice));
hipLaunchKernelGGL(( add_to_table), dim3(60), dim3(256), 0, 0, dev_keys, dev_values, table, dev_lock);
/*HANDLE_ERROR(hipEventRecord(stop, 0));*/
/*HANDLE_ERROR(hipEventSynchronize(stop));*/
/*float elapsed_time;*/
/*HANDLE_ERROR(hipEventElapsedTime(&elapsed_time, start, stop));*/
/*printf("Time to hash: %3.1f ms.\n", elapsed_time);*/
/*verify_table(table);*/
/*HANDLE_ERROR(hipEventDestroy(start));*/
/*HANDLE_ERROR(hipEventDestroy(stop));*/
free_table(table);
hipFree(dev_lock);
hipFree(dev_keys);
hipFree(dev_values);
free(buffer);
return 0;
}
|
f38befd81e66dac3c6b000d029a0239e6b3ed801.cu
|
#include "common/book.h"
#include "lock.h"
#include <cuda.h>
#include <cuda_device_runtime_api.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#define SIZE (100 * 1024 * 1024)
#define ELEMENTS (SIZE / sizeof(unsigned int))
#define HASH_ENTRIES 1024
struct Entry {
unsigned int key;
void *value;
Entry *next;
};
struct Table {
size_t count;
Entry **entries;
Entry *pool;
};
void copy_table_to_host(Table const &table, Table &host_table) {
host_table.count = table.count;
host_table.entries = (Entry **)calloc(table.count, sizeof(Entry *));
host_table.pool = (Entry *)malloc(ELEMENTS * sizeof(Entry));
HANDLE_ERROR(cudaMemcpy(host_table.entries, table.entries,
table.count * sizeof(Entry *),
cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaMemcpy(host_table.pool, table.pool, ELEMENTS * sizeof(Entry),
cudaMemcpyDeviceToHost));
for (int i = 0; i < table.count; i++) {
if (host_table.entries[i] != NULL) {
host_table.entries[i] =
(Entry *)((size_t)host_table.entries[i] - (size_t)table.pool +
(size_t)host_table.pool);
}
}
for (int i = 0; i < ELEMENTS; i++) {
if (host_table.pool[i].next != NULL) {
host_table.pool[i].next =
(Entry *)((size_t)host_table.pool[i].next - (size_t)table.pool +
(size_t)host_table.pool);
}
}
}
void initialize_table(Table &table, int const entries, int const elements) {
table.count = entries;
HANDLE_ERROR(cudaMalloc((void **)&table.entries, entries * sizeof(Entry *)));
HANDLE_ERROR(cudaMemset(table.entries, 0, entries * sizeof(Entry *)));
HANDLE_ERROR(cudaMalloc((void **)&table.pool, elements * sizeof(Entry)));
}
void free_table(Table &table) {
HANDLE_ERROR(cudaFree(table.entries));
HANDLE_ERROR(cudaFree(table.pool));
}
__device__ __host__ size_t hash(unsigned int key, size_t count) {
return key % count;
}
__global__ void add_to_table(unsigned int *keys, void **values, Table table,
                             Lock *lock) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int const stride = blockDim.x * gridDim.x;
while (tid < ELEMENTS) {
unsigned int const key = keys[tid];
size_t const hash_value = hash(key, table.count);
for (int i = 0; i < 32; ++i) {
if (tid % 32 == i) {
Entry *location = &table.pool[tid];
location->key = key;
/*location->value = values[tid];*/
/*lock[hash_value].lock();*/
/*location->next = table.entries[hash_value];*/
/*table.entries[hash_value] = location;*/
/*lock[hash_value].unlock();*/
}
}
tid += stride;
}
}
void verify_table(Table const &dev_table) {
Table table;
copy_table_to_host(dev_table, table);
int count = 0;
for (size_t i = 0; i < table.count; ++i) {
Entry *current = table.entries[i];
while (current != NULL) {
++count;
if (hash(current->key, table.count) != i) {
printf("%d hashed to %ld, but located at %ld.\n", current->key,
hash(current->key, table.count), i);
}
current = current->next;
}
}
if (count != ELEMENTS) {
printf("%d elements found in the hash table. Should be %ld.\n", count,
ELEMENTS);
} else {
printf("All %d elements found in the hash table.\n", count);
}
free(table.pool);
free(table.entries);
}
int main(void) {
unsigned int *buffer = (unsigned int *)big_random_block(SIZE);
/*cudaEvent_t start, stop;*/
/*HANDLE_ERROR(cudaEventCreate(&start));*/
/*HANDLE_ERROR(cudaEventCreate(&stop));*/
/*HANDLE_ERROR(cudaEventRecord(start, 0));*/
unsigned int *dev_keys;
void **dev_values;
HANDLE_ERROR(cudaMalloc((void **)&dev_keys, SIZE));
HANDLE_ERROR(cudaMalloc((void **)&dev_values, SIZE));
HANDLE_ERROR(cudaMemcpy(dev_keys, buffer, SIZE, cudaMemcpyHostToDevice));
Table table;
initialize_table(table, HASH_ENTRIES, ELEMENTS);
Lock lock[HASH_ENTRIES];
Lock *dev_lock;
HANDLE_ERROR(cudaMalloc((void **)&dev_lock, HASH_ENTRIES * sizeof(Lock)));
HANDLE_ERROR(cudaMemcpy(dev_lock, lock, HASH_ENTRIES * sizeof(Lock),
cudaMemcpyHostToDevice));
add_to_table<<<60, 256>>>(dev_keys, dev_values, table, dev_lock);
/*HANDLE_ERROR(cudaEventRecord(stop, 0));*/
/*HANDLE_ERROR(cudaEventSynchronize(stop));*/
/*float elapsed_time;*/
/*HANDLE_ERROR(cudaEventElapsedTime(&elapsed_time, start, stop));*/
/*printf("Time to hash: %3.1f ms.\n", elapsed_time);*/
/*verify_table(table);*/
/*HANDLE_ERROR(cudaEventDestroy(start));*/
/*HANDLE_ERROR(cudaEventDestroy(stop));*/
free_table(table);
cudaFree(dev_lock);
cudaFree(dev_keys);
cudaFree(dev_values);
free(buffer);
return 0;
}
|
b6b9bafcf47616c0555dea3eedfc85e562605b63.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "../NativeOps.h"
#include <hip/hip_runtime.h>
#include <cuda_launch_config.h>
#include <buffer.h>
#include <shape.h>
#include <rocblas.h>
#include <reduce3.h>
#include <reduce.h>
#include <indexreduce.h>
#include <pairwise_transform.h>
#include <transform.h>
#include <scalar.h>
#include <broadcasting.h>
#include <summarystatsreduce.h>
#include <thread>
#include <map>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include <cuda_device_runtime_api.h>
#include <pointercast.h>
#include <stdio.h>
#include <stdlib.h>
hipDeviceProp_t *deviceProperties;
hipFuncAttributes *funcAttributes = new hipFuncAttributes[64];
int blockLimit = 128;
int maxThreads = 512;
bool debug = false;
bool verbose = true;
bool allowedP2P = false;
__constant__ char deviceConstantMemory[49152];
int getDeviceId(Nd4jPointer ptrToDeviceId) {
return (int)(Nd4jIndex)ptrToDeviceId;
}
template <typename T>
dim3 getOptimalDimensions(Nd4jIndex n,hipFuncAttributes attributes, hipDeviceProp_t properties) {
// we can combine the two to compute a block size
int num_threads = block_size_with_maximum_potential_occupancy(attributes, properties);
	// no real sense launching more threads than the number of elements we have
if (num_threads > n) num_threads = n;
if (maxThreads > 0 && num_threads > maxThreads) num_threads = maxThreads;
// compute the number of blocks of size num_threads to launch
int num_blocks = n / num_threads;
	// cap the number of blocks at the block limit
if (num_blocks > blockLimit) num_blocks = blockLimit;
if (num_blocks < 4 && n > 128) {
num_blocks = 4;
num_threads = n / num_blocks;
}
if (num_threads >= 768) {
num_blocks = num_blocks * 2;
num_threads = num_threads / 2;
}
if(n % num_threads && num_blocks < blockLimit) ++num_blocks;
//(num_threads * sizeof(T)) + attributes.sharedSizeBytes);
return dim3(num_blocks,num_threads, 3000);
}
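/*
 * Worked example (assuming block_size_with_maximum_potential_occupancy() suggests
 * 1024 threads): with n = 100000, maxThreads = 512 and blockLimit = 128,
 * num_threads is clamped to 512, num_blocks = 100000 / 512 = 195 and is then
 * capped at blockLimit, so the function returns dim3(128, 512, 3000) - grid size,
 * block size, and the fixed 3000-byte shared-memory budget packed into .z.
 */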
int getBaseMemorySize(int xRank, hipFuncAttributes funcAttr) {
int memory_limit = 256; //funcAttr.sharedSizeBytes;
// TODO: remove this later
memory_limit += sizeof(UnifiedSharedMemory) + 32; // sizeof(shape::TAD) + (xRank * 4 * 4)
/*
if (xRank == 0) xRank = 2;
memory_limit += (xRank * 2 + 4) * 3 * 4; // we reserve memory for xShape + T1/T2 shapes
memory_limit += yRank == 0 ? 0 : (yRank * 2 + 4) * 4;
memory_limit += zRank == 0 ? 0 : (zRank * 2 + 4) * 4;
memory_limit += (xRank * 4) * 6;
	memory_limit += MAX_RANK * 4; // special case, needed roughly in one pass
*/
return memory_limit;
}
int getDeviceBlockThreshold(int deviceId) {
int ccMinor = deviceProperties[deviceId].minor;
int ccMajor = deviceProperties[deviceId].major;
int blockThreshold = 8;
if (ccMajor >= 5)
blockThreshold = 32;
else if (ccMajor == 3)
blockThreshold = 16;
else if (ccMajor < 3)
blockThreshold = 8;
return blockThreshold;
}
dim3 getBasicLaunchParams(int deviceId, long problemLength, int sharedMemoryPerThread, hipFuncAttributes funcAttr) {
int countMP = deviceProperties[deviceId].multiProcessorCount;
int blockThreshold = getDeviceBlockThreshold(deviceId);
int num_threads = problemLength / (countMP * blockThreshold);
num_threads = nd4j::math::nd4j_min<int>(num_threads, maxThreads);
num_threads = nd4j::math::nd4j_max<int>(num_threads, 64);
int num_blocks = nd4j::math::nd4j_max<int>(problemLength / num_threads, 1);
num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit);
int memory_limit = (sharedMemoryPerThread * num_threads) + getBaseMemorySize(1, funcAttr);
dim3 launchDims = dim3(num_blocks, num_threads, memory_limit);
if (debug && verbose)
printf("Preliminary basic launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i]\n", num_blocks, num_threads, memory_limit);
return launchDims;
}
int getDeviceSharedThreshold(int deviceId) {
int ccMinor = deviceProperties[deviceId].minor;
int ccMajor = deviceProperties[deviceId].major;
	// please note the threshold isn't a multiple of 32, and that's NOT a mistake
int shmemThreshold;
if (ccMajor == 6 && ccMinor == 0)
shmemThreshold = 65536;
else if (ccMajor == 6 && ccMinor == 1)
shmemThreshold = 98304;
else if (ccMajor == 5 && ccMinor == 2)
shmemThreshold = 98304;
else if (ccMajor == 5)
shmemThreshold = 65536;
else if (ccMajor == 3 && ccMinor == 7)
shmemThreshold = 114688;
else shmemThreshold = 49152;
return shmemThreshold;
}
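/*
 * For reference, these thresholds match the per-SM shared memory of each family:
 * 64 KB for CC 6.0, 96 KB for CC 6.1 and CC 5.2, 64 KB for other CC 5.x parts,
 * 112 KB for CC 3.7 (Kepler GK210), and 48 KB otherwise.
 */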
dim3 getBetterDimensions(int deviceId, int numTads, int tadLength, int xRank, hipFuncAttributes funcAttr, int dimensionLength, int elementSize, int reduction) {
int num_threads = nd4j::math::nd4j_min<int>(tadLength, maxThreads);
int countMP = deviceProperties[deviceId].multiProcessorCount;
int regPerBlock = deviceProperties[deviceId].regsPerBlock;
int warpSize = deviceProperties[deviceId].warpSize;
int blockThreshold = getDeviceBlockThreshold(deviceId);
int shmemThreshold = getDeviceSharedThreshold(deviceId);
	// round num_threads down to the nearest multiple of warpSize
num_threads -= num_threads % warpSize;
num_threads = nd4j::math::nd4j_max<int>(32, num_threads);
	// since we use shared memory as fast scratch memory in some cases, we need to account for that
int memory_limit = getBaseMemorySize(xRank, funcAttr);
int memory_floor = memory_limit;
int effective_block_limit = countMP * blockThreshold;
int num_blocks = nd4j::math::nd4j_min<int>(numTads, effective_block_limit);
int desiredShared = shmemThreshold / nd4j::math::nd4j_max<int>((num_blocks / countMP), 1);
if (debug && verbose)
		printf("numBlocks: [%i], numThreads: [%i], countMP: [%i], shmemThreshold: [%i], desiredShared: [%i]\n", num_blocks, num_threads, countMP, shmemThreshold, desiredShared);
	// at this point we have gathered all the required information; time to factor in the reduction multipliers
int reduction_per_block = 0;
bool found = false;
if (reduction > 0)
while (!found) {
reduction_per_block = (num_threads * elementSize * reduction);
if (memory_limit + reduction_per_block < desiredShared) {
memory_limit += reduction_per_block;
found = true;
} else {
if (num_threads > 128) {
num_threads -= 32;
} else {
memory_limit += reduction_per_block;
found = true;
}
}
}
	// at this point we know the total memory used per block, and we also know the per-MP limit
int max_active_blocks = shmemThreshold / nd4j::math::nd4j_max<int>(memory_limit, 1);
if (debug && verbose)
printf("MAB: [%i], memory_limit: [%i]\n", max_active_blocks, memory_limit);
	// we don't want to spawn more blocks than the GPU can actually handle without queuing
num_blocks = nd4j::math::nd4j_min<int>(num_blocks, max_active_blocks);
num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit);
// if (num_blocks > countMP)
// num_blocks = num_blocks - (num_blocks % countMP);
num_blocks = nd4j::math::nd4j_max<int>(num_blocks, 1);
int targetBlocksPerMP = num_blocks / countMP;
	// now we know the desired number of blocks w.r.t. shared memory, so we should also take the number of threads per SM into account
if (targetBlocksPerMP * num_threads > 2048) {
while (targetBlocksPerMP * num_threads > 2048) {
if (num_threads <= 96)
break;
num_threads -= 32;
}
reduction_per_block = (num_threads * elementSize * reduction);
memory_limit = memory_floor + reduction_per_block;
}
if (debug && verbose)
printf("Preliminary reduce launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i], reduction_per_block: [%i], blocksPerMP: [%i]\n", num_blocks, num_threads, memory_limit, reduction_per_block, targetBlocksPerMP);
return dim3(num_blocks,num_threads, memory_limit);
}
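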
dim3 getFlatLaunchParams(int deviceId, int *xShapeInfo, int *yShapeInfo, hipFuncAttributes funcAttr) {
int xRank = shape::rank(xShapeInfo);
int yRank = yShapeInfo == nullptr ? 0 : shape::rank(yShapeInfo);
int zRank = 0;
int memory_limit = getBaseMemorySize(xRank, funcAttr);
int countMP = deviceProperties[deviceId].multiProcessorCount;
int regPerBlock = deviceProperties[deviceId].regsPerBlock;
int blockThreshold = getDeviceBlockThreshold(deviceId);
int shmemThreshold = getDeviceSharedThreshold(deviceId);
int xLength = shape::length(xShapeInfo);
int effective_block_limit = countMP * blockThreshold;
	// for flat calls we just want as many concurrent blocks as possible; we're not tied to TADs here
int num_threads = xLength / effective_block_limit;
if (num_threads < 64)
num_threads = 64;
num_threads = num_threads - (num_threads % 32);
int memory_floor = memory_limit;
int num_blocks = xLength / num_threads;
num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit);
num_blocks = nd4j::math::nd4j_min<int>(num_blocks, effective_block_limit);
num_blocks = nd4j::math::nd4j_max<int>(num_blocks, 1);
int targetBlocksPerMP = num_blocks / countMP;
	// now we know the desired number of blocks w.r.t. shared memory, so we should also take the number of threads per SM into account
if (targetBlocksPerMP * num_threads > 2048 && num_threads >= 64) {
while (targetBlocksPerMP * num_threads > 2048) {
if (num_threads <= 32)
break;
num_threads -= 32;
}
}
dim3 launchDims = dim3(num_blocks, num_threads, memory_limit);
if (debug && verbose)
printf("Preliminary scalar launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i], blocksPerMP: [%i], problemLength: [%i], effectiveBlockLimit: [%i]\n", num_blocks, num_threads, memory_limit, targetBlocksPerMP, xLength, effective_block_limit);
return launchDims;
}
dim3 getReduceLaunchParams(int deviceId, int *xShapeInfo, int *tadShapeInfo, hipFuncAttributes funcAttr, int dimensionLength, int elementSize, int reductionSize) {
int tadLength = 0;
int numTads = 0;
if (tadShapeInfo != nullptr) {
tadLength = shape::length(tadShapeInfo);
numTads = shape::length(xShapeInfo) / tadLength;
if (tadLength == 1) {
if (debug && verbose)
printf("A xLength: [%i], zLength: [%i]\n", shape::length(xShapeInfo), shape::length(tadShapeInfo));
}
} else{
		// special case: reduction along all dimensions
tadLength = nd4j::math::nd4j_min<int>(shape::length(xShapeInfo), 768);
numTads = shape::length(xShapeInfo) / tadLength;
}
int xRank = shape::rank(xShapeInfo);
int zRank = tadShapeInfo == nullptr ? 0 : shape::rank(tadShapeInfo);
dim3 launchDims = getBetterDimensions(deviceId, numTads, tadLength, xRank, funcAttr, dimensionLength, elementSize, reductionSize);
if ((debug && verbose ) ) { //|| launchDims.x == 1
printf("B xLength: [%i], numTads: [%i], tadLength: [%i], launchDims.x: [%i], launchDims.y: [%i]\n", shape::length(xShapeInfo), numTads, tadLength, launchDims.x, launchDims.y);
//shape::printShapeInfo(xShapeInfo);
}
return launchDims;
}
/**
* Returns optimal launch parameters
* given the extra pointers passed in.
* The extra pointer should be
* the host pointer for the shape information
* associated with the data.
* From there it is used to obtain the length
* from which we can derive the optimal launch parameters.
*
*/
template <typename T>
dim3 getOptimalLaunchParameters(Nd4jPointer *extraPointers, hipFuncAttributes attributes, hipDeviceProp_t properties) {
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
Nd4jIndex n = shape::length(hostXShapeInfo);
dim3 launchDims = getOptimalDimensions<T>(n,attributes, properties);
if (debug && verbose)
printf("Params: gridSize: [%i], blockSize: [%i], shMem: [%i], problemLength: [%i], totalThreads:[%i]\n", launchDims.x, launchDims.y, launchDims.z, n, (launchDims.x * launchDims.y));
return launchDims;
}
float cpu_half2float(half h) {
unsigned sign = ((h.x >> 15) & 1);
unsigned exponent = ((h.x >> 10) & 0x1f);
unsigned mantissa = ((h.x & 0x3ff) << 13);
if (exponent == 0x1f) { /* NaN or Inf */
mantissa = (mantissa ? (sign = 0, 0x7fffff) : 0);
exponent = 0xff;
} else if (!exponent) { /* Denorm or Zero */
if (mantissa) {
unsigned int msb;
exponent = 0x71;
do {
msb = (mantissa & 0x400000);
mantissa <<= 1; /* normalize */
--exponent;
} while (!msb);
mantissa &= 0x7fffff; /* 1.mantissa is implicit */
}
} else {
exponent += 0x70;
}
int temp = ((sign << 31) | (exponent << 23) | mantissa);
return *((float*)((void*)&temp));
}
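/*
 * Quick sanity check for cpu_half2float() (illustration only, not part of the
 * original source). A few IEEE-754 binary16 bit patterns and the floats they
 * should decode to: 0x3C00 -> 1.0f, 0xC000 -> -2.0f, 0x7BFF -> 65504.0f.
 */
static void cpu_half2float_selftest() {
	half h;                                   // assumes the same half type with an .x bit field used above
	unsigned short patterns[] = { 0x3C00, 0xC000, 0x7BFF };
	float expected[]          = { 1.0f,   -2.0f,  65504.0f };
	for (int i = 0; i < 3; i++) {
		h.x = patterns[i];
		printf("half 0x%04x -> %f (expected %f)\n", patterns[i], cpu_half2float(h), expected[i]);
	}
}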
nd4j::buffer::Buffer<int> * createScalarBuffer(hipStream_t stream) {
int *scalarShapeInfo = shape::createScalarShapeInfo();
nd4j::buffer::Buffer<int> *buff = nd4j::buffer::createBuffer(scalarShapeInfo,shape::shapeInfoLength(2), stream);
nd4j::buffer::copyDataToGpu(&buff, stream);
return buff;
}
class ScalarShapeInformation {
private:
nd4j::buffer::Buffer<int> *scalarDimension;
nd4j::buffer::Buffer<int> *scalarShapeInfo;
std::thread::id threadId;
public:
ScalarShapeInformation(hipStream_t stream) {
int *scalarDimensionBuff = (int *) malloc(sizeof(int));
scalarDimensionBuff[0] = MAX_DIMENSION;
scalarDimension = nd4j::buffer::createBuffer(scalarDimensionBuff,1, stream);
scalarShapeInfo = createScalarBuffer(stream);
threadId = std::this_thread::get_id();
}
~ScalarShapeInformation() {
nd4j::buffer::freeBuffer(&scalarShapeInfo);
nd4j::buffer::freeBuffer(&scalarDimension);
}
int *getShapeInfoHostPointer() {
return scalarShapeInfo->data;
}
int * getShapeInfoGpuPointer() {
return scalarShapeInfo->gData;
}
int * getDimensionHostPointer() {
return scalarDimension->data;
}
int * getDimensionGpuPointer() {
return scalarDimension->gData;
}
};
template <typename T>
class ScalarInfo {
nd4j::buffer::Buffer<T> *scalarData;
ScalarShapeInformation *shapeInfo;
T finalResult;
hipStream_t streamRef;
public:
ScalarInfo(hipStream_t stream) {
T *scalarResult = (T*)malloc(sizeof(T));
shapeInfo = new ScalarShapeInformation(stream);
scalarData = nd4j::buffer::createBuffer(scalarResult,1, stream);
streamRef = stream;
nd4j::buffer::copyDataToGpu(&scalarData, stream);
}
T getFinalResultFromDevice() {
nd4j::buffer::copyDataFromGpu(&scalarData, streamRef);
return scalarData->data[0];
}
/**
* Get the device shape information
* representing a scalar
*/
int *getDeviceShapeInfo() {
return shapeInfo->getShapeInfoGpuPointer();
}
/**
* Get the result pointers
*/
T *getDevicePointer() {
return scalarData->gData;
}
/**
* Get the infinite dimension device pointer
*/
int *getDimensionDevicePointer() {
return shapeInfo->getDimensionGpuPointer();
}
~ScalarInfo() {
nd4j::buffer::freeBuffer(&scalarData);
delete shapeInfo;
}
};
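/*
 * Usage sketch for ScalarInfo (illustration only; the kernel name and stream setup
 * are hypothetical). The pattern is: wrap a single scalar on a stream, hand its
 * device pointer and scalar shape to a reduction kernel, then pull the result back:
 *
 *   hipStream_t stream;
 *   hipStreamCreate(&stream);
 *   ScalarInfo<double> scalar(stream);
 *   hipLaunchKernelGGL((someReductionKernel), dim3(grid), dim3(block), shmem, stream,
 *                      scalar.getDevicePointer(), scalar.getDeviceShapeInfo(), ...);
 *   double result = scalar.getFinalResultFromDevice();
 */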
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
*/
double NativeOps::execIndexReduceScalarDouble(Nd4jPointer *extraPointers,int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams) {
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
if (debug && verbose)
printf("D1 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[27], deviceProperties[getDeviceId(extraPointers[2])]);
double *resultPointer = reinterpret_cast<double *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[27], 1, sizeof(double), 2);
hipLaunchKernelGGL(( indexReduceDouble), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer,
nullptr, 0,
nullptr,
1,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
checkCudaErrors(hipStreamSynchronize(*stream));
double result = resultPointer[0];
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfoBuffer
* @param dimension
* @param dimensionLength
*/
void NativeOps::execIndexReduceDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfoBuffer,
Nd4jPointer dimension, int dimensionLength) {
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfoBuffer);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
if (debug && verbose)
printf("D2 opNum:[%i]\n", opNum);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[27], dimensionLength, sizeof(double), 2);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
hipLaunchKernelGGL(( indexReduceDouble), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer,
resultShapeInfoPointer, shape::rank(hostZShapeInfo),
dimensionPointer,
dimensionLength,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execBroadcastDouble(Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer dimension, int dimensionLength){
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *yPointer = reinterpret_cast<double *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
if (debug && verbose)
printf("D3 opNum:[%i]\n", opNum);
// dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[26], deviceProperties[getDeviceId(extraPointers[2])]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[26], dimensionLength, sizeof(double), 0);
hipLaunchKernelGGL(( broadcastDouble), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
yPointer,
yShapeInfoPointer, shape::rank(hostYShapeInfo),
resultPointer,
resultShapeInfoPointer, shape::rank(hostZShapeInfo),
dimensionPointer,
dimensionLength, deviceTADShapeInfo, deviceTADOffsets);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xStride
* @param y
* @param yStride
* @param result
* @param resultStride
* @param extraParams
* @param n
*/
void NativeOps::execPairwiseTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
int xStride,
Nd4jPointer y,
int yStride,
Nd4jPointer result,
int resultStride,
Nd4jPointer extraParams, Nd4jIndex n) {
double *xPointer = reinterpret_cast<double *>(dx);
double *yPointer = reinterpret_cast<double *>(y);
double *resultPointer = reinterpret_cast<double *>(result);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
if (debug && verbose)
printf("D4 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[25], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[25]);
hipLaunchKernelGGL(( pairWiseTransformStridedDouble), dim3(launchDims.x),dim3(launchDims.y), launchDims.z, *stream,
opNum,
n,
xPointer,
yPointer,
xStride,
yStride,
extraParamsPointer,
resultPointer,
resultStride, allocationPointer, deviceTADShapeInfo);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
* @param xIndexes
* @param yIndexes
* @param resultIndexes
*/
void NativeOps::execPairwiseTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer xIndexes,
Nd4jPointer yIndexes,
Nd4jPointer resultIndexes) {
double *xPointer = reinterpret_cast<double *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *yPointer = reinterpret_cast<double *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
int *xIndexesPointer = reinterpret_cast<int *>(xIndexes);
int *yIndexesPointer = reinterpret_cast<int *>(yIndexes);
int *resultIndexesPointer = reinterpret_cast<int *>(resultIndexes);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
if (debug && verbose)
printf("D5 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[24], deviceProperties[getDeviceId(extraPointers[2])]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[24]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
hipLaunchKernelGGL(( pairWiseTransformDoubleIndex) , dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream,
opNum,
xPointer,
yPointer,
extraParamsPointer,
resultPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
yShapeInfoPointer, shape::rank(hostYShapeInfo),
resultShapeInfoPointer, shape::rank(hostZShapeInfo),
xIndexesPointer,
yIndexesPointer,
resultIndexesPointer, allocationPointer, deviceTADShapeInfo);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execPairwiseTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams) {
double *xPointer = reinterpret_cast<double *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *yPointer = reinterpret_cast<double *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
if (debug && verbose)
printf("D6 opNum:[%i]\n", opNum);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[23], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[23]);
hipLaunchKernelGGL(( pairWiseTransformDouble), dim3(launchDims.x),dim3(launchDims.y), launchDims.z, *stream,
opNum,
xPointer,
yPointer,
extraParamsPointer,
resultPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
yShapeInfoPointer, shape::rank(hostYShapeInfo),
resultShapeInfoPointer, shape::rank(hostZShapeInfo), allocationPointer, deviceTADShapeInfo);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduceDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfo) {
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
if (debug && verbose)
printf("D7 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[22], deviceProperties[getDeviceId(extraPointers[2])]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[22], 1, sizeof(double), 1);
hipLaunchKernelGGL(( reduceScalarDouble), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
nullptr,
1,
reductionPointer, deviceTADShapeInfo);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduceDouble(
Nd4jPointer *extraPointers
,int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer dimension,
int dimensionLength) {
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
if (debug && verbose)
printf("D8 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[22], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[22], dimensionLength, sizeof(double), 1);
if (dimensionLength == 1) {
hipLaunchKernelGGL(( reduceDouble1D), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
} else if (shape::rank(hostTADShapeInfo) <= 3) {
hipLaunchKernelGGL(( reduceDouble6D), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
} else {
hipLaunchKernelGGL(( reduceDouble), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
}
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @return
*/
double NativeOps::execReduceScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams){
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
if (debug && verbose)
printf("D9 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[22], deviceProperties[getDeviceId(extraPointers[2])]);
double *resultPointer = reinterpret_cast<double *>(extraPointers[5]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[22], 1, sizeof(double), 1);
hipLaunchKernelGGL(( reduceScalarDouble), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
nullptr,
nullptr,
1,
reductionPointer, deviceTADShapeInfo);
checkCudaErrors(hipStreamSynchronize(*stream));
double result = resultPointer[0];
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduce3Double(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParamsVals,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo) {
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *yPointer = reinterpret_cast<double *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParamsVals);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
if (debug && verbose)
printf("D10 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[21], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
//dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), (int *) extraPointers[0], yShapeInfoPointer, resultShapeInfoPointer, 1, sizeof(double), 2);
//dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), (int *) extraPointers[0], yShapeInfoPointer);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[21]);
hipLaunchKernelGGL(( reduce3Double), dim3(1),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
nullptr,
1,
1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
*/
double NativeOps::execReduce3ScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParamsVals,
Nd4jPointer y,
Nd4jPointer yShapeInfo){
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *yPointer = reinterpret_cast<double *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParamsVals);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[21], deviceProperties[getDeviceId(extraPointers[2])]);
if (debug && verbose)
printf("D11 opNum:[%i]\n", opNum);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
double *resultPointer = reinterpret_cast<double *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
//dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), (int *) extraPointers[0], yShapeInfoPointer, nullptr, 1, sizeof(double), 2);
//dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), (int *) extraPointers[0], yShapeInfoPointer);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[21]);
hipLaunchKernelGGL(( reduce3Double), dim3(1),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
extraParamsPointer,
resultPointer,
nullptr,
nullptr,
1,
1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets);
checkCudaErrors(hipStreamSynchronize(*stream));
double result = resultPointer[0];
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfoBuffer
* @param dimension
* @param dimensionLength
*/
void NativeOps::execReduce3Double(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParamsVals,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfoBuffer,
Nd4jPointer dimension,
int dimensionLength){
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *yPointer = reinterpret_cast<double *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfoBuffer);
double *extraParamsPointer = reinterpret_cast<double *>(extraParamsVals);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
if (debug && verbose)
printf("D12 opNum:[%i]\n", opNum);
// dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[21], deviceProperties[getDeviceId(extraPointers[2])]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
//dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), (int *) extraPointers[0], yShapeInfoPointer, resultShapeInfoPointer, dimensionLength, sizeof(double), 2);
//dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), (int *) extraPointers[0], yShapeInfoPointer);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[21]);
hipLaunchKernelGGL(( reduce3Double), dim3(1),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xStride
* @param result
* @param resultStride
* @param scalar
* @param extraParams
* @param n
*/
void NativeOps::execScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
int xStride,
Nd4jPointer result,
int resultStride,
double scalar,
Nd4jPointer extraParams,
Nd4jIndex n) {
double *xPointer = reinterpret_cast<double *>(x);
double *resultPointer = reinterpret_cast<double *>(result);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
if (debug && verbose)
printf("D13 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[20], deviceProperties[getDeviceId(extraPointers[2])]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
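    // The launch configuration is derived host-side from the shape info plus the kernel's recorded
    // function attributes (funcAttributes[20] here); getFlatLaunchParams, getReduceLaunchParams and
    // getBasicLaunchParams are helpers assumed to be defined elsewhere in this translation unit.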
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[20]);
hipLaunchKernelGGL(( scalarDouble), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
n,
scalar,
xPointer,
xStride,
extraParamsPointer,
resultPointer,resultStride, allocPointer);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param scalar
* @param extraParams
*/
void NativeOps::execScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
double scalar,
Nd4jPointer extraParams){
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
if (debug && verbose)
printf("D14 opNum:[%i]\n", opNum);
// dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[19], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[19]);
hipLaunchKernelGGL(( scalarDouble), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
scalar,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer,resultShapeInfoPointer, shape::rank(hostZShapeInfo), allocPointer);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param scalar
* @param extraParams
* @param n
* @param xIndexes
* @param resultIndexes
*/
void NativeOps::execScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
double scalar,
Nd4jPointer extraParams,
Nd4jIndex n,
Nd4jPointer xIndexes,
Nd4jPointer resultIndexes){
double *xPointer = reinterpret_cast<double *>(x);
double *resultPointer = reinterpret_cast<double *>(result);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
int *resultIndexesPointer = reinterpret_cast<int *>(resultIndexes);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
if (debug && verbose)
printf("D15 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[18], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[18]);
hipLaunchKernelGGL(( scalarDoubleIndexes), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
n,
scalar,
xPointer,
extraParamsPointer,
resultPointer,
resultIndexesPointer, allocPointer);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
 * @param biasCorrected
 */
double NativeOps::execSummaryStatsScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,bool biasCorrected){
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
if (debug && verbose)
printf("D16 opNum:[%i]\n", opNum);
// dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[17], deviceProperties[getDeviceId(extraPointers[2])]);
double *resultPointer = reinterpret_cast<double *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
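    // extraPointers[4] is handed to the reduce kernels as a reduction scratch buffer (assumed to be
    // device memory large enough for cross-block partial results).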
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[17], 1, sizeof(double), 8);
hipLaunchKernelGGL(( summaryStatsReduceDouble), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer,
nullptr, 0,
nullptr,
1,
1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
checkCudaErrors(hipStreamSynchronize(*stream));
double result = resultPointer[0];
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
 * @param biasCorrected
 */
void NativeOps::execSummaryStatsDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,bool biasCorrected) {
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
if (debug && verbose)
printf("D17 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[17], deviceProperties[getDeviceId(extraPointers[2])]);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[17], 1, sizeof(double), 8);
hipLaunchKernelGGL(( summaryStatsReduceDouble), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer,
resultShapeInfoPointer, shape::rank(hostZShapeInfo),
nullptr,
1,
1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfoBuffer
* @param dimension
* @param dimensionLength
 * @param biasCorrected
 */
void NativeOps::execSummaryStatsDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfoBuffer,
Nd4jPointer dimension, int dimensionLength,bool biasCorrected){
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfoBuffer);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
if (debug && verbose)
printf("D18 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[17], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[17], dimensionLength, sizeof(double), 8);
hipLaunchKernelGGL(( summaryStatsReduceDouble), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer,
resultShapeInfoPointer, shape::rank(hostZShapeInfo),
dimensionPointer,
dimensionLength,
1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xStride
* @param result
* @param resultStride
* @param extraParams
* @param n
*/
void NativeOps::execTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
int xStride,
Nd4jPointer result,
int resultStride,
Nd4jPointer extraParams,
Nd4jIndex n) {
double *xPointer = reinterpret_cast<double *>(dx);
double *resultPointer = reinterpret_cast<double *>(result);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
if (debug && verbose)
printf("D19 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[16], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[16]);
hipLaunchKernelGGL(( transformDouble), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
n,
xPointer,
xStride,
extraParamsPointer,
resultPointer,resultStride, allocPointer, reductionPointer);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
*/
void NativeOps::execTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams){
double *xPointer = reinterpret_cast<double *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
if (debug && verbose)
printf("D20 opNum:[%i]\n", opNum);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[1], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
    // pointer to the device-side scratch buffer (extraPointers[6]) used by the special ops below
double *specialPointer = reinterpret_cast<double *>(extraPointers[6]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[1]);
int *dimension = (int *) specialPointer;
int *maxDimension = dimension + 1;
int *maxShapeBuffer = (int *) maxDimension + 1;
double * special = (double *) maxShapeBuffer + (MAX_RANK * 2 + 4);
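    // The special buffer (extraPointers[6]) is carved up in place: one int for 'dimension', one for
    // 'maxDimension', a shape buffer of MAX_RANK * 2 + 4 ints, and then the working array 'special'
    // that receives the intermediate reduction results.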
    // workaround for the special ops (38..41: the SoftMax family and IsMax), which effectively
    // reduce into scalars; they are composed from other ops below
if (opNum >= 38 && opNum <= 41) {
if (shape::isVector(hostXShapeInfo) && opNum != 41) {
            // if the input is a vector, launch the op directly in a single block
int length = shape::length(hostXShapeInfo);
int block = nd4j::math::nd4j_min<int>(256, length);
hipLaunchKernelGGL(( transformDouble), dim3(1), dim3(block),launchDims.z + (block * sizeof(double) * 8), *stream ,
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer, resultShapeInfoPointer, shape::rank(hostZShapeInfo), allocPointer, reductionPointer);
} else {
// going for blockwise specials
//float *xpf = reinterpret_cast<float *>(dx);
int *shape = shape::shapeOf(hostXShapeInfo);
//printf("Rows num: %i\n", shape[0]);
switch (opNum) {
case 40: // LogSoftMax
case 39: // SoftMax Derivative
case 38: {// softmax
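                    // SoftMax, LogSoftMax and the SoftMax derivative are composed from the generic
                    // reduce, broadcast and transform ops (see the numbered steps below). The
                    // temporary pointer table swaps in alternate TAD buffers (extraPointers[12..14])
                    // for the reduction steps and restores the originals afterwards.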
Nd4jPointer tempPointers[16];
tempPointers[0] = extraPointers[0];
tempPointers[1] = extraPointers[1];
tempPointers[2] = extraPointers[2];
tempPointers[3] = extraPointers[3];
tempPointers[4] = extraPointers[4];
tempPointers[5] = extraPointers[5];
tempPointers[6] = extraPointers[6];
tempPointers[7] = extraPointers[7];
tempPointers[8] = extraPointers[8];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
tempPointers[12] = extraPointers[12];
tempPointers[13] = extraPointers[13];
tempPointers[14] = extraPointers[14];
tempPointers[15] = extraPointers[15];
int maxShape[2] = {shape::shapeOf(hostXShapeInfo)[0], 1};
int *hostMaxShapeBuffer = shape::shapeBuffer(2, maxShape);
tempPointers[7] = (Nd4jPointer) hostMaxShapeBuffer;
tempPointers[8] = (Nd4jPointer) hostMaxShapeBuffer;
                        hipLaunchKernelGGL(( prepareShapeBuffer), dim3(1), dim3(1), 128, *stream, dimension, maxDimension, maxShapeBuffer, shape[0]);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
tempPointers[9] = extraPointers[12];
tempPointers[10] = extraPointers[13];
tempPointers[11] = extraPointers[14];
                        // step 1: max (reduce op 3) along maxDimension into 'special'
execReduceDouble(tempPointers, 3, dx, xShapeInfo, extraParams, (Nd4jPointer) special,
(Nd4jPointer) maxShapeBuffer, (Nd4jPointer) maxDimension, 1);
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
                        // step 2: subtract the max (broadcast op 1)
execBroadcastDouble(tempPointers, 1, dx, xShapeInfo, (Nd4jPointer) special,
(Nd4jPointer) maxShapeBuffer, dx, xShapeInfo, (Nd4jPointer) dimension, 1);
                        // step 3: exp (transform op 3) in place
execTransformDouble(extraPointers, 3, dx, xShapeInfo, dx, xShapeInfo, extraParams);
tempPointers[8] = tempPointers[7];
tempPointers[9] = extraPointers[12];
tempPointers[10] = extraPointers[13];
tempPointers[11] = extraPointers[14];
                        // step 4: sum (reduce op 1) along maxDimension into 'special'
execReduceDouble(tempPointers, 1, dx, xShapeInfo, extraParams, (Nd4jPointer) special,
(Nd4jPointer) maxShapeBuffer, (Nd4jPointer) maxDimension, 1);
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
                        // step 5: divide by the sum (broadcast op 3)
execBroadcastDouble(tempPointers, 3, dx, xShapeInfo, (Nd4jPointer) special,
(Nd4jPointer) maxShapeBuffer, dx, xShapeInfo, (Nd4jPointer) dimension, 1);
                        // step 6: log (transform op 5) for LogSoftMax, or the derivative transform (op 42) for the SoftMax derivative
if (opNum == 40)
execTransformDouble(extraPointers, 5, dx, xShapeInfo, dx, xShapeInfo, extraParams);
else if (opNum == 39)
execTransformDouble(extraPointers, 42, dx, xShapeInfo, dx, xShapeInfo, extraParams);
                        delete[] hostMaxShapeBuffer; // shape::shapeBuffer() presumably allocates an array, so array delete is required
break;
}
case 41: {
// IsMax along all dimensions
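                    // Two paths: when no extra parameters are supplied, the op is treated as a
                    // whole-array IsMax: the argmax index is computed on the device and
                    // fillIsMaxDouble marks that position in the result. Otherwise the argmax is
                    // computed per TAD along the dimension held in extraPointers[15] and
                    // fillDimensionalIsMaxDouble scatters the markers.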
bool scalarCheat = false;
if (extraParamsPointer == nullptr) {
scalarCheat = true;
} else {
//extraParamsPointer == nullptr || (shape::isVector(hostXShapeInfo))
//if (shape::isVector(hostXShapeInfo) && extraParamsPointer[1] == 1) {
// scalarCheat = true;
//}
}
if (scalarCheat) {
//printf("Going for scalar IsMax\n");
int maxIdx = (int) execIndexReduceScalarDouble(extraPointers, 0, dx, xShapeInfo, extraParams);
int targetIdx = 0;
                        if (shape::order(hostXShapeInfo) == 'c' || (shape::order(hostXShapeInfo) == 'f' && maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1] >= shape::length(hostXShapeInfo)))
targetIdx = maxIdx;
else
targetIdx = maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1];
hipLaunchKernelGGL(( fillIsMaxDouble), dim3(1), dim3(128), 0, *stream , resultPointer, shape::length(hostXShapeInfo), targetIdx);
} else {
// going for dimension-based IsMax
//printf("Going for dimension-based IsMax\n");
int *tadMaxShapeInfo = reinterpret_cast<int *> (extraPointers[10]);
int *tadMaxOffsets = reinterpret_cast<int *> (extraPointers[11]);
int *dimensionPointer = reinterpret_cast<int *> (extraPointers[15]);
// we call for IMax on specified dimension
execIndexReduceDouble(extraPointers, 0, dx, xShapeInfo, extraParams, (Nd4jPointer) special, (Nd4jPointer) hostYShapeInfo, (Nd4jPointer) dimensionPointer, 1);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
                        // at this point all IMax indexes are gathered; now scatter them into the result
hipLaunchKernelGGL(( fillDimensionalIsMaxDouble), dim3(768), dim3(16), funcAttributes[37].sharedSizeBytes, *stream, special, hostYShapeInfo, resultPointer, resultShapeInfoPointer, tadMaxShapeInfo, dimensionPointer, 1, tadMaxOffsets );
checkCudaErrors(hipStreamSynchronize(*stream));
}
break;
}
default: {
printf("Bad case for transformFloat\n");
break;
}
}
}
} else {
hipLaunchKernelGGL(( transformDouble), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer, resultShapeInfoPointer, shape::rank(hostZShapeInfo), allocPointer, reductionPointer);
}
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
 * @param xIndexes
 * @param resultIndexes
*/
void NativeOps::execTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer xIndexes,
Nd4jPointer resultIndexes) {
double *xPointer = reinterpret_cast<double *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
int *resultIndexesPointer = reinterpret_cast<int *>(resultIndexes);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
if (debug && verbose)
printf("D21 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[14], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[14]);
hipLaunchKernelGGL(( transformDoubleIndexes), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer,
resultIndexesPointer, allocPointer, reductionPointer);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
*/
float NativeOps::execIndexReduceScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
if (debug && verbose)
printf("F1 opNum:[%i]\n", opNum);
// dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[13], deviceProperties[getDeviceId(extraPointers[2])]);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
float *resultPointer = reinterpret_cast<float *>(extraPointers[5]);
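    // As in the double variants, extraPointers[5] serves as the host-visible landing buffer for the
    // scalar result, read back after the unconditional stream sync below.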
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[13], 1, sizeof(float), 2);
if (debug && verbose && launchDims.x == 1)
printf("AF1 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( indexReduceFloat), dim3(launchDims.x),dim3(launchDims.y), launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer,
nullptr, 0,
nullptr,
1,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
checkCudaErrors(hipStreamSynchronize(*stream));
float result = resultPointer[0];
return result;
}
float NativeOps::execIndexReduceScalarHalf(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams){
nd4j::float16 *xPointer = reinterpret_cast<nd4j::float16 *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
nd4j::float16 *extraParamsPointer = reinterpret_cast<nd4j::float16 *>(extraParams);
if (debug && verbose)
printf("H1 opNum:[%i]\n", opNum);
// dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[13], deviceProperties[getDeviceId(extraPointers[2])]);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
nd4j::float16 *resultPointer = reinterpret_cast<nd4j::float16 *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
nd4j::float16 *reductionPointer = reinterpret_cast<nd4j::float16 *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[13], 1, sizeof(nd4j::float16), 2);
if (debug && verbose && launchDims.x == 1)
printf("AH1 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( indexReduceHalf), dim3(launchDims.x),dim3(launchDims.y), launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer,
nullptr, 0,
nullptr,
1,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
checkCudaErrors(hipStreamSynchronize(*stream));
float result = (float) resultPointer[0];
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfoBuffer
* @param dimension
* @param dimensionLength
*/
void NativeOps::execIndexReduceFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfoBuffer,
Nd4jPointer dimension,
int dimensionLength){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfoBuffer);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
if (debug && verbose)
printf("F2 opNum:[%i]\n", opNum);
// dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[13], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[13], dimensionLength, sizeof(float), 2);
if (verbose && launchDims.x == 1)
printf("AF2 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( indexReduceFloat), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer,
resultShapeInfoPointer, shape::rank(hostZShapeInfo),
dimensionPointer,
dimensionLength,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::execIndexReduceHalf(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfoBuffer,
Nd4jPointer dimension,
int dimensionLength){
nd4j::float16 *xPointer = reinterpret_cast<nd4j::float16 *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
nd4j::float16 *extraParamsPointer = reinterpret_cast<nd4j::float16 *>(extraParams);
nd4j::float16 *resultPointer = reinterpret_cast<nd4j::float16 *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfoBuffer);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
if (debug && verbose)
printf("H2 opNum:[%i]\n", opNum);
// dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[13], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
nd4j::float16 *reductionPointer = reinterpret_cast<nd4j::float16 *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[13], dimensionLength, sizeof(nd4j::float16), 2);
if (verbose && launchDims.x == 1)
printf("AH2 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( indexReduceHalf), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer,
resultShapeInfoPointer, shape::rank(hostZShapeInfo),
dimensionPointer,
dimensionLength,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execBroadcastFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer dimension, int dimensionLength){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *yPointer = reinterpret_cast<float *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
if (debug && verbose)
printf("F3 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[12], deviceProperties[getDeviceId(extraPointers[2])]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[12], 1, sizeof(float), 0);
hipLaunchKernelGGL(( broadcastFloat), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
yPointer,
yShapeInfoPointer, shape::rank(hostYShapeInfo),
resultPointer,
resultShapeInfoPointer, shape::rank(hostZShapeInfo),
dimensionPointer,
dimensionLength, deviceTADShapeInfo, deviceTADOffsets);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::execBroadcastHalf(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer dimension, int dimensionLength){
nd4j::float16 *xPointer = reinterpret_cast<nd4j::float16 *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
nd4j::float16 *yPointer = reinterpret_cast<nd4j::float16 *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
nd4j::float16 *resultPointer = reinterpret_cast<nd4j::float16 *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
if (debug && verbose)
printf("H3 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[12], deviceProperties[getDeviceId(extraPointers[2])]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[12], 1, sizeof(nd4j::float16), 0);
hipLaunchKernelGGL(( broadcastHalf), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
yPointer,
yShapeInfoPointer, shape::rank(hostYShapeInfo),
resultPointer,
resultShapeInfoPointer, shape::rank(hostZShapeInfo),
dimensionPointer,
dimensionLength, deviceTADShapeInfo, deviceTADOffsets);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xStride
* @param y
* @param yStride
* @param result
* @param resultStride
* @param extraParams
* @param n
*/
void NativeOps::execPairwiseTransformFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
int xStride,
Nd4jPointer y,
int yStride,
Nd4jPointer result,
int resultStride,
Nd4jPointer extraParams, Nd4jIndex n){
float *xPointer = reinterpret_cast<float *>(dx);
float *yPointer = reinterpret_cast<float *>(y);
float *resultPointer = reinterpret_cast<float *>(result);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
if (debug && verbose)
printf("F4 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[11], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
//dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), (int *) extraPointers[0], nullptr, (int *) extraPointers[7], 1, sizeof(float), 0);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[11]);
if (verbose && launchDims.x == 1)
printf("AF4 opNum:[%i], xLength: [%i]\n", opNum, shape::length(hostXShapeInfo));
hipLaunchKernelGGL(( pairWiseTransformStridedFloat), dim3(launchDims.x),dim3(launchDims.y), launchDims.z, *stream,
opNum,
n,
xPointer,
yPointer,
xStride,
yStride,
extraParamsPointer,
resultPointer,
resultStride, allocationPointer, deviceTADShapeInfo);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::execPairwiseTransformHalf(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
int xStride,
Nd4jPointer y,
int yStride,
Nd4jPointer result,
int resultStride,
Nd4jPointer extraParams, Nd4jIndex n){
nd4j::float16 *xPointer = reinterpret_cast<nd4j::float16 *>(dx);
nd4j::float16 *yPointer = reinterpret_cast<nd4j::float16 *>(y);
nd4j::float16 *resultPointer = reinterpret_cast<nd4j::float16 *>(result);
nd4j::float16 *extraParamsPointer = reinterpret_cast<nd4j::float16 *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
if (debug && verbose)
printf("H4 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[11], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
//dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), (int *) extraPointers[0], nullptr, (int *) extraPointers[7], 1, sizeof(float), 0);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[11]);
if (verbose && launchDims.x == 1)
printf("AH4 opNum:[%i], xLength: [%i]\n", opNum, shape::length(hostXShapeInfo));
hipLaunchKernelGGL(( pairWiseTransformStridedHalf), dim3(launchDims.x),dim3(launchDims.y), launchDims.z, *stream,
opNum,
n,
xPointer,
yPointer,
xStride,
yStride,
extraParamsPointer,
resultPointer,
resultStride, allocationPointer, deviceTADShapeInfo);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param xIndexes
* @param yIndexes
* @param resultIndexes
*/
void NativeOps::execPairwiseTransformFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer xIndexes,
Nd4jPointer yIndexes,
Nd4jPointer resultIndexes){
float *xPointer = reinterpret_cast<float *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *yPointer = reinterpret_cast<float *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
int *xIndexesPointer = reinterpret_cast<int *>(xIndexes);
int *yIndexesPointer = reinterpret_cast<int *>(yIndexes);
int *resultIndexesPointer = reinterpret_cast<int *>(resultIndexes);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
if (debug && verbose)
printf("F5 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[10], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[10], 1, sizeof(float), 0);
if (verbose && launchDims.x == 1)
printf("AF5 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( pairWiseTransformFloatIndex), dim3(launchDims.x),dim3(launchDims.y), launchDims.z, *stream,
opNum,
xPointer,
yPointer,
extraParamsPointer,
resultPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
yShapeInfoPointer, shape::rank(hostYShapeInfo),
resultShapeInfoPointer, shape::rank(hostZShapeInfo),
xIndexesPointer,
yIndexesPointer,
resultIndexesPointer, allocationPointer, deviceTADShapeInfo);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::execPairwiseTransformHalf(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer xIndexes,
Nd4jPointer yIndexes,
Nd4jPointer resultIndexes){
nd4j::float16 *xPointer = reinterpret_cast<nd4j::float16 *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
nd4j::float16 *yPointer = reinterpret_cast<nd4j::float16 *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
nd4j::float16 *resultPointer = reinterpret_cast<nd4j::float16 *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
nd4j::float16 *extraParamsPointer = reinterpret_cast<nd4j::float16 *>(extraParams);
int *xIndexesPointer = reinterpret_cast<int *>(xIndexes);
int *yIndexesPointer = reinterpret_cast<int *>(yIndexes);
int *resultIndexesPointer = reinterpret_cast<int *>(resultIndexes);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
if (debug && verbose)
printf("H5 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[10], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[10], 1, sizeof(nd4j::float16), 0);
if (verbose && launchDims.x == 1)
printf("AH5 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( pairWiseTransformHalfIndex), dim3(launchDims.x),dim3(launchDims.y), launchDims.z, *stream,
opNum,
xPointer,
yPointer,
extraParamsPointer,
resultPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
yShapeInfoPointer, shape::rank(hostYShapeInfo),
resultShapeInfoPointer, shape::rank(hostZShapeInfo),
xIndexesPointer,
yIndexesPointer,
resultIndexesPointer, allocationPointer, deviceTADShapeInfo);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
*/
void NativeOps::execPairwiseTransformFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams){
float *xPointer = reinterpret_cast<float *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *yPointer = reinterpret_cast<float *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
if (debug && verbose)
printf("F6 opNum:[%i]\n", opNum);
// dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[9], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
//dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), (int *) extraPointers[0], resultShapeInfoPointer, yShapeInfoPointer, 1, sizeof(float), 0);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[9]);
if (verbose && launchDims.x == 1) {
printf("AF6 opNum:[%i], launchDims.x: [%i], launchDims.y: [%i]\n", opNum, launchDims.x, launchDims.y);
shape::printShapeInfoLinear(hostXShapeInfo);
}
hipLaunchKernelGGL(( pairWiseTransformFloat), dim3(launchDims.x),dim3(launchDims.y), launchDims.z, *stream,
opNum,
xPointer,
yPointer,
extraParamsPointer,
resultPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
yShapeInfoPointer, shape::rank(hostYShapeInfo),
resultShapeInfoPointer, shape::rank(hostZShapeInfo), allocationPointer, deviceTADShapeInfo);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::execPairwiseTransformHalf(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams){
nd4j::float16 *xPointer = reinterpret_cast<nd4j::float16 *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
nd4j::float16 *yPointer = reinterpret_cast<nd4j::float16 *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
nd4j::float16 *resultPointer = reinterpret_cast<nd4j::float16 *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
nd4j::float16 *extraParamsPointer = reinterpret_cast<nd4j::float16 *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
if (debug && verbose)
printf("H6 opNum:[%i]\n", opNum);
// dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[9], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
//dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), (int *) extraPointers[0], resultShapeInfoPointer, yShapeInfoPointer, 1, sizeof(float), 0);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[9]);
if (verbose && launchDims.x == 1) {
printf("HF6 opNum:[%i], launchDims.x: [%i], launchDims.y: [%i]\n", opNum, launchDims.x, launchDims.y);
shape::printShapeInfoLinear(hostXShapeInfo);
}
hipLaunchKernelGGL(( pairWiseTransformHalf), dim3(launchDims.x),dim3(launchDims.y), launchDims.z, *stream,
opNum,
xPointer,
yPointer,
extraParamsPointer,
resultPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
yShapeInfoPointer, shape::rank(hostYShapeInfo),
resultShapeInfoPointer, shape::rank(hostZShapeInfo), allocationPointer, deviceTADShapeInfo);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduceFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfo) {
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
if (debug && verbose)
printf("F7 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[8], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[8], 1, sizeof(float), 1);
if (verbose && launchDims.x == 1)
printf("AF7 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( reduceScalarFloat), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
nullptr,
1,
reductionPointer, deviceTADShapeInfo);
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::execReduceHalf(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfo) {
nd4j::float16 *xPointer = reinterpret_cast<nd4j::float16 *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
nd4j::float16 *resultPointer = reinterpret_cast<nd4j::float16 *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
nd4j::float16 *extraParamsPointer = reinterpret_cast<nd4j::float16 *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
if (debug && verbose)
printf("H7 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[8], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
nd4j::float16 *reductionPointer = reinterpret_cast<nd4j::float16 *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[8], 1, sizeof(nd4j::float16), 1);
if (verbose && launchDims.x == 1)
printf("AH7 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( reduceScalarHalf), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
nullptr,
1,
reductionPointer, deviceTADShapeInfo);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
 * @param dimension
 * @param dimensionLength
 */
void NativeOps::execReduceFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer dimension,int dimensionLength){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
if (debug && verbose)
printf("F8 opNum:[%i]\n", opNum);
// dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[8], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
// DO NOT REMOVE COMMENTS OR CODE BELOW.
// [email protected]
// shape::TAD *tad = new shape::TAD();
// tad->init(xShapeInfoPointer, dimensionPointer, dimensionLength);
// tad->setOutputBuffer(allocPointer);
// tad->createTadOnlyShapeInfo();
// shape::printShapeInfo(tad->tadOnlyShapeInfo);
// dim3 getBetterDimensions(int deviceId, int numTads, int tadLength, int xRank, int yRank, int zRank, int dimensionLength, int elementSize, int reduction)
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[8], dimensionLength, sizeof(float), 1);
if (verbose && launchDims.x == 1)
printf("AF8 opNum:[%i]\n", opNum);
if (dimensionLength == 1) {
hipLaunchKernelGGL(( reduceFloat1D), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
} else if (shape::rank(hostTADShapeInfo) <= 3) {
hipLaunchKernelGGL(( reduceFloat6D), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
} else {
hipLaunchKernelGGL(( reduceFloat), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
}
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
//delete tad;
}
void NativeOps::execReduceHalf(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer dimension,int dimensionLength){
nd4j::float16 *xPointer = reinterpret_cast<nd4j::float16 *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
nd4j::float16 *resultPointer = reinterpret_cast<nd4j::float16 *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
nd4j::float16 *extraParamsPointer = reinterpret_cast<nd4j::float16 *>(extraParams);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
if (debug && verbose)
printf("H8 opNum:[%i]\n", opNum);
// dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[8], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
nd4j::float16 *reductionPointer = reinterpret_cast<nd4j::float16 *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[8], dimensionLength, sizeof(nd4j::float16), 1);
if (verbose && launchDims.x == 1)
printf("AH8 opNum:[%i]\n", opNum);
if (dimensionLength == 1) {
hipLaunchKernelGGL(( reduceHalf1D), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
} else if (shape::rank(hostTADShapeInfo) <= 3) {
hipLaunchKernelGGL(( reduceHalf6D), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
} else {
hipLaunchKernelGGL(( reduceHalf), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
}
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
//delete tad;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @return
*/
float NativeOps::execReduceScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
if (debug && verbose)
printf("F9 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[8], deviceProperties[getDeviceId(extraPointers[2])]);
float *resultPointer = reinterpret_cast<float *>(extraPointers[5]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
//dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[8], 1, sizeof(float), 1);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 8, funcAttributes[8]);
if (verbose && launchDims.x == 1)
printf("AF9 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( reduceScalarFloat), dim3(launchDims.x),dim3(launchDims.y), launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
nullptr,
nullptr,
1,
reductionPointer, deviceTADShapeInfo
);
checkCudaErrors(hipStreamSynchronize(*stream));
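// resultPointer aliases the buffer supplied in extraPointers[5] and is read directly on the
// host below, so the stream synchronize above must finish before the value is fetched.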
float result = resultPointer[0];
return result;
}
float NativeOps::execReduceScalarHalf(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams){
nd4j::float16 *xPointer = reinterpret_cast<nd4j::float16 *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
nd4j::float16 *extraParamsPointer = reinterpret_cast<nd4j::float16 *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
if (debug && verbose)
printf("H9 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[8], deviceProperties[getDeviceId(extraPointers[2])]);
nd4j::float16 *resultPointer = reinterpret_cast<nd4j::float16 *>(extraPointers[5]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
nd4j::float16 *reductionPointer = reinterpret_cast<nd4j::float16 *>(extraPointers[4]);
//dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[8], 1, sizeof(float), 1);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 2, funcAttributes[8]);
if (verbose && launchDims.x == 1)
printf("AH9 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( reduceScalarHalf), dim3(launchDims.x),dim3(launchDims.y), launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
nullptr,
nullptr,
1,
reductionPointer, deviceTADShapeInfo
);
checkCudaErrors(hipStreamSynchronize(*stream));
float result = (float) resultPointer[0];
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduce3Float(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParamsVals,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *yPointer = reinterpret_cast<float *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParamsVals);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
if (debug && verbose)
printf("F10 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[7], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
//dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), (int *) extraPointers[0], yShapeInfoPointer, resultShapeInfoPointer, 1, sizeof(float), 2);
//dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), (int *) extraPointers[0], yShapeInfoPointer);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[7]);
if (verbose && launchDims.x == 1)
printf("AF10 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( reduce3ScalarFloat), dim3(1),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
nullptr,
1,
1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::execReduce3Half(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParamsVals,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo){
nd4j::float16 *xPointer = reinterpret_cast<nd4j::float16 *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
nd4j::float16 *yPointer = reinterpret_cast<nd4j::float16 *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
nd4j::float16 *resultPointer = reinterpret_cast<nd4j::float16 *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
nd4j::float16 *extraParamsPointer = reinterpret_cast<nd4j::float16 *>(extraParamsVals);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
if (debug && verbose)
printf("H10 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[7], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
//dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), (int *) extraPointers[0], yShapeInfoPointer, resultShapeInfoPointer, 1, sizeof(float), 2);
//dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), (int *) extraPointers[0], yShapeInfoPointer);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 8, funcAttributes[7]);
if (verbose && launchDims.x == 1)
printf("AH10 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( reduce3ScalarHalf), dim3(1),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
nullptr,
1,
1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
*/
float NativeOps::execReduce3ScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParamsVals,
Nd4jPointer y,
Nd4jPointer yShapeInfo) {
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *yPointer = reinterpret_cast<float *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParamsVals);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
if (debug && verbose)
printf("F11 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[7], deviceProperties[getDeviceId(extraPointers[2])]);
float *resultPointer = reinterpret_cast<float *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
//dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), (int *) extraPointers[0], yShapeInfoPointer, nullptr, 1, sizeof(float), 2);
//dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), (int *) extraPointers[0], yShapeInfoPointer);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 32, funcAttributes[7]);
if (verbose && launchDims.x == 1)
printf("AF11 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( reduce3ScalarFloat), dim3(1),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
extraParamsPointer,
resultPointer,
nullptr,
nullptr,
1,
1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets);
checkCudaErrors(hipStreamSynchronize(*stream));
float result = resultPointer[0];
return result;
}
float NativeOps::execReduce3ScalarHalf(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParamsVals,
Nd4jPointer y,
Nd4jPointer yShapeInfo) {
nd4j::float16 *xPointer = reinterpret_cast<nd4j::float16 *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
nd4j::float16 *yPointer = reinterpret_cast<nd4j::float16 *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
nd4j::float16 *extraParamsPointer = reinterpret_cast<nd4j::float16 *>(extraParamsVals);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
if (debug && verbose)
printf("H11 opNum:[%i]\n", opNum);
nd4j::float16 *resultPointer = reinterpret_cast<nd4j::float16 *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[7]);
if (verbose && launchDims.x == 1)
printf("AH11 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( reduce3ScalarHalf), dim3(1),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
extraParamsPointer,
resultPointer,
nullptr,
nullptr,
1,
1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets);
checkCudaErrors(hipStreamSynchronize(*stream));
float result = (float) resultPointer[0];
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfoBuffer
* @param dimension
* @param dimensionLength
*/
void NativeOps::execReduce3Float(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParamsVals,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfoBuffer,
Nd4jPointer dimension,
int dimensionLength){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *yPointer = reinterpret_cast<float *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfoBuffer);
float *extraParamsPointer = reinterpret_cast<float *>(extraParamsVals);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[7], deviceProperties[getDeviceId(extraPointers[2])]);
if (debug && verbose)
printf("F12 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
//dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), (int *) extraPointers[0], yShapeInfoPointer, resultShapeInfoPointer, dimensionLength, sizeof(float), 2);
//dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), (int *) extraPointers[0], yShapeInfoPointer);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[7]);
if (verbose && launchDims.x == 1)
printf("AF12 opNum:[%i]\n", opNum);
if (shape::isScalar(hostZShapeInfo) || dimensionPointer == nullptr) {
hipLaunchKernelGGL(( reduce3ScalarFloat), dim3(1),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets);
} else {
hipLaunchKernelGGL(( reduce3Float), dim3(1),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets);
}
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::execReduce3Half(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParamsVals,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfoBuffer,
Nd4jPointer dimension,
int dimensionLength){
nd4j::float16 *xPointer = reinterpret_cast<nd4j::float16 *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
nd4j::float16 *yPointer = reinterpret_cast<nd4j::float16 *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
nd4j::float16 *resultPointer = reinterpret_cast<nd4j::float16 *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfoBuffer);
nd4j::float16 *extraParamsPointer = reinterpret_cast<nd4j::float16 *>(extraParamsVals);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[7], deviceProperties[getDeviceId(extraPointers[2])]);
if (debug && verbose)
printf("H12 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 8, funcAttributes[7]);
if (verbose && launchDims.x == 1)
printf("AH12 opNum:[%i]\n", opNum);
if (shape::isScalar(hostZShapeInfo) || dimensionPointer == nullptr) {
hipLaunchKernelGGL(( reduce3ScalarHalf), dim3(1),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets);
} else {
hipLaunchKernelGGL(( reduce3Half), dim3(1),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets);
}
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xStride
* @param result
* @param resultStride
* @param scalar
* @param extraParams
* @param n
*/
void NativeOps::execScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
int xStride,
Nd4jPointer result,
int resultStride,
double scalar,
Nd4jPointer extraParams,
Nd4jIndex n){
float *xPointer = reinterpret_cast<float *>(x);
float *resultPointer = reinterpret_cast<float *>(result);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
if (debug && verbose)
printf("F13 opNum:[%i]\n", opNum);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[6]);
if (verbose && launchDims.x == 1)
printf("AF13 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( scalarFloat), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
n,
scalar,
xPointer,
xStride,
extraParamsPointer,
resultPointer,resultStride, allocPointer);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
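// Illustrative host-side call for the strided variant above (a minimal sketch: the op code 0,
// dX, dZ and n are placeholders, not values defined in this file):
//   ops.execScalarFloat(extraPointers, 0, dX, 1, dZ, 1, 2.0, nullptr, n);
// i.e. result[i] = op(x[i], 2.0) applied element-wise over n contiguous floats.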
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param scalar
* @param extraParams
* @param n
*/
void NativeOps::execScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
float scalar,
Nd4jPointer extraParams){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
Nd4jIndex n = shape::length(hostXShapeInfo);
if (debug && verbose)
printf("F14 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[5], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[5]);
if (verbose && launchDims.x == 1)
printf("AF14 opNum:[%i], xLength:[%i]\n", opNum, shape::length(hostXShapeInfo));
hipLaunchKernelGGL(( scalarFloat), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream,
opNum,
scalar,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer,resultShapeInfoPointer, shape::rank(hostZShapeInfo), allocPointer );
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::execScalarHalf(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
float scalar,
Nd4jPointer extraParams){
nd4j::float16 *xPointer = reinterpret_cast<nd4j::float16 *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
nd4j::float16 *resultPointer = reinterpret_cast<nd4j::float16 *>(result);
nd4j::float16 *extraParamsPointer = reinterpret_cast<nd4j::float16 *>(extraParams);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
Nd4jIndex n = shape::length(hostXShapeInfo);
if (debug && verbose)
printf("H14 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[5], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[5]);
if (verbose && launchDims.x == 1)
printf("AH14 opNum:[%i], xLength:[%i]\n", opNum, shape::length(hostXShapeInfo));
hipLaunchKernelGGL(( scalarHalf), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream,
opNum,
scalar,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer,resultShapeInfoPointer, shape::rank(hostZShapeInfo), allocPointer );
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param scalar
* @param extraParams
* @param n
* @param xIndexes
* @param resultIndexes
*/
void NativeOps::execScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
double scalar,
Nd4jPointer extraParams,
Nd4jPointer xIndexes,
Nd4jPointer resultIndexes){
float *xPointer = reinterpret_cast<float *>(x);
float *resultPointer = reinterpret_cast<float *>(result);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
int *resultIndexesPointer = reinterpret_cast<int *>(resultIndexes);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
Nd4jIndex n = shape::length(hostXShapeInfo);
if (debug && verbose)
printf("F15 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[4], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[4]);
if (verbose && launchDims.x == 1)
printf("AF15 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( scalarFloatIndexes), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
n,
scalar,
xPointer,
extraParamsPointer,
resultPointer,
resultIndexesPointer, allocPointer);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
*/
float NativeOps::execSummaryStatsScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,bool biasCorrected){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
if (debug && verbose)
printf("F16 opNum:[%i]\n", opNum);
// dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[3], deviceProperties[getDeviceId(extraPointers[2])]);
float *resultPointer = reinterpret_cast<float *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[3], 1, sizeof(float), 8);
if (verbose && launchDims.x == 1)
printf("AF16 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( summaryStatsReduceFloat), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer,
nullptr, 0,
nullptr,
1,
1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
checkCudaErrors(hipStreamSynchronize(*stream));
float result = resultPointer[0];
return result;
}
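// Illustrative host-side call (a sketch with a placeholder op code; biasCorrected selects the
// bias-corrected estimator for the summary statistic being computed):
//   float stat = ops.execSummaryStatsScalarFloat(extraPointers, 0, dX, dXShapeInfo, nullptr, true);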
float NativeOps::execSummaryStatsScalarHalf(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,bool biasCorrected){
nd4j::float16 *xPointer = reinterpret_cast<nd4j::float16 *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
nd4j::float16 *extraParamsPointer = reinterpret_cast<nd4j::float16 *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
if (debug && verbose)
printf("H16 opNum:[%i]\n", opNum);
// dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[3], deviceProperties[getDeviceId(extraPointers[2])]);
nd4j::float16 *resultPointer = reinterpret_cast<nd4j::float16 *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
nd4j::float16 *reductionPointer = reinterpret_cast<nd4j::float16 *>(extraPointers[4]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[3], 1, sizeof(nd4j::float16), 8);
if (verbose && launchDims.x == 1)
printf("AH16 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( summaryStatsReduceHalf), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer,
nullptr, 0,
nullptr,
1,
1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
checkCudaErrors(hipStreamSynchronize(*stream));
float result = (float) resultPointer[0];
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execSummaryStatsFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,bool biasCorrected){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
if (debug && verbose)
printf("F17 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[3], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[3], 1, sizeof(float), 8);
if (verbose && launchDims.x == 1)
printf("AF17 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( summaryStatsReduceFloat), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer,
resultShapeInfoPointer, shape::rank(hostZShapeInfo),
nullptr,
1,
1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::execSummaryStatsHalf(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,bool biasCorrected){
nd4j::float16 *xPointer = reinterpret_cast<nd4j::float16 *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
nd4j::float16 *resultPointer = reinterpret_cast<nd4j::float16 *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
nd4j::float16 *extraParamsPointer = reinterpret_cast<nd4j::float16 *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
if (debug && verbose)
printf("H17 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[3], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
nd4j::float16 *reductionPointer = reinterpret_cast<nd4j::float16 *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[3], 1, sizeof(nd4j::float16), 8);
if (verbose && launchDims.x == 1)
printf("AH17 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( summaryStatsReduceHalf), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer,
resultShapeInfoPointer, shape::rank(hostZShapeInfo),
nullptr,
1,
1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfoBuffer
* @param dimension
* @param dimensionLength
*/
void NativeOps::execSummaryStatsFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfoBuffer,
Nd4jPointer dimension,
int dimensionLength,bool biasCorrected){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfoBuffer);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
if (debug && verbose)
printf("F18 opNum:[%i]\n", opNum);
// dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[3], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[3], dimensionLength, sizeof(float), 8);
if (verbose && launchDims.x == 1)
printf("AF18 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( summaryStatsReduceFloat), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer,
resultShapeInfoPointer, shape::rank(hostZShapeInfo),
dimensionPointer,
dimensionLength,
1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::execSummaryStatsHalf(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfoBuffer,
Nd4jPointer dimension,
int dimensionLength,bool biasCorrected){
nd4j::float16 *xPointer = reinterpret_cast<nd4j::float16 *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
nd4j::float16 *resultPointer = reinterpret_cast<nd4j::float16 *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfoBuffer);
nd4j::float16 *extraParamsPointer = reinterpret_cast<nd4j::float16 *>(extraParams);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
if (debug && verbose)
printf("H18 opNum:[%i]\n", opNum);
// dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[3], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
nd4j::float16 *reductionPointer = reinterpret_cast<nd4j::float16 *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[3], dimensionLength, sizeof(nd4j::float16), 8);
if (verbose && launchDims.x == 1)
printf("AH18 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( summaryStatsReduceHalf), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer,
resultShapeInfoPointer, shape::rank(hostZShapeInfo),
dimensionPointer,
dimensionLength,
1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xStride
* @param result
* @param resultStride
* @param extraParams
* @param n
*/
void NativeOps::execTransformFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
int xStride,
Nd4jPointer result,
int resultStride,
Nd4jPointer extraParams,
Nd4jIndex n) {
float *xPointer = reinterpret_cast<float *>(dx);
float *resultPointer = reinterpret_cast<float *>(result);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
if (debug && verbose)
printf("F19 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[2], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[2]);
if (verbose && launchDims.x == 1)
printf("AF19 opNum:[%i], xLength: [%i]\n", opNum, shape::length(hostXShapeInfo));
hipLaunchKernelGGL(( transformFloat), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
n,
xPointer,
xStride,
extraParamsPointer,
resultPointer,resultStride, allocPointer, reductionPointer);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::execTransformHalf(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
int xStride,
Nd4jPointer result,
int resultStride,
Nd4jPointer extraParams,
Nd4jIndex n) {
nd4j::float16 *xPointer = reinterpret_cast<nd4j::float16 *>(dx);
nd4j::float16 *resultPointer = reinterpret_cast<nd4j::float16 *>(result);
nd4j::float16 *extraParamsPointer = reinterpret_cast<nd4j::float16 *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
if (debug && verbose)
printf("H19 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[2], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
nd4j::float16 *reductionPointer = reinterpret_cast<nd4j::float16 *>(extraPointers[4]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[2]);
if (verbose && launchDims.x == 1)
printf("AH19 opNum:[%i], xLength: [%i]\n", opNum, shape::length(hostXShapeInfo));
hipLaunchKernelGGL(( transformHalf), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
n,
xPointer,
xStride,
extraParamsPointer,
resultPointer,resultStride, allocPointer, reductionPointer);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execTransformFloat(Nd4jPointer *extraPointers,int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams) {
float *xPointer = reinterpret_cast<float *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
if (debug && verbose)
printf("F20 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[1], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
// pointer to the pre-allocated device scratch ("special") buffer used by the special-case ops below
float *specialPointer = reinterpret_cast<float *>(extraPointers[6]);
int *dimension = (int *) specialPointer;
int *maxDimension = dimension + 1;
int *maxShapeBuffer = (int *) maxDimension + 1;
float * special = (float *) maxShapeBuffer + (MAX_RANK * 2 + 4);
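// Layout of the special buffer carved out above:
//   dimension      -> 1 int (filled by prepareShapeBuffer below)
//   maxDimension   -> 1 int
//   maxShapeBuffer -> MAX_RANK * 2 + 4 ints (shape info for the reduced max/sum vector)
//   special        -> float scratch receiving the intermediate max/sum results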
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[1]);
if (verbose && launchDims.x == 1)
printf("AF20 opNum:[%i]\n", opNum);
// Special handling: ops 38..41 (the SoftMax family and IsMax) are composed from reduce /
// broadcast / transform calls below instead of a single transform kernel.
if (opNum >= 38 && opNum <= 41) {
if (shape::isVector(hostXShapeInfo) && opNum != 41) {
// the input is a vector, so run the op directly in a single block
int length = shape::length(hostXShapeInfo);
int block = nd4j::math::nd4j_min<int>(length, 256);
hipLaunchKernelGGL(( transformFloat), dim3(1), dim3(block), launchDims.z + (block * sizeof(float) * 4), *stream,
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer, resultShapeInfoPointer, shape::rank(hostZShapeInfo), allocPointer, reductionPointer);
} else {
// going for blockwise specials
//float *xpf = reinterpret_cast<float *>(dx);
int *shape = shape::shapeOf(hostXShapeInfo);
switch (opNum) {
case 40: // LogSoftMax
case 39: // SoftMax Derivative
case 38: {// softmax
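// SoftMax / LogSoftMax / SoftMax-derivative are assembled from the primitives already exposed:
//   softmax(x) = exp(x - max(x)) / sum(exp(x - max(x))), with log() on top for LogSoftMax.
// The calls below follow that recipe step by step: reduce max (op 3), broadcast subtract (op 1),
// exp transform (op 3), reduce sum (op 1), broadcast divide (op 3), then log (op 5) or the
// derivative transform (op 42) depending on opNum.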
Nd4jPointer tempPointers[16];
tempPointers[0] = extraPointers[0];
tempPointers[1] = extraPointers[1];
tempPointers[2] = extraPointers[2];
tempPointers[3] = extraPointers[3];
tempPointers[4] = extraPointers[4];
tempPointers[5] = extraPointers[5];
tempPointers[6] = extraPointers[6];
tempPointers[7] = extraPointers[7];
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
tempPointers[12] = extraPointers[12];
tempPointers[13] = extraPointers[13];
tempPointers[14] = extraPointers[14];
tempPointers[15] = extraPointers[15];
int maxShape[2] = {shape::shapeOf(hostXShapeInfo)[0], 1};
int *hostMaxShapeBuffer = shape::shapeBuffer(2, maxShape);
tempPointers[7] = (Nd4jPointer) hostMaxShapeBuffer;
tempPointers[8] = (Nd4jPointer) hostMaxShapeBuffer;
hipLaunchKernelGGL(( prepareShapeBuffer), dim3(1), dim3(1), 128, *stream, dimension, maxDimension, maxShapeBuffer, shape[0]);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
//shape::printShapeInfo(maxShapeBuffer);
tempPointers[9] = extraPointers[12];
tempPointers[10] = extraPointers[13];
tempPointers[11] = extraPointers[14];
// max 3
execReduceFloat(tempPointers, 3, dx, xShapeInfo, extraParams, (Nd4jPointer) special,
(Nd4jPointer) maxShapeBuffer, (Nd4jPointer) maxDimension, 1);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
// sub 1
execBroadcastFloat(tempPointers, 1, dx, xShapeInfo, (Nd4jPointer) special,
(Nd4jPointer) maxShapeBuffer, dx, xShapeInfo, (Nd4jPointer) dimension, 1);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
// exp 3
execTransformFloat(extraPointers, 3, dx, xShapeInfo, dx, xShapeInfo, extraParams);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
tempPointers[8] = tempPointers[7];
tempPointers[9] = extraPointers[12];
tempPointers[10] = extraPointers[13];
tempPointers[11] = extraPointers[14];
//sum 1
execReduceFloat(tempPointers, 1, dx, xShapeInfo, extraParams, (Nd4jPointer) special,
(Nd4jPointer) maxShapeBuffer, (Nd4jPointer) maxDimension, 1);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
// divide 3
execBroadcastFloat(tempPointers, 3, dx, xShapeInfo, (Nd4jPointer) special,
(Nd4jPointer) maxShapeBuffer, dx, xShapeInfo, (Nd4jPointer) dimension, 1);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
// log 3
if (opNum == 40)
execTransformFloat(extraPointers, 5, dx, xShapeInfo, dx, xShapeInfo, extraParams);
else if (opNum == 39)
execTransformFloat(extraPointers, 42, dx, xShapeInfo, dx, xShapeInfo, extraParams);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
delete[] hostMaxShapeBuffer;
break;
}
case 41: {
// IsMax along all dimensions
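// Two paths: without extra params the "scalar cheat" finds the single arg-max over the whole
// array and marks that linear index via fillIsMaxFloat; otherwise IMax runs along the requested
// dimension and fillDimensionalIsMaxFloat marks the winning position inside every TAD.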
int *dimensionHostPointer = reinterpret_cast<int *> (extraPointers[16]);
bool scalarCheat = false;
if (extraParamsPointer == nullptr) {
scalarCheat = true;
} else {
/* //extraParamsPointer == nullptr || (shape::isVector(hostXShapeInfo))
if (shape::isVector(hostXShapeInfo) && dimensionHostPointer[0] == 1) {
scalarCheat = true;
}*/
}
if (scalarCheat) {
//printf("Going for scalar IsMax\n");
int maxIdx = (int) execIndexReduceScalarFloat(extraPointers, 0, dx, xShapeInfo, extraParams);
int targetIdx = 0;
if (shape::order(hostXShapeInfo) == 'c' || (shape::order(hostXShapeInfo) == 'f' && maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1] >= shape::length(hostXShapeInfo)))
targetIdx = maxIdx;
else
targetIdx = maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1];
hipLaunchKernelGGL(( fillIsMaxFloat), dim3(1), dim3(128), 1536, *stream , resultPointer, shape::length(hostXShapeInfo), targetIdx);
} else {
// going for dimension-based IsMax
//printf("Going for dimension-based IsMax\n");
int *tadMaxShapeInfo = reinterpret_cast<int *> (extraPointers[10]);
int *tadMaxOffsets = reinterpret_cast<int *> (extraPointers[11]);
int *dimensionPointer = reinterpret_cast<int *> (extraPointers[15]);
// we call for IMax on specified dimension
execIndexReduceFloat(extraPointers, 0, dx, xShapeInfo, extraParams, (Nd4jPointer) special, (Nd4jPointer) hostYShapeInfo, (Nd4jPointer) dimensionPointer, 1);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
// at this point all IMax indices are gathered; now mark the winning positions in the result
hipLaunchKernelGGL(( fillDimensionalIsMaxFloat), dim3(768), dim3(16), funcAttributes[36].sharedSizeBytes, *stream, special, hostYShapeInfo, resultPointer, resultShapeInfoPointer, tadMaxShapeInfo, dimensionPointer, 1, tadMaxOffsets );
checkCudaErrors(hipStreamSynchronize(*stream));
}
break;
}
default: {
printf("Bad case for transformFloat\n");
break;
}
}
}
} else {
hipLaunchKernelGGL(( transformFloat) , dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer, resultShapeInfoPointer, shape::rank(hostZShapeInfo), allocPointer, reductionPointer);
}
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::execTransformHalf(Nd4jPointer *extraPointers,int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams) {
nd4j::float16 *xPointer = reinterpret_cast<nd4j::float16 *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
nd4j::float16 *resultPointer = reinterpret_cast<nd4j::float16 *>(result);
nd4j::float16 *extraParamsPointer = reinterpret_cast<nd4j::float16 *>(extraParams);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
if (debug && verbose)
printf("H20 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[1], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
nd4j::float16 *reductionPointer = reinterpret_cast<nd4j::float16 *>(extraPointers[4]);
// pointer to the pre-allocated device scratch ("special") buffer used by the special-case ops below
nd4j::float16 *specialPointer = reinterpret_cast<nd4j::float16 *>(extraPointers[6]);
int *dimension = (int *) specialPointer;
int *maxDimension = dimension + 1;
int *maxShapeBuffer = (int *) maxDimension + 1;
nd4j::float16 * special = (nd4j::float16 *) maxShapeBuffer + (MAX_RANK * 2 + 4);
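// Same special-buffer layout as in the float path above, with half-precision scratch after the shape info.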
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[1]);
if (verbose && launchDims.x == 1)
printf("AH20 opNum:[%i]\n", opNum);
// Special handling: ops 38..41 (the SoftMax family and IsMax) are composed from reduce /
// broadcast / transform calls below instead of a single transform kernel.
if (opNum >= 38 && opNum <= 41) {
if (shape::isVector(hostXShapeInfo) && opNum != 41) {
// the input is a vector, so run the op directly in a single block
int length = shape::length(hostXShapeInfo);
int block = nd4j::math::nd4j_min<int>(length, 256);
hipLaunchKernelGGL(( transformHalf), dim3(1), dim3(block), launchDims.z + (block * sizeof(float) * 4), *stream,
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer, resultShapeInfoPointer, shape::rank(hostZShapeInfo), allocPointer, reductionPointer);
} else {
// going for blockwise specials
//float *xpf = reinterpret_cast<float *>(dx);
int *shape = shape::shapeOf(hostXShapeInfo);
switch (opNum) {
case 40: // LogSoftMax
case 39: // SoftMax Derivative
case 38: {// softmax
Nd4jPointer tempPointers[16];
tempPointers[0] = extraPointers[0];
tempPointers[1] = extraPointers[1];
tempPointers[2] = extraPointers[2];
tempPointers[3] = extraPointers[3];
tempPointers[4] = extraPointers[4];
tempPointers[5] = extraPointers[5];
tempPointers[6] = extraPointers[6];
tempPointers[7] = extraPointers[7];
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
tempPointers[12] = extraPointers[12];
tempPointers[13] = extraPointers[13];
tempPointers[14] = extraPointers[14];
tempPointers[15] = extraPointers[15];
int maxShape[2] = {shape::shapeOf(hostXShapeInfo)[0], 1};
int *hostMaxShapeBuffer = shape::shapeBuffer(2, maxShape);
tempPointers[7] = (Nd4jPointer) hostMaxShapeBuffer;
tempPointers[8] = (Nd4jPointer) hostMaxShapeBuffer;
hipLaunchKernelGGL(( prepareShapeBuffer), dim3(1), dim3(1), 128, *stream, dimension, maxDimension, maxShapeBuffer, shape[0]);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
//shape::printShapeInfo(maxShapeBuffer);
tempPointers[9] = extraPointers[12];
tempPointers[10] = extraPointers[13];
tempPointers[11] = extraPointers[14];
// max 3
execReduceHalf(tempPointers, 3, dx, xShapeInfo, extraParams, (Nd4jPointer) special,
(Nd4jPointer) maxShapeBuffer, (Nd4jPointer) maxDimension, 1);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
// sub 1
execBroadcastHalf(tempPointers, 1, dx, xShapeInfo, (Nd4jPointer) special,
(Nd4jPointer) maxShapeBuffer, dx, xShapeInfo, (Nd4jPointer) dimension, 1);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
// exp 3
execTransformHalf(extraPointers, 3, dx, xShapeInfo, dx, xShapeInfo, extraParams);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
tempPointers[8] = tempPointers[7];
tempPointers[9] = extraPointers[12];
tempPointers[10] = extraPointers[13];
tempPointers[11] = extraPointers[14];
//sum 1
execReduceHalf(tempPointers, 1, dx, xShapeInfo, extraParams, (Nd4jPointer) special,
(Nd4jPointer) maxShapeBuffer, (Nd4jPointer) maxDimension, 1);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
// divide 3
execBroadcastHalf(tempPointers, 3, dx, xShapeInfo, (Nd4jPointer) special,
(Nd4jPointer) maxShapeBuffer, dx, xShapeInfo, (Nd4jPointer) dimension, 1);
if (opNum == 40) {
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
execTransformHalf(tempPointers, 47, dx, xShapeInfo, dx, xShapeInfo, extraParams);
}
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
// log 3
if (opNum == 40)
execTransformHalf(extraPointers, 5, dx, xShapeInfo, dx, xShapeInfo, extraParams);
else if (opNum == 39)
execTransformHalf(extraPointers, 42, dx, xShapeInfo, dx, xShapeInfo, extraParams);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
delete[] hostMaxShapeBuffer;
break;
}
case 41: {
// IsMax along all dimensions
int *dimensionHostPointer = reinterpret_cast<int *> (extraPointers[16]);
bool scalarCheat = false;
if (extraParamsPointer == nullptr) {
scalarCheat = true;
} else {
/* //extraParamsPointer == nullptr || (shape::isVector(hostXShapeInfo))
if (shape::isVector(hostXShapeInfo) && dimensionHostPointer[0] == 1) {
scalarCheat = true;
}*/
}
if (scalarCheat) {
//printf("Going for scalar IsMax\n");
int maxIdx = (int) execIndexReduceScalarHalf(extraPointers, 0, dx, xShapeInfo, extraParams);
int targetIdx = 0;
if (shape::order(hostXShapeInfo) == 'c' || (shape::order(hostXShapeInfo) == 'f' && maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1] >= shape::length(hostXShapeInfo)))
targetIdx = maxIdx;
else
targetIdx = maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1];
hipLaunchKernelGGL(( fillIsMaxHalf), dim3(1), dim3(128), 1536, *stream , resultPointer, shape::length(hostXShapeInfo), targetIdx);
} else {
// going for dimension-based IsMax
//printf("Going for dimension-based IsMax\n");
int *tadMaxShapeInfo = reinterpret_cast<int *> (extraPointers[10]);
int *tadMaxOffsets = reinterpret_cast<int *> (extraPointers[11]);
int *dimensionPointer = reinterpret_cast<int *> (extraPointers[15]);
// we call IMax on the specified dimension
execIndexReduceHalf(extraPointers, 0, dx, xShapeInfo, extraParams, (Nd4jPointer) special, (Nd4jPointer) hostYShapeInfo, (Nd4jPointer) dimensionPointer, 1);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
// at this point, all IMax indexes are gathered, and we execute
hipLaunchKernelGGL(( fillDimensionalIsMaxHalf), dim3(128), dim3(64), funcAttributes[36].sharedSizeBytes, *stream, special, hostYShapeInfo, resultPointer, resultShapeInfoPointer, tadMaxShapeInfo, dimensionPointer, 1, tadMaxOffsets );
checkCudaErrors(hipStreamSynchronize(*stream));
}
break;
}
default: {
printf("Bad case for transformFloat\n");
break;
}
}
}
} else {
hipLaunchKernelGGL(( transformHalf), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer, resultShapeInfoPointer, shape::rank(hostZShapeInfo), allocPointer, reductionPointer);
}
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execTransformFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer xIndexes,
Nd4jPointer resultIndexes) {
float *xPointer = reinterpret_cast<float *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
int *resultIndexesPointer = reinterpret_cast<int *>(resultIndexes);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
if (debug && verbose)
printf("F21 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[0], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[0]);
if (verbose && launchDims.x == 1)
printf("AF21 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( transformFloatIndexes), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer,
resultIndexesPointer, allocPointer, reductionPointer);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::execTransformHalf(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer xIndexes,
Nd4jPointer resultIndexes) {
nd4j::float16 *xPointer = reinterpret_cast<nd4j::float16 *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
nd4j::float16 *resultPointer = reinterpret_cast<nd4j::float16 *>(result);
nd4j::float16 *extraParamsPointer = reinterpret_cast<nd4j::float16 *>(extraParams);
int *resultIndexesPointer = reinterpret_cast<int *>(resultIndexes);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
if (debug && verbose)
printf("H21 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[0], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
nd4j::float16 *reductionPointer = reinterpret_cast<nd4j::float16 *>(extraPointers[4]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[0]);
if (verbose && launchDims.x == 1)
printf("AH21 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( transformHalfIndexes), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer,
resultIndexesPointer, allocPointer, reductionPointer);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
template <typename T>
__device__ void flattenKernelGeneric(int dOffset,
char order,
T *result,
int *resultShapeInfo,
T *input,
int *inputShapeInfo, int *allocationPointer) {
__shared__ UnifiedSharedMemory *manager;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
manager = new(shmem) UnifiedSharedMemory((int *) shmem);
manager->init(sizeof(UnifiedSharedMemory), 4, 4, sizeof(shape::TAD), 2);
}
__syncthreads();
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int *zShape = shape::shapeOf(resultShapeInfo);
int *zStride = shape::stride(resultShapeInfo);
int *yShape = shape::shapeOf(inputShapeInfo);
int *yStride = shape::stride(inputShapeInfo);
char yOrder = shape::order(inputShapeInfo);
int len = shape::length(inputShapeInfo);
int resultEWS = shape::elementWiseStride(resultShapeInfo);
int inputEWS = shape::elementWiseStride(inputShapeInfo);
if (yOrder == order) {
if (resultEWS >= 1 && inputEWS >= 1) {
for (int i = tid; i < len; i+= gridDim.x * blockDim.x) {
result[i * resultEWS + dOffset] = input[i * inputEWS];
}
} else {
int rank = shape::rank(inputShapeInfo);
/*
long allocSize = sizeof(int) * rank;
int *coord = shape::cuMalloc(allocationPointer, allocSize, manager);
*/
int coord[MAX_RANK];
if(order == 'f') {
for(int i = tid; i < len; i+= gridDim.x * blockDim.x) {
shape::ind2sub(rank,yShape,i,coord);
int offset = shape::getOffset(0,yShape,yStride,coord,rank);
result[i + dOffset] = input[offset];
}
}
else {
for(int i = tid; i < len; i+= gridDim.x * blockDim.x) {
shape::ind2subC(rank,yShape,i,coord);
int offset = shape::getOffset(0,yShape,yStride,coord,rank);
result[i + dOffset] = input[offset];
}
}
/*
if (rank > MAX_COORD && tid * allocSize > PREALLOC_SIZE - allocSize) {
free(coord);
}
*/
}
} else {
int rank = shape::rank(inputShapeInfo);
/*
long allocSize = sizeof(int) * rank;
int *coord = shape::cuMalloc(allocationPointer, allocSize, manager);
*/
int coord[MAX_RANK];
if(order == 'f') {
for(int i = tid; i < len; i+= gridDim.x * blockDim.x) {
shape::ind2sub(rank,yShape,i,coord);
int offset = shape::getOffset(0,yShape,yStride,coord,rank);
result[i+dOffset] = input[offset];
}
}
else {
for(int i = tid; i < len; i+= gridDim.x * blockDim.x) {
shape::ind2subC(rank,yShape,i,coord);
int offset = shape::getOffset(0,yShape,yStride,coord,rank);
result[i+dOffset] = input[offset];
}
}
/*
if (rank > MAX_COORD && tid * allocSize > PREALLOC_SIZE - allocSize) {
free(coord);
}*/
}
}
extern "C" __global__ void flattenKernelDouble(int offset,
char order,
double *result,
int *resultShapeInfo,
double *input,
int *inputShapeInfo, int *allocationPointer) {
flattenKernelGeneric<double>(
offset,
order, result,
resultShapeInfo,
input,
inputShapeInfo,
allocationPointer);
}
extern "C" __global__ void flattenKernelFloat(int offset,
char order,
float *result,
int *resultShapeInfo,
float *input,
int *inputShapeInfo, int *allocationPointer) {
flattenKernelGeneric<float>(
offset,
order,
result,
resultShapeInfo,
input,
inputShapeInfo,
allocationPointer);
}
extern "C" __global__ void flattenKernelHalf(int offset,
char order,
nd4j::float16 *result,
int *resultShapeInfo,
nd4j::float16 *input,
int *inputShapeInfo, int *allocationPointer) {
flattenKernelGeneric<nd4j::float16>(
offset,
order,
result,
resultShapeInfo,
input,
inputShapeInfo,
allocationPointer);
}
/**
* Append an input array
* to the end of a flat array
* in a particular order
* @param offset the offset of the array to start at
* @param order the order
* @param result the result array
* @param resultShapeInfo the shape info for the array
* @param input the input for the array
* @param inputShapeInfo the shape information for that array
*/
void NativeOps::flattenFloat(
Nd4jPointer *extraPointers,
int offset,
char order,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer input,
Nd4jPointer inputShapeInfo) {
float *xPointer = reinterpret_cast<float *>(result);
int *xShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
float *yPointer = reinterpret_cast<float *>(input);
int *yShapeInfoPointer = reinterpret_cast<int *>(inputShapeInfo);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[5], deviceProperties[getDeviceId(extraPointers[2])]);
if (debug && verbose)
printf("F22 opNum:[7]\n");
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostYShapeInfo), 2, funcAttributes[30]);
if (verbose && launchDims.x == 1)
printf("AF222 opNum:[7]\n");
hipLaunchKernelGGL(( flattenKernelFloat), dim3(launchDims.x),dim3(launchDims.y), launchDims.z, *stream, offset, order, xPointer, xShapeInfoPointer, yPointer, yShapeInfoPointer, allocPointer);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
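/*
Illustrative host-side sketch (editor addition, not part of the original source). The slot
layout below is inferred from this function: [1] holds the hipStream_t, [2] the device id,
[3] a device int* scratch buffer, and [7] the host shape info of the input, whose length
drives the launch size. Names such as `ops`, `stream` and the device buffers are assumptions.

Nd4jPointer extras[16] = {0};
extras[0] = (Nd4jPointer) hostResultShapeInfo;
extras[1] = (Nd4jPointer) stream;
extras[2] = (Nd4jPointer) 0L;
extras[3] = (Nd4jPointer) devScratch;
extras[7] = (Nd4jPointer) hostInputShapeInfo;
ops.flattenFloat(extras, 0, 'c',
(Nd4jPointer) devResult, (Nd4jPointer) devResultShapeInfo,
(Nd4jPointer) devInput, (Nd4jPointer) devInputShapeInfo);
*/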
void NativeOps::flattenHalf(
Nd4jPointer *extraPointers,
int offset,
char order,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer input,
Nd4jPointer inputShapeInfo) {
nd4j::float16 *xPointer = reinterpret_cast<nd4j::float16 *>(result);
int *xShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
nd4j::float16 *yPointer = reinterpret_cast<nd4j::float16 *>(input);
int *yShapeInfoPointer = reinterpret_cast<int *>(inputShapeInfo);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[5], deviceProperties[getDeviceId(extraPointers[2])]);
if (debug && verbose)
printf("H22 opNum:[7]\n");
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostYShapeInfo), 2, funcAttributes[30]);
if (verbose && launchDims.x == 1)
printf("AH222 opNum:[7]\n");
hipLaunchKernelGGL(( flattenKernelHalf), dim3(launchDims.x),dim3(launchDims.y), launchDims.z, *stream, offset, order, xPointer, xShapeInfoPointer, yPointer, yShapeInfoPointer, allocPointer);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
* Append an input array
* to the end of a flat array
* in a particular order
* @param offset the offset of the array to start at
* @param order the order
* @param result the result array
* @param resultShapeInfo the shape info for the array
* @param input the input for the array
* @param inputShapeInfo the shape information for that array
*/
void NativeOps::flattenDouble(
Nd4jPointer *extraPointers,
int offset,
char order,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer input,
Nd4jPointer inputShapeInfo) {
double *xPointer = reinterpret_cast<double *>(result);
int *xShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
double *yPointer = reinterpret_cast<double *>(input);
int *yShapeInfoPointer = reinterpret_cast<int *>(inputShapeInfo);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
if (debug && verbose)
printf("D30 opNum:[7]\n");
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[5], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostYShapeInfo), 2, funcAttributes[34]);
hipLaunchKernelGGL(( flattenKernelDouble), dim3(launchDims.x),dim3(launchDims.y), launchDims.z, *stream, offset, order, xPointer, xShapeInfoPointer, yPointer, yShapeInfoPointer, allocPointer);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::enableP2P(bool enable) {
if (enable == allowedP2P)
return;
int curDevice = 0;
hipGetDevice(&curDevice);
int devCnt = 0;
hipGetDeviceCount(&devCnt);
if (devCnt > 1) {
for (int x = 0; x < devCnt; x++) {
for (int y = 0; y < devCnt; y++) {
if (x == y)
continue;
int canAccess = 0;
hipSetDevice(x);
hipDeviceCanAccessPeer(&canAccess, x , y);
if (canAccess) {
if (enable) {
hipDeviceEnablePeerAccess(y, 0);
} else {
hipDeviceDisablePeerAccess(y);
}
} else
printf("Peer access [%i] -> [%i] isn't possible\n", x, y);
}
}
hipSetDevice(0);
}
allowedP2P = enable;
hipSetDevice(curDevice);
}
void NativeOps::initializeDevicesAndFunctions() {
int devCnt = 0;
hipGetDeviceCount(&devCnt);
deviceProperties = new hipDeviceProp_t[devCnt];
for (int i = 0; i < devCnt; i++) {
hipSetDevice(i);
hipGetDeviceProperties(&deviceProperties[i], i);
hipDeviceSetLimit(hipLimitStackSize, 4096);
}
hipSetDevice(0);
enableP2P(allowedP2P);
hipFuncGetAttributes(&funcAttributes[0], (void *)transformFloatIndexes);
void (*transformFloatPointer1)(int opNum, float *dy,int *shapeInfo, int xRank, float *params, float *result,int *resultShapeInfo, int zRank, int *allocationPointer, float *reductionPointer) = transformFloat;
hipFuncGetAttributes(&funcAttributes[1], transformFloatPointer1);
void (*transformFloatPointer2)(int opNum, Nd4jIndex n, float *dy, int incy, float *params, float *result,int resultStride, int *allocationPointer, float *reductionPointer) = transformFloat;
hipFuncGetAttributes(&funcAttributes[2], transformFloatPointer2);
hipFuncGetAttributes(&funcAttributes[3], (void *)summaryStatsReduceFloat);
hipFuncGetAttributes(&funcAttributes[4], (void *)scalarFloatIndexes);
void (*scalarFloatPointer1)(int opNum, float dx,float *dy, int *shapeInfo, int xRank, float *params, float *result,int *resultShapeInfo, int zRank, int *allocPointer) = scalarFloat;
hipFuncGetAttributes(&funcAttributes[5], scalarFloatPointer1);
void (*scalarFloatPointer2)(int opNum, Nd4jIndex n,float dx, float *dy, int incy, float *params, float *result,int resultStride, int *allocPointer) = scalarFloat;
hipFuncGetAttributes(&funcAttributes[6], scalarFloatPointer2);
hipFuncGetAttributes(&funcAttributes[7], reduce3Float);
hipFuncGetAttributes(&funcAttributes[8], reduceFloat);
// printf("reduceFloat regs: [%i], static shmem: [%i]\n", funcAttributes[8].numRegs, funcAttributes[8].sharedSizeBytes);
hipFuncGetAttributes(&funcAttributes[28], reduceFloat1D);
// printf("reduceFloat1D regs: [%i], static shmem: [%i]\n", funcAttributes[28].numRegs, funcAttributes[28].sharedSizeBytes);
hipFuncGetAttributes(&funcAttributes[29], reduceFloat6D);
// printf("reduceFloat6D regs: [%i], static shmem: [%i]\n", funcAttributes[29].numRegs, funcAttributes[29].sharedSizeBytes);
hipFuncGetAttributes(&funcAttributes[30], flattenKernelFloat);
hipFuncGetAttributes(&funcAttributes[31], concatKernelFloat);
hipFuncGetAttributes(&funcAttributes[9], pairWiseTransformFloat);
hipFuncGetAttributes(&funcAttributes[10], pairWiseTransformFloatIndex);
hipFuncGetAttributes(&funcAttributes[11], pairWiseTransformStridedFloat);
hipFuncGetAttributes(&funcAttributes[12], broadcastFloat);
hipFuncGetAttributes(&funcAttributes[13], indexReduceFloat);
///////////////////////////////////////// Doubles are separate, just in case of...
hipFuncGetAttributes(&funcAttributes[14], transformDoubleIndexes);
void (*transformDoublePointer1)(int opNum, double *dy, int *shapeInfo, int xRank, double *params, double *result,int *resultShapeInfo, int zRank, int *allocationPointer, double *reductionPointer) = transformDouble;
hipFuncGetAttributes(&funcAttributes[15], transformDoublePointer1);
void (*transformDoublePointer2)(int opNum, Nd4jIndex n, double *dy, int incy, double *params, double *result,int resultStride, int *allocationPointer, double *reductionPointer) = transformDouble;
hipFuncGetAttributes(&funcAttributes[16], transformDoublePointer2);
hipFuncGetAttributes(&funcAttributes[17], summaryStatsReduceDouble);
hipFuncGetAttributes(&funcAttributes[18], scalarDoubleIndexes);
void (*scalarDoublePointer1)(int opNum, double dx,double *dy, int *shapeInfo, int xRank, double *params, double *result,int *resultShapeInfo, int zRank, int *allocPointer) = scalarDouble;
hipFuncGetAttributes(&funcAttributes[19], scalarDoublePointer1);
void (*scalarDoublePointer2)(int opNum, Nd4jIndex n,double dx, double *dy, int incy, double *params, double *result,int resultStride, int *allocPointer) = scalarDouble;
hipFuncGetAttributes(&funcAttributes[20], scalarDoublePointer2);
hipFuncGetAttributes(&funcAttributes[21], reduce3Double);
hipFuncGetAttributes(&funcAttributes[22], reduceDouble);
hipFuncGetAttributes(&funcAttributes[23], pairWiseTransformDouble);
hipFuncGetAttributes(&funcAttributes[24], pairWiseTransformDoubleIndex);
hipFuncGetAttributes(&funcAttributes[25], pairWiseTransformStridedDouble);
hipFuncGetAttributes(&funcAttributes[26], broadcastDouble);
hipFuncGetAttributes(&funcAttributes[27], indexReduceDouble);
hipFuncGetAttributes(&funcAttributes[32], reduceDouble1D);
hipFuncGetAttributes(&funcAttributes[33], reduceDouble6D);
hipFuncGetAttributes(&funcAttributes[34], flattenKernelDouble);
hipFuncGetAttributes(&funcAttributes[35], concatKernelDouble);
hipFuncGetAttributes(&funcAttributes[36], fillDimensionalIsMaxFloat);
hipFuncGetAttributes(&funcAttributes[37], fillDimensionalIsMaxDouble);
hipFuncGetAttributes(&funcAttributes[38], concatKernelScalarFloat);
hipFuncGetAttributes(&funcAttributes[39], concatKernelScalarDouble);
hipFuncGetAttributes(&funcAttributes[40], concatKernelVStackFloat);
hipFuncGetAttributes(&funcAttributes[41], concatKernelVStackDouble);
hipFuncGetAttributes(&funcAttributes[42], concatKernelHStackFloat);
hipFuncGetAttributes(&funcAttributes[43], concatKernelHStackDouble);
/////////////////////////
hipFuncGetAttributes(&funcAttributes[44], averagingKernelHalf);
hipFuncGetAttributes(&funcAttributes[45], averagingKernelFloat);
hipFuncGetAttributes(&funcAttributes[46], averagingKernelDouble);
}
/**
* This method acquires memory chunk of requested size on host side
*
* @param pointer pointer that'll be used for allocation
* @param memorySize memory size, in bytes
* @param flags optional parameter
*/
Nd4jPointer NativeOps::mallocHost(Nd4jIndex memorySize, int flags) {
Nd4jPointer pointer;
// hipHostMallocMapped |hipHostMallocPortable
hipError_t res = hipHostMalloc((void **)&pointer, memorySize, hipHostMallocDefault);
if (res != 0)
pointer = 0L;
return pointer;
}
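/*
Minimal usage sketch (editor addition, not original source): mallocHost returns 0L when
hipHostMalloc fails, so callers are expected to null-check before use.

NativeOps ops;
Nd4jPointer pinned = ops.mallocHost(1024 * sizeof(float), 0);
if (pinned != 0L) {
// ... fill / use the pinned buffer ...
ops.freeHost(pinned);
}
*/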
/**
* This method acquires memory chunk of requested size on specified device
*
* @param pointer pointer that'll be used for allocation
* @param memorySize memory size, in bytes
* @param ptrToDeviceId pointer to deviceId. For cuda that's just an int, for OpenCL that's pointer to device_id, etc
* @param flags optional parameter
*/
Nd4jPointer NativeOps::mallocDevice(Nd4jIndex memorySize, Nd4jPointer ptrToDeviceId, int flags) {
Nd4jPointer pointer;
hipError_t res = hipMalloc((void **)&pointer, memorySize);
if (res != 0)
pointer = 0L;
return pointer;
}
/**
* This method releases previously allocated host memory space
*
* @param pointer pointer that'll be freed
*/
int NativeOps::freeHost(Nd4jPointer pointer) {
hipError_t res = hipHostFree((void *) pointer);
if (res != 0)
pointer = 0L;
return 1L;
}
/**
* This method releases previously allocated memory space on device
*
* @param pointer pointer that'll be freed
* @param ptrToDeviceId pointer to deviceId.
*/
int NativeOps::freeDevice(Nd4jPointer pointer, Nd4jPointer ptrToDeviceId) {
hipError_t res = hipFree((void *)pointer);
if (res != 0)
pointer = 0L;
return 1L;
}
Nd4jPointer NativeOps::createContext() {
return 0L;
}
Nd4jPointer NativeOps::createStream() {
Nd4jPointer nativeStream = 0;
hipError_t result = hipStreamCreate((hipStream_t *) &nativeStream);
checkCudaErrors(result);
if (result != 0)
return 0L;
else return nativeStream;
}
Nd4jPointer NativeOps::createEvent() {
Nd4jPointer nativeEvent= 0;
hipError_t result = hipEventCreateWithFlags((hipEvent_t *) &nativeEvent, hipEventDisableTiming);
checkCudaErrors(result);
if (result != 0)
return 0L;
else return nativeEvent;
}
Nd4jPointer NativeOps::createBlasHandle() {
Nd4jPointer nativeHandle= 0;
hipblasStatus_t result = hipblasCreate((hipblasHandle_t *) &nativeHandle);
if (result != 0)
return 0L;
else return nativeHandle;
}
int NativeOps::registerEvent(Nd4jPointer event, Nd4jPointer stream) {
hipEvent_t *pEvent = reinterpret_cast<hipEvent_t *>(&event);
hipStream_t *pStream = reinterpret_cast<hipStream_t *>(&stream);
hipError_t result = hipEventRecord(*pEvent, *pStream);
checkCudaErrors(result);
if (result != 0)
return 0L;
else return 1;
}
int NativeOps::setBlasStream(Nd4jPointer handle, Nd4jPointer stream) {
hipblasHandle_t *pHandle = reinterpret_cast<hipblasHandle_t *>(&handle);
hipStream_t *pStream = reinterpret_cast<hipStream_t *>(&stream);
hipblasStatus_t result = hipblasSetStream(*pHandle, *pStream);
if (result != 0)
return 0L;
else return 1L;
}
int NativeOps::setDevice(Nd4jPointer ptrToDeviceId) {
int deviceId = getDeviceId(ptrToDeviceId);
hipError_t result = hipSetDevice(deviceId);
checkCudaErrors(result);
if (result != 0)
return 0L;
else return 1;
}
Nd4jIndex NativeOps::getDeviceFreeMemory(Nd4jPointer ptrToDeviceId) {
int device = getDeviceId(ptrToDeviceId);
if (device >= 0) {
setDevice(ptrToDeviceId);
}
size_t memFree = 0;
size_t memTotal = 0;
hipMemGetInfo(&memFree, &memTotal);
return (Nd4jIndex) memFree;
}
Nd4jIndex NativeOps::getDeviceTotalMemory(Nd4jPointer ptrToDeviceId) {
int device = getDeviceId(ptrToDeviceId);
if (device >= 0) {
setDevice(ptrToDeviceId);
}
size_t memFree = 0;
size_t memTotal = 0;
hipMemGetInfo(&memFree, &memTotal);
return (Nd4jIndex) memTotal;
}
int NativeOps::memcpy(Nd4jPointer dst, Nd4jPointer src, Nd4jIndex size, int flags, Nd4jPointer reserved) {
return memcpyAsync(dst, src, size, flags, reserved);
}
int NativeOps::memcpyAsync(Nd4jPointer dst, Nd4jPointer src, Nd4jIndex size, int flags, Nd4jPointer reserved) {
hipStream_t *pStream = reinterpret_cast<hipStream_t *>(&reserved);
hipMemcpyKind kind;
if (debug)
checkCudaErrors(hipStreamSynchronize(*pStream));
switch (flags) {
case 0: {
kind = hipMemcpyHostToHost;
}
break;
case 1: {
kind = hipMemcpyHostToDevice;
}
break;
case 2: {
kind = hipMemcpyDeviceToHost;
}
break;
case 3: {
kind = hipMemcpyDeviceToDevice;
}
break;
default: {
printf("UNDEFINED MEMCPY!\n");
break;
}
}
hipError_t result = hipMemcpyAsync((void *) dst, (const void *) src, (size_t) size, kind, *pStream);
checkCudaErrors(result);
if (result != 0) {
printf("Failed on [%lu] -> [%lu], size: [%i], direction: [%i], result: [%i]\n", src, dst, size, flags, (int) result );
return 0L;
}
else return 1;
}
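/*
Editor's note with a usage sketch (not original source): the flags argument maps to the copy
direction as handled above - 0: host-to-host, 1: host-to-device, 2: device-to-host,
3: device-to-device - and `reserved` carries the stream used for the async copy.

// copy `bytes` from hostBuf to devBuf asynchronously on `stream` (assumed names)
ops.memcpyAsync((Nd4jPointer) devBuf, (Nd4jPointer) hostBuf, bytes, 1, (Nd4jPointer) stream);
ops.streamSynchronize((Nd4jPointer) stream);
*/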
int NativeOps::memset(Nd4jPointer dst, int value, Nd4jIndex size, int flags, Nd4jPointer reserved) {
//hipStream_t *pStream = reinterpret_cast<hipStream_t *>(&reserved);
hipError_t result = hipMemset((void *) dst, value, (size_t) size);
checkCudaErrors(result);
if (result != 0)
return 0L;
else return 1;
}
int NativeOps::memsetAsync(Nd4jPointer dst, int value, Nd4jIndex size, int flags, Nd4jPointer reserved) {
hipStream_t *pStream = reinterpret_cast<hipStream_t *>(&reserved);
hipError_t result = hipMemsetAsync((void *) dst, value, (size_t) size, *pStream);
checkCudaErrors(result);
if (result != 0)
return 0L;
else return 1;
}
int NativeOps::destroyEvent(Nd4jPointer event) {
hipEvent_t *pEvent = reinterpret_cast<hipEvent_t *>(&event);
hipError_t result = hipEventDestroy(*pEvent);
checkCudaErrors(result);
if (result != 0)
return 0L;
else return 1;
}
int NativeOps::streamSynchronize(Nd4jPointer stream) {
hipStream_t *pStream = reinterpret_cast<hipStream_t *>(&stream);
hipError_t result = hipStreamSynchronize(*pStream);
checkCudaErrors(result);
if (result != 0)
return 0L;
else return 1L;
}
int NativeOps::eventSynchronize(Nd4jPointer event) {
hipEvent_t *pEvent = reinterpret_cast<hipEvent_t *>(&event);
hipError_t result = hipEventSynchronize(*pEvent);
checkCudaErrors(result);
if (result != 0)
return 0L;
else return 1L;
}
int NativeOps::getAvailableDevices() {
int devCnt = 0;
hipGetDeviceCount(&devCnt);
return devCnt;
}
void NativeOps::enableDebugMode(bool reallyEnable) {
debug = reallyEnable;
}
void NativeOps::setGridLimit(int gridSize) {
if (gridSize > 1024)
gridSize = 1024;
if (gridSize < 1)
gridSize = 1;
blockLimit = gridSize;
}
int NativeOps::ompGetMaxThreads() {
return maxThreads;
}
int NativeOps::ompGetNumThreads() {
return maxThreads;
}
void NativeOps::setOmpNumThreads(int threads) {
if (threads > 1024)
threads = 1024;
if (threads < 32)
threads = 32;
maxThreads = threads;
}
void NativeOps::enableVerboseMode(bool reallyEnable) {
verbose = reallyEnable;
}
/**
* Concatenate multiple arrays of the same shape together
* along a particular dimension
*/
void NativeOps::concatFloat(
Nd4jPointer *extraPointers,
int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer *tadPointers,
Nd4jPointer *offsetPointers) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
float *resultData = reinterpret_cast<float *>(result);
int *resultShape = reinterpret_cast<int *>(resultShapeInfo);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int **hostShapePointers = reinterpret_cast<int **>(extraPointers[9]);
// numArrays will be used as the number of TADs, so each block processes 1 input
int smem = 0;
bool isVstack = false;
bool isScalar = true;
bool isHstack = false;
for (int i = 0; i < numArrays; i++) {
if (!shape::isScalar(hostShapePointers[i])) {
isScalar = false;
break;
}
}
if (!isScalar && dimension == 0 && shape::rank(hostXShapeInfo) == 2 && shape::order(hostXShapeInfo) == 'c' ) {
isVstack = true;
for (int i = 0; i < numArrays; i++) {
if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0 || shape::order(hostShapePointers[i]) != 'c') {
isVstack = false;
break;
}
}
}
if (!isScalar && !isVstack && dimension == 1 && shape::isVector(hostXShapeInfo)) {
isHstack = true;
for (int i = 0; i < numArrays; i++) {
if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0) {
isHstack = false;
break;
}
}
}
if (isScalar) {
if (debug && verbose)
printf("Going scalar concat\n");
smem = funcAttributes[38].sharedSizeBytes;
hipLaunchKernelGGL(( concatKernelScalarFloat), dim3(128), dim3(128), smem, *stream, dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], resultData, resultShape, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]);
} else if (isVstack) {
if (debug && verbose)
printf("Going VStack concat\n");
smem = funcAttributes[40].sharedSizeBytes;
hipLaunchKernelGGL(( concatKernelVStackFloat), dim3(128), dim3(128), smem, *stream, dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], resultData, resultShape, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]);
} else if (isHstack) {
if (debug && verbose)
printf("Going HStack concat\n");
smem = funcAttributes[42].sharedSizeBytes;
hipLaunchKernelGGL(( concatKernelHStackFloat), dim3(128), dim3(128), smem, *stream, dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], resultData, resultShape, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]);
} else {
if (debug && verbose)
printf("Going generic concat\n");
smem = nd4j::math::nd4j_max<int>(funcAttributes[31].sharedSizeBytes + 768, 1280);
hipLaunchKernelGGL(( concatKernelFloat), dim3(128), dim3(128), smem, *stream, dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], resultData, resultShape, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]);
}
if (debug && verbose)
printf("sharedMemory requested for concatFloat: [%i], registers: [%i]\n", smem, funcAttributes[31].numRegs);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
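/*
Dispatch summary for the concat path above (descriptive note added for clarity):
- every input is a scalar -> concatKernelScalarFloat
- dimension 0, rank-2 'c'-ordered result, all inputs 'c'-ordered vectors with positive
element-wise stride -> concatKernelVStackFloat
- dimension 1, vector result, all inputs vectors with positive element-wise stride
-> concatKernelHStackFloat
- otherwise -> the generic concatKernelFloat
The Half and Double variants below follow the same branching.
*/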
void NativeOps::concatHalf(
Nd4jPointer *extraPointers,
int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer *tadPointers,
Nd4jPointer *offsetPointers) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
nd4j::float16 *resultData = reinterpret_cast<nd4j::float16 *>(result);
int *resultShape = reinterpret_cast<int *>(resultShapeInfo);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int **hostShapePointers = reinterpret_cast<int **>(extraPointers[9]);
// numArrays will be used as the number of TADs, so each block processes 1 input
int smem = 0;
bool isVstack = false;
bool isScalar = true;
bool isHstack = false;
for (int i = 0; i < numArrays; i++) {
if (!shape::isScalar(hostShapePointers[i])) {
isScalar = false;
break;
}
}
if (!isScalar && dimension == 0 && shape::rank(hostXShapeInfo) == 2 && shape::order(hostXShapeInfo) == 'c' ) {
isVstack = true;
for (int i = 0; i < numArrays; i++) {
if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0 || shape::order(hostShapePointers[i]) != 'c') {
isVstack = false;
break;
}
}
}
if (!isScalar && !isVstack && dimension == 1 && shape::isVector(hostXShapeInfo)) {
isHstack = true;
for (int i = 0; i < numArrays; i++) {
if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0) {
isHstack = false;
break;
}
}
}
if (isScalar) {
if (debug && verbose)
printf("Going scalar concat\n");
smem = funcAttributes[38].sharedSizeBytes;
hipLaunchKernelGGL(( concatKernelScalarHalf), dim3(128), dim3(128), smem, *stream, dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], resultData, resultShape, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]);
} else if (isVstack) {
if (debug && verbose)
printf("Going VStack concat\n");
smem = funcAttributes[40].sharedSizeBytes;
hipLaunchKernelGGL(( concatKernelVStackHalf), dim3(128), dim3(128), smem, *stream, dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], resultData, resultShape, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]);
} else if (isHstack) {
if (debug && verbose)
printf("Going HStack concat\n");
smem = funcAttributes[42].sharedSizeBytes;
hipLaunchKernelGGL(( concatKernelHStackHalf), dim3(128), dim3(128), smem, *stream, dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], resultData, resultShape, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]);
} else {
if (debug && verbose)
printf("Going generic concat\n");
smem = nd4j::math::nd4j_max<int>(funcAttributes[31].sharedSizeBytes + 768, 1280);
hipLaunchKernelGGL(( concatKernelHalf), dim3(128), dim3(128), smem, *stream, dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], resultData, resultShape, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]);
}
if (debug && verbose)
printf("sharedMemory requested for concatHalf: [%i], registers: [%i]\n", smem, funcAttributes[31].numRegs);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
* Concatenate multiple arrays of the same shape together
* along a particular dimension
*/
void NativeOps::concatDouble(
Nd4jPointer *extraPointers,
int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer *tadPointers,
Nd4jPointer *offsetPointers) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
double *resultData = reinterpret_cast<double *>(result);
int *resultShape = reinterpret_cast<int *>(resultShapeInfo);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int **hostShapePointers = reinterpret_cast<int **>(extraPointers[9]);
// numArrays will be used as the number of TADs, so each block processes 1 input
int smem = 0;
bool isVstack = false;
bool isScalar = true;
bool isHstack = false;
for (int i = 0; i < numArrays; i++) {
if (!shape::isScalar(hostShapePointers[i])) {
isScalar = false;
break;
}
}
if (!isScalar && dimension == 0 && shape::rank(hostXShapeInfo) == 2 && shape::order(hostXShapeInfo) == 'c' ) {
isVstack = true;
for (int i = 0; i < numArrays; i++) {
if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0 || shape::order(hostShapePointers[i]) != 'c') {
isVstack = false;
break;
}
}
}
if (!isScalar && !isVstack && dimension == 1 && shape::isVector(hostXShapeInfo)) {
isHstack = true;
for (int i = 0; i < numArrays; i++) {
if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0) {
isHstack = false;
break;
}
}
}
if (isScalar) {
if (debug && verbose)
printf("Going scalar concat\n");
smem = funcAttributes[39].sharedSizeBytes;
hipLaunchKernelGGL(( concatKernelScalarDouble), dim3(128), dim3(128), smem, *stream, dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], resultData, resultShape, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]);
} else if (isVstack) {
if (debug && verbose)
printf("Going VStack concat\n");
smem = funcAttributes[41].sharedSizeBytes;
hipLaunchKernelGGL(( concatKernelVStackDouble), dim3(128), dim3(128), smem, *stream, dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], resultData, resultShape, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]);
} else if (isHstack) {
if (debug && verbose)
printf("Going HStack concat\n");
smem = funcAttributes[43].sharedSizeBytes;
hipLaunchKernelGGL(( concatKernelHStackDouble), dim3(128), dim3(128), smem, *stream, dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], resultData, resultShape, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]);
} else {
if (debug && verbose)
printf("Going generic concat\n");
smem = nd4j::math::nd4j_max<int>(funcAttributes[35].sharedSizeBytes + 768, 1280);
hipLaunchKernelGGL(( concatKernelDouble), dim3(128), dim3(128), smem, *stream, dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], resultData, resultShape, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]);
}
if (debug && verbose)
printf("sharedMemory requested for concatFloat: [%i], registers: [%i]\n", smem, funcAttributes[31].numRegs);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
* This method builds host-side TAD-only shape information and offsets
* for the given dimension(s) of the input shape, copying them into the
* caller-provided target and offsets buffers.
*/
void NativeOps::tadOnlyShapeInfo(Nd4jPointer xShapeInfo, Nd4jPointer dimension, int dimensionLength, Nd4jPointer targetBuffer, Nd4jPointer offsetsBuffer) {
int *hostXShapeInfo = reinterpret_cast<int *>(xShapeInfo);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
int *target = reinterpret_cast<int *>(targetBuffer);
int *offsets = reinterpret_cast<int *>(offsetsBuffer);
shape::TAD *tad = new shape::TAD();
tad->init(hostXShapeInfo, dimensionPointer, dimensionLength);
//tad->setOutputBuffer(target);
tad->createTadOnlyShapeInfo();
tad->createOffsets();
std::memcpy((void *) target, tad->tadOnlyShapeInfo, (tad->tadOnlyShapeInfo[0] * 2 + 4) * sizeof(int));
std::memcpy((void *) offsets, tad->tadOffsets, tad->numTads * sizeof(int));
/*
shape::printShapeInfoLinear(hostXShapeInfo);
shape::printShapeInfoLinear(tad->tadOnlyShapeInfo);
shape::printShapeInfoLinear(target);
*/
delete tad;
}
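/*
Illustrative caller-side sketch (editor addition). The output buffers must match the copies
above: (tadRank * 2 + 4) ints for the TAD shape info and numTads ints for the offsets; the
concrete sizes and names used here are assumptions.

int dims[] = {1};
int tadShapeInfo[32]; // large enough for (tadRank * 2 + 4) ints
int tadOffsets[256]; // large enough for numTads ints
ops.tadOnlyShapeInfo((Nd4jPointer) hostXShapeInfo, (Nd4jPointer) dims, 1,
(Nd4jPointer) tadShapeInfo, (Nd4jPointer) tadOffsets);
*/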
int NativeOps::memcpyConstantAsync(Nd4jIndex dst, Nd4jPointer src, Nd4jIndex size, int flags, Nd4jPointer reserved) {
hipStream_t *pStream = reinterpret_cast<hipStream_t *>(&reserved);
hipMemcpyKind kind;
if (debug)
checkCudaErrors(hipStreamSynchronize(*pStream));
switch (flags) {
case 0: {
kind = hipMemcpyHostToHost;
}
break;
case 1: {
kind = hipMemcpyHostToDevice;
}
break;
case 2: {
kind = hipMemcpyDeviceToHost;
}
break;
case 3: {
kind = hipMemcpyDeviceToDevice;
}
break;
}
//hipError_t result = hipMemcpyAsync((void *) dst, (const void *) src, (size_t) size, kind, *pStream);
hipError_t result = hipMemcpyToSymbolAsync(deviceConstantMemory, (const void *) src, size, dst, kind, *pStream);
checkCudaErrors(result);
if (result != 0) {
printf("Symbol failed on [%lu] -> [%lu], size: [%i], direction: [%i]\n", src, dst, size, flags );
return 0L;
}
else return 1;
}
Nd4jPointer NativeOps::getConstantSpace() {
Nd4jPointer dConstAddr;
hipError_t result = hipGetSymbolAddress((void **)&dConstAddr, deviceConstantMemory);
return dConstAddr;
}
void NativeOps::pullRowsHalf(Nd4jPointer *extraPointers, Nd4jPointer x, Nd4jPointer xShapeInfo, Nd4jPointer z, Nd4jPointer zShapeInfo, int n, Nd4jPointer indexes, Nd4jPointer tadShapeInfo, Nd4jPointer tadOffsets) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
nd4j::float16 *xBuffer = reinterpret_cast<nd4j::float16 *>(x);
nd4j::float16 *zBuffer = reinterpret_cast<nd4j::float16 *>(z);
int *zShape = reinterpret_cast<int *>(zShapeInfo);
int *xShape = reinterpret_cast<int *>(xShapeInfo);
int *index = reinterpret_cast<int *>(indexes);
int *tadOnlyShapeInfo = reinterpret_cast<int *>(tadShapeInfo);
int *tadOffset = reinterpret_cast<int *>(tadOffsets);
hipLaunchKernelGGL(( pullRowsKernelHalf), dim3(32), dim3(32), 1024, *stream, xBuffer, xShape, zBuffer, zShape, n, index, tadOnlyShapeInfo, tadOffset);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::pullRowsFloat(Nd4jPointer *extraPointers, Nd4jPointer x, Nd4jPointer xShapeInfo, Nd4jPointer z, Nd4jPointer zShapeInfo, int n, Nd4jPointer indexes, Nd4jPointer tadShapeInfo, Nd4jPointer tadOffsets) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
float *xBuffer = reinterpret_cast<float *>(x);
float *zBuffer = reinterpret_cast<float *>(z);
int *zShape = reinterpret_cast<int *>(zShapeInfo);
int *xShape = reinterpret_cast<int *>(xShapeInfo);
int *index = reinterpret_cast<int *>(indexes);
int *tadOnlyShapeInfo = reinterpret_cast<int *>(tadShapeInfo);
int *tadOffset = reinterpret_cast<int *>(tadOffsets);
hipLaunchKernelGGL(( pullRowsKernelFloat), dim3(32), dim3(32), 1024, *stream, xBuffer, xShape, zBuffer, zShape, n, index, tadOnlyShapeInfo, tadOffset);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::pullRowsDouble(Nd4jPointer *extraPointers, Nd4jPointer x, Nd4jPointer xShapeInfo, Nd4jPointer z, Nd4jPointer zShapeInfo, int n, Nd4jPointer indexes, Nd4jPointer tadShapeInfo, Nd4jPointer tadOffsets) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
double *xBuffer = reinterpret_cast<double *>(x);
double *zBuffer = reinterpret_cast<double *>(z);
int *zShape = reinterpret_cast<int *>(zShapeInfo);
int *xShape = reinterpret_cast<int *>(xShapeInfo);
int *index = reinterpret_cast<int *>(indexes);
int *tadOnlyShapeInfo = reinterpret_cast<int *>(tadShapeInfo);
int *tadOffset = reinterpret_cast<int *>(tadOffsets);
hipLaunchKernelGGL(( pullRowsKernelDouble), dim3(32), dim3(32), 1024, *stream, xBuffer, xShape, zBuffer, zShape, n, index, tadOnlyShapeInfo, tadOffset);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::convertHalfsToFloats(Nd4jPointer *extraPointers, Nd4jPointer dx, int n, Nd4jPointer dz) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
half *x = reinterpret_cast<half *>(dx);
float *z = reinterpret_cast<float *>(dz);
hipLaunchKernelGGL(( kernelHalfsToFloats), dim3(32), dim3(32), 1024, *stream, x, n, z);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::convertHalfsToDoubles(Nd4jPointer *extraPointers, Nd4jPointer dx, int n, Nd4jPointer dz) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
half *x = reinterpret_cast<half *>(dx);
double *z = reinterpret_cast<double *>(dz);
hipLaunchKernelGGL(( kernelHalfsToDoubles), dim3(32), dim3(32), 1024, *stream, x, n, z);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::convertDoublesToHalfs(Nd4jPointer *extraPointers, Nd4jPointer dx, int n, Nd4jPointer dz) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
double *x = reinterpret_cast<double *>(dx);
half *z = reinterpret_cast<half *>(dz);
hipLaunchKernelGGL(( kernelDoublesToHalfs), dim3(32), dim3(32), 1024, *stream, x, n, z);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::convertFloatsToHalfs(Nd4jPointer *extraPointers, Nd4jPointer dx, int n, Nd4jPointer dz) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
float *x = reinterpret_cast<float *>(dx);
half *z = reinterpret_cast<half *>(dz);
hipLaunchKernelGGL(( kernelFloatsToHalfs), dim3(32), dim3(32), 1024, *stream, x, n, z);
if (debug)
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::averageHalf(Nd4jPointer *extras, Nd4jPointer dx, Nd4jPointer dz, int n, Nd4jIndex length, bool propagate) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]);
nd4j::float16 **x = reinterpret_cast<nd4j::float16 **>(dx);
nd4j::float16 *z = reinterpret_cast<nd4j::float16 *>(dz);
if (debug && verbose)
printf("averageHalf called\n");
dim3 launchDims = getBasicLaunchParams(getDeviceId(extras[2]), length, sizeof(nd4j::float16), funcAttributes[44]);
hipLaunchKernelGGL(( averagingKernelHalf), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, z, n, length, propagate);
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::averageFloat(Nd4jPointer *extras, Nd4jPointer dx, Nd4jPointer dz, int n, Nd4jIndex length, bool propagate) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]);
float **x = reinterpret_cast<float **>(dx);
float *z = reinterpret_cast<float *>(dz);
if (debug && verbose)
printf("averageFloat called\n");
dim3 launchDims = getBasicLaunchParams(getDeviceId(extras[2]), length, sizeof(float), funcAttributes[45]);
hipLaunchKernelGGL(( averagingKernelFloat), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, z, n, length, propagate);
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::averageDouble(Nd4jPointer *extras, Nd4jPointer dx, Nd4jPointer dz, int n, Nd4jIndex length, bool propagate) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]);
double **x = reinterpret_cast<double **>(dx);
double *z = reinterpret_cast<double *>(dz);
if (debug && verbose)
printf("averageDouble called\n");
dim3 launchDims = getBasicLaunchParams(getDeviceId(extras[2]), length, sizeof(double), funcAttributes[46]);
hipLaunchKernelGGL(( averagingKernelDouble), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, z, n, length, propagate);
checkCudaErrors(hipStreamSynchronize(*stream));
}
|
b6b9bafcf47616c0555dea3eedfc85e562605b63.cu
|
#include "../NativeOps.h"
#include <cuda.h>
#include <cuda_launch_config.h>
#include <buffer.h>
#include <shape.h>
#include <cublas_v2.h>
#include <reduce3.h>
#include <reduce.h>
#include <indexreduce.h>
#include <pairwise_transform.h>
#include <transform.h>
#include <scalar.h>
#include <broadcasting.h>
#include <summarystatsreduce.h>
#include <thread>
#include <map>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <cuda_runtime.h>
#include <cuda_device_runtime_api.h>
#include <pointercast.h>
#include <stdio.h>
#include <stdlib.h>
cudaDeviceProp *deviceProperties;
cudaFuncAttributes *funcAttributes = new cudaFuncAttributes[64];
int blockLimit = 128;
int maxThreads = 512;
bool debug = false;
bool verbose = true;
bool allowedP2P = false;
__constant__ char deviceConstantMemory[49152];
int getDeviceId(Nd4jPointer ptrToDeviceId) {
return (int)(Nd4jIndex)ptrToDeviceId;
}
template <typename T>
dim3 getOptimalDimensions(Nd4jIndex n,cudaFuncAttributes attributes, cudaDeviceProp properties) {
// we can combine the two to compute a block size
int num_threads = block_size_with_maximum_potential_occupancy(attributes, properties);
// no real sense launching more threads than the number of elements we have
if (num_threads > n) num_threads = n;
if (maxThreads > 0 && num_threads > maxThreads) num_threads = maxThreads;
// compute the number of blocks of size num_threads to launch
int num_blocks = n / num_threads;
// check for partial block at the end
if (num_blocks > blockLimit) num_blocks = blockLimit;
if (num_blocks < 4 && n > 128) {
num_blocks = 4;
num_threads = n / num_blocks;
}
if (num_threads >= 768) {
num_blocks = num_blocks * 2;
num_threads = num_threads / 2;
}
if(n % num_threads && num_blocks < blockLimit) ++num_blocks;
//(num_threads * sizeof(T)) + attributes.sharedSizeBytes);
return dim3(num_blocks,num_threads, 3000);
}
int getBaseMemorySize(int xRank, cudaFuncAttributes funcAttr) {
int memory_limit = 256; //funcAttr.sharedSizeBytes;
// TODO: remove this later
memory_limit += sizeof(UnifiedSharedMemory) + 32; // sizeof(shape::TAD) + (xRank * 4 * 4)
/*
if (xRank == 0) xRank = 2;
memory_limit += (xRank * 2 + 4) * 3 * 4; // we reserve memory for xShape + T1/T2 shapes
memory_limit += yRank == 0 ? 0 : (yRank * 2 + 4) * 4;
memory_limit += zRank == 0 ? 0 : (zRank * 2 + 4) * 4;
memory_limit += (xRank * 4) * 6;
memory_limit += MAX_RANK * 4; // special case, needed roughly in one pass
*/
return memory_limit;
}
int getDeviceBlockThreshold(int deviceId) {
int ccMinor = deviceProperties[deviceId].minor;
int ccMajor = deviceProperties[deviceId].major;
int blockThreshold = 8;
if (ccMajor >= 5)
blockThreshold = 32;
else if (ccMajor == 3)
blockThreshold = 16;
else if (ccMajor < 3)
blockThreshold = 8;
return blockThreshold;
}
dim3 getBasicLaunchParams(int deviceId, long problemLength, int sharedMemoryPerThread, cudaFuncAttributes funcAttr) {
int countMP = deviceProperties[deviceId].multiProcessorCount;
int blockThreshold = getDeviceBlockThreshold(deviceId);
int num_threads = problemLength / (countMP * blockThreshold);
num_threads = nd4j::math::nd4j_min<int>(num_threads, maxThreads);
num_threads = nd4j::math::nd4j_max<int>(num_threads, 64);
int num_blocks = nd4j::math::nd4j_max<int>(problemLength / num_threads, 1);
num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit);
int memory_limit = (sharedMemoryPerThread * num_threads) + getBaseMemorySize(1, funcAttr);
dim3 launchDims = dim3(num_blocks, num_threads, memory_limit);
if (debug && verbose)
printf("Preliminary basic launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i]\n", num_blocks, num_threads, memory_limit);
return launchDims;
}
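/*
Worked example (editor addition; the device figures are assumptions): on a hypothetical
cc 5.x device with 16 SMs the block threshold is 32, so with problemLength = 1048576 and
the defaults maxThreads = 512, blockLimit = 128:
num_threads = 1048576 / (16 * 32) = 2048 -> clamped to 512
num_blocks = 1048576 / 512 = 2048 -> clamped to 128
memory_limit = sharedMemoryPerThread * 512 + base shared-memory reservation
*/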
int getDeviceSharedThreshold(int deviceId) {
int ccMinor = deviceProperties[deviceId].minor;
int ccMajor = deviceProperties[deviceId].major;
// please note the threshold isn't a multiple of 32, and that's NOT a mistake
int shmemThreshold;
if (ccMajor == 6 && ccMinor == 0)
shmemThreshold = 65536;
else if (ccMajor == 6 && ccMinor == 1)
shmemThreshold = 98304;
else if (ccMajor == 5 && ccMinor == 2)
shmemThreshold = 98304;
else if (ccMajor == 5)
shmemThreshold = 65536;
else if (ccMajor == 3 && ccMinor == 7)
shmemThreshold = 114688;
else shmemThreshold = 49152;
return shmemThreshold;
}
dim3 getBetterDimensions(int deviceId, int numTads, int tadLength, int xRank, cudaFuncAttributes funcAttr, int dimensionLength, int elementSize, int reduction) {
int num_threads = nd4j::math::nd4j_min<int>(tadLength, maxThreads);
int countMP = deviceProperties[deviceId].multiProcessorCount;
int regPerBlock = deviceProperties[deviceId].regsPerBlock;
int warpSize = deviceProperties[deviceId].warpSize;
int blockThreshold = getDeviceBlockThreshold(deviceId);
int shmemThreshold = getDeviceSharedThreshold(deviceId);
// round num_threads to nearest warpSize
num_threads -= num_threads % warpSize;
num_threads = nd4j::math::nd4j_max<int>(32, num_threads);
// since we use shared memory as fast memory for some cases - we need to count that in
int memory_limit = getBaseMemorySize(xRank, funcAttr);
int memory_floor = memory_limit;
int effective_block_limit = countMP * blockThreshold;
int num_blocks = nd4j::math::nd4j_min<int>(numTads, effective_block_limit);
int desiredShared = shmemThreshold / nd4j::math::nd4j_max<int>((num_blocks / countMP), 1);
if (debug && verbose)
printf("numBlocks: [%i], numThreads: [%i], countMap: [%i], shmemThreshold: [%i], desiredShared: [%i]\n", num_blocks, num_threads, countMP, shmemThreshold, desiredShared);
// at this moment we've stored all required information; time to factor in the reduction multipliers
int reduction_per_block = 0;
bool found = false;
if (reduction > 0)
while (!found) {
reduction_per_block = (num_threads * elementSize * reduction);
if (memory_limit + reduction_per_block < desiredShared) {
memory_limit += reduction_per_block;
found = true;
} else {
if (num_threads > 128) {
num_threads -= 32;
} else {
memory_limit += reduction_per_block;
found = true;
}
}
}
// at this moment we know total memory used per block, and we also know per-mp limit.
int max_active_blocks = shmemThreshold / nd4j::math::nd4j_max<int>(memory_limit, 1);
if (debug && verbose)
printf("MAB: [%i], memory_limit: [%i]\n", max_active_blocks, memory_limit);
// we don't want to spawn more blocks than the gpu can actually handle without queueing
num_blocks = nd4j::math::nd4j_min<int>(num_blocks, max_active_blocks);
num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit);
// if (num_blocks > countMP)
// num_blocks = num_blocks - (num_blocks % countMP);
num_blocks = nd4j::math::nd4j_max<int>(num_blocks, 1);
int targetBlocksPerMP = num_blocks / countMP;
// now we know the desired number of blocks wrt shared memory, so we should take into account the number of threads per SM
if (targetBlocksPerMP * num_threads > 2048) {
while (targetBlocksPerMP * num_threads > 2048) {
if (num_threads <= 96)
break;
num_threads -= 32;
}
reduction_per_block = (num_threads * elementSize * reduction);
memory_limit = memory_floor + reduction_per_block;
}
if (debug && verbose)
printf("Preliminary reduce launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i], reduction_per_block: [%i], blocksPerMP: [%i]\n", num_blocks, num_threads, memory_limit, reduction_per_block, targetBlocksPerMP);
return dim3(num_blocks,num_threads, memory_limit);
}
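/*
Descriptive note (editor addition): the heuristic above starts from min(tadLength, maxThreads)
threads rounded down to a warp multiple, grows the shared-memory budget until the per-block
reduction buffer fits under the per-SM shared-memory threshold (shaving 32 threads at a time
if it does not), then trims the block count to both the shared-memory-derived active-block
limit and the 2048-threads-per-SM occupancy bound.
*/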
dim3 getFlatLaunchParams(int deviceId, int *xShapeInfo, int *yShapeInfo, cudaFuncAttributes funcAttr) {
int xRank = shape::rank(xShapeInfo);
int yRank = yShapeInfo == nullptr ? 0 : shape::rank(yShapeInfo);
int zRank = 0;
int memory_limit = getBaseMemorySize(xRank, funcAttr);
int countMP = deviceProperties[deviceId].multiProcessorCount;
int regPerBlock = deviceProperties[deviceId].regsPerBlock;
int blockThreshold = getDeviceBlockThreshold(deviceId);
int shmemThreshold = getDeviceSharedThreshold(deviceId);
int xLength = shape::length(xShapeInfo);
int effective_block_limit = countMP * blockThreshold;
// for flat calls we just want as many concurrent blocks as possible, and we're not tied to TADs here
int num_threads = xLength / effective_block_limit;
if (num_threads < 64)
num_threads = 64;
num_threads = num_threads - (num_threads % 32);
int memory_floor = memory_limit;
int num_blocks = xLength / num_threads;
num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit);
num_blocks = nd4j::math::nd4j_min<int>(num_blocks, effective_block_limit);
num_blocks = nd4j::math::nd4j_max<int>(num_blocks, 1);
int targetBlocksPerMP = num_blocks / countMP;
// now we know the desired number of blocks wrt shared memory, so we should take into account the number of threads per SM
if (targetBlocksPerMP * num_threads > 2048 && num_threads >= 64) {
while (targetBlocksPerMP * num_threads > 2048) {
if (num_threads <= 32)
break;
num_threads -= 32;
}
}
dim3 launchDims = dim3(num_blocks, num_threads, memory_limit);
if (debug && verbose)
printf("Preliminary scalar launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i], blocksPerMP: [%i], problemLength: [%i], effectiveBlockLimit: [%i]\n", num_blocks, num_threads, memory_limit, targetBlocksPerMP, xLength, effective_block_limit);
return launchDims;
}
dim3 getReduceLaunchParams(int deviceId, int *xShapeInfo, int *tadShapeInfo, cudaFuncAttributes funcAttr, int dimensionLength, int elementSize, int reductionSize) {
int tadLength = 0;
int numTads = 0;
if (tadShapeInfo != nullptr) {
tadLength = shape::length(tadShapeInfo);
numTads = shape::length(xShapeInfo) / tadLength;
if (tadLength == 1) {
if (debug && verbose)
printf("A xLength: [%i], zLength: [%i]\n", shape::length(xShapeInfo), shape::length(tadShapeInfo));
}
} else{
// we have a special case - reduction along all dimensions
tadLength = nd4j::math::nd4j_min<int>(shape::length(xShapeInfo), 768);
numTads = shape::length(xShapeInfo) / tadLength;
}
int xRank = shape::rank(xShapeInfo);
int zRank = tadShapeInfo == nullptr ? 0 : shape::rank(tadShapeInfo);
dim3 launchDims = getBetterDimensions(deviceId, numTads, tadLength, xRank, funcAttr, dimensionLength, elementSize, reductionSize);
if ((debug && verbose ) ) { //|| launchDims.x == 1
printf("B xLength: [%i], numTads: [%i], tadLength: [%i], launchDims.x: [%i], launchDims.y: [%i]\n", shape::length(xShapeInfo), numTads, tadLength, launchDims.x, launchDims.y);
//shape::printShapeInfo(xShapeInfo);
}
return launchDims;
}
/**
* Returns optimal launch parameters
* given the extra pointers passed in.
* The extra pointer should be
* the host pointer for the shape information
* associated with the data.
* From there it is used to obtain the length
* from which we can derive the optimal launch parameters.
*
*/
template <typename T>
dim3 getOptimalLaunchParameters(Nd4jPointer *extraPointers, cudaFuncAttributes attributes, cudaDeviceProp properties) {
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
Nd4jIndex n = shape::length(hostXShapeInfo);
dim3 launchDims = getOptimalDimensions<T>(n,attributes, properties);
if (debug && verbose)
printf("Params: gridSize: [%i], blockSize: [%i], shMem: [%i], problemLength: [%i], totalThreads:[%i]\n", launchDims.x, launchDims.y, launchDims.z, n, (launchDims.x * launchDims.y));
return launchDims;
}
float cpu_half2float(half h) {
unsigned sign = ((h.x >> 15) & 1);
unsigned exponent = ((h.x >> 10) & 0x1f);
unsigned mantissa = ((h.x & 0x3ff) << 13);
if (exponent == 0x1f) { /* NaN or Inf */
mantissa = (mantissa ? (sign = 0, 0x7fffff) : 0);
exponent = 0xff;
} else if (!exponent) { /* Denorm or Zero */
if (mantissa) {
unsigned int msb;
exponent = 0x71;
do {
msb = (mantissa & 0x400000);
mantissa <<= 1; /* normalize */
--exponent;
} while (!msb);
mantissa &= 0x7fffff; /* 1.mantissa is implicit */
}
} else {
exponent += 0x70;
}
int temp = ((sign << 31) | (exponent << 23) | mantissa);
return *((float*)((void*)&temp));
}
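/*
Worked example for the conversion above (editor addition): the half bit pattern 0x3C00 (1.0)
has sign 0, exponent 0x0F, mantissa 0; the exponent is rebased by +0x70 to 0x7F, so the float
bits become (0 << 31) | (0x7F << 23) | 0 = 0x3F800000, i.e. 1.0f.
*/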
nd4j::buffer::Buffer<int> * createScalarBuffer(cudaStream_t stream) {
int *scalarShapeInfo = shape::createScalarShapeInfo();
nd4j::buffer::Buffer<int> *buff = nd4j::buffer::createBuffer(scalarShapeInfo,shape::shapeInfoLength(2), stream);
nd4j::buffer::copyDataToGpu(&buff, stream);
return buff;
}
class ScalarShapeInformation {
private:
nd4j::buffer::Buffer<int> *scalarDimension;
nd4j::buffer::Buffer<int> *scalarShapeInfo;
std::thread::id threadId;
public:
ScalarShapeInformation(cudaStream_t stream) {
int *scalarDimensionBuff = (int *) malloc(sizeof(int));
scalarDimensionBuff[0] = MAX_DIMENSION;
scalarDimension = nd4j::buffer::createBuffer(scalarDimensionBuff,1, stream);
scalarShapeInfo = createScalarBuffer(stream);
threadId = std::this_thread::get_id();
}
~ScalarShapeInformation() {
nd4j::buffer::freeBuffer(&scalarShapeInfo);
nd4j::buffer::freeBuffer(&scalarDimension);
}
int *getShapeInfoHostPointer() {
return scalarShapeInfo->data;
}
int * getShapeInfoGpuPointer() {
return scalarShapeInfo->gData;
}
int * getDimensionHostPointer() {
return scalarDimension->data;
}
int * getDimensionGpuPointer() {
return scalarDimension->gData;
}
};
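// Hedged usage sketch: ScalarShapeInformation bundles the scalar shape buffer and the
// { MAX_DIMENSION } dimension buffer, mirrored on host and device, for kernels that
// reduce to a scalar. Illustrative only:
//
//   ScalarShapeInformation scalarShape(stream);
//   int *dShapeInfo = scalarShape.getShapeInfoGpuPointer();  // device-side scalar shape info
//   int *dDimension = scalarShape.getDimensionGpuPointer();  // device-side { MAX_DIMENSION }
//   // pass dShapeInfo / dDimension to a kernel that expects a scalar result shape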
template <typename T>
class ScalarInfo {
nd4j::buffer::Buffer<T> *scalarData;
ScalarShapeInformation *shapeInfo;
T finalResult;
cudaStream_t streamRef;
public:
ScalarInfo(cudaStream_t stream) {
T *scalarResult = (T*)malloc(sizeof(T));
shapeInfo = new ScalarShapeInformation(stream);
scalarData = nd4j::buffer::createBuffer(scalarResult,1, stream);
streamRef = stream;
nd4j::buffer::copyDataToGpu(&scalarData, stream);
}
T getFinalResultFromDevice() {
nd4j::buffer::copyDataFromGpu(&scalarData, streamRef);
return scalarData->data[0];
}
/**
* Get the device shape information
* representing a scalar
*/
int *getDeviceShapeInfo() {
return shapeInfo->getShapeInfoGpuPointer();
}
/**
* Get the device pointer to the result buffer
*/
T *getDevicePointer() {
return scalarData->gData;
}
/**
* Get the device pointer to the dimension buffer
* (holds MAX_DIMENSION, i.e. "reduce along all dimensions")
*/
int *getDimensionDevicePointer() {
return shapeInfo->getDimensionGpuPointer();
}
~ScalarInfo() {
nd4j::buffer::freeBuffer(&scalarData);
delete shapeInfo;
}
};
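// Hedged usage sketch for ScalarInfo<T> (illustrative only): it wraps a single-element
// device buffer plus the scalar shape/dimension buffers, so a reduce-style kernel can
// write its scalar result and the host can read it back afterwards.
//
//   ScalarInfo<double> scalar(stream);
//   // launch a kernel that writes to scalar.getDevicePointer(), describing the output
//   // with scalar.getDeviceShapeInfo() and scalar.getDimensionDevicePointer()
//   double hostResult = scalar.getFinalResultFromDevice();  // copies the result back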
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
*/
double NativeOps::execIndexReduceScalarDouble(Nd4jPointer *extraPointers,int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams) {
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
if (debug && verbose)
printf("D1 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[27], deviceProperties[getDeviceId(extraPointers[2])]);
double *resultPointer = reinterpret_cast<double *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[27], 1, sizeof(double), 2);
indexReduceDouble<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer,
nullptr, 0,
nullptr,
1,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
checkCudaErrors(cudaStreamSynchronize(*stream));
double result = resultPointer[0];
return result;
}
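// Layout of the extraPointers array, as inferred from the casts in this file
// (documentation only; the host side is expected to populate it accordingly):
//   [0]  host X shape info              [1]  cuda stream handle
//   [2]  device id                      [3]  device allocation scratch (int *)
//   [4]  device reduction scratch       [5]  host-readable scalar result buffer
//   [6]  special device scratch buffer used by the "special ops" paths
//   [7]  host Y shape info              [8]  host Z shape info
//   [9]  host TAD shape info            [10] device TAD shape info
//   [11] device TAD offsets             [12]-[14] secondary TAD buffers (softmax path)
//   [15] dimension pointer used by dimensional IsMax
// Indices 12-15 are only exercised by the special-ops code and this is a best-effort
// reading of those paths.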
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfoBuffer
* @param dimension
* @param dimensionLength
*/
void NativeOps::execIndexReduceDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfoBuffer,
Nd4jPointer dimension, int dimensionLength) {
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfoBuffer);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
if (debug && verbose)
printf("D2 opNum:[%i]\n", opNum);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[27], dimensionLength, sizeof(double), 2);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
indexReduceDouble<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer,
resultShapeInfoPointer, shape::rank(hostZShapeInfo),
dimensionPointer,
dimensionLength,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execBroadcastDouble(Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer dimension, int dimensionLength){
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *yPointer = reinterpret_cast<double *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
if (debug && verbose)
printf("D3 opNum:[%i]\n", opNum);
// dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[26], deviceProperties[getDeviceId(extraPointers[2])]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[26], dimensionLength, sizeof(double), 0);
broadcastDouble<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
yPointer,
yShapeInfoPointer, shape::rank(hostYShapeInfo),
resultPointer,
resultShapeInfoPointer, shape::rank(hostZShapeInfo),
dimensionPointer,
dimensionLength, deviceTADShapeInfo, deviceTADOffsets);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xStride
* @param y
* @param yStride
* @param result
* @param resultStride
* @param extraParams
* @param n
*/
void NativeOps::execPairwiseTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
int xStride,
Nd4jPointer y,
int yStride,
Nd4jPointer result,
int resultStride,
Nd4jPointer extraParams, Nd4jIndex n) {
double *xPointer = reinterpret_cast<double *>(dx);
double *yPointer = reinterpret_cast<double *>(y);
double *resultPointer = reinterpret_cast<double *>(result);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
if (debug && verbose)
printf("D4 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[25], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[25]);
pairWiseTransformStridedDouble<<<launchDims.x,launchDims.y, launchDims.z, *stream>>> (
opNum,
n,
xPointer,
yPointer,
xStride,
yStride,
extraParamsPointer,
resultPointer,
resultStride, allocationPointer, deviceTADShapeInfo);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param xIndexes
* @param yIndexes
* @param resultIndexes
*/
void NativeOps::execPairwiseTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer xIndexes,
Nd4jPointer yIndexes,
Nd4jPointer resultIndexes) {
double *xPointer = reinterpret_cast<double *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *yPointer = reinterpret_cast<double *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
int *xIndexesPointer = reinterpret_cast<int *>(xIndexes);
int *yIndexesPointer = reinterpret_cast<int *>(yIndexes);
int *resultIndexesPointer = reinterpret_cast<int *>(resultIndexes);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
if (debug && verbose)
printf("D5 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[24], deviceProperties[getDeviceId(extraPointers[2])]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[24]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
pairWiseTransformDoubleIndex <<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(
opNum,
xPointer,
yPointer,
extraParamsPointer,
resultPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
yShapeInfoPointer, shape::rank(hostYShapeInfo),
resultShapeInfoPointer, shape::rank(hostZShapeInfo),
xIndexesPointer,
yIndexesPointer,
resultIndexesPointer, allocationPointer, deviceTADShapeInfo);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
*/
void NativeOps::execPairwiseTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams) {
double *xPointer = reinterpret_cast<double *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *yPointer = reinterpret_cast<double *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
if (debug && verbose)
printf("D6 opNum:[%i]\n", opNum);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[23], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[23]);
pairWiseTransformDouble<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>(
opNum,
xPointer,
yPointer,
extraParamsPointer,
resultPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
yShapeInfoPointer, shape::rank(hostYShapeInfo),
resultShapeInfoPointer, shape::rank(hostZShapeInfo), allocationPointer, deviceTADShapeInfo);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduceDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfo) {
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
if (debug && verbose)
printf("D7 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[22], deviceProperties[getDeviceId(extraPointers[2])]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[22], 1, sizeof(double), 1);
reduceScalarDouble<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
nullptr,
1,
reductionPointer, deviceTADShapeInfo);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execReduceDouble(
Nd4jPointer *extraPointers
,int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer dimension,
int dimensionLength) {
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
if (debug && verbose)
printf("D8 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[22], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[22], dimensionLength, sizeof(double), 1);
if (dimensionLength == 1) {
reduceDouble1D<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
} else if (shape::rank(hostTADShapeInfo) <= 3) {
reduceDouble6D<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
} else {
reduceDouble<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
}
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @return
*/
double NativeOps::execReduceScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams){
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
if (debug && verbose)
printf("D9 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[22], deviceProperties[getDeviceId(extraPointers[2])]);
double *resultPointer = reinterpret_cast<double *>(extraPointers[5]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[22], 1, sizeof(double), 1);
reduceScalarDouble<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
nullptr,
nullptr,
1,
reductionPointer, deviceTADShapeInfo);
checkCudaErrors(cudaStreamSynchronize(*stream));
double result = resultPointer[0];
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduce3Double(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParamsVals,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo) {
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *yPointer = reinterpret_cast<double *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParamsVals);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
if (debug && verbose)
printf("D10 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[21], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
//dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), (int *) extraPointers[0], yShapeInfoPointer, resultShapeInfoPointer, 1, sizeof(double), 2);
//dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), (int *) extraPointers[0], yShapeInfoPointer);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[21]);
reduce3Double<<<1,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
nullptr,
1,
1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
*/
double NativeOps::execReduce3ScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParamsVals,
Nd4jPointer y,
Nd4jPointer yShapeInfo){
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *yPointer = reinterpret_cast<double *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParamsVals);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[21], deviceProperties[getDeviceId(extraPointers[2])]);
if (debug && verbose)
printf("D11 opNum:[%i]\n", opNum);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
double *resultPointer = reinterpret_cast<double *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
//dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), (int *) extraPointers[0], yShapeInfoPointer, nullptr, 1, sizeof(double), 2);
//dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), (int *) extraPointers[0], yShapeInfoPointer);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[21]);
reduce3Double<<<1,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
extraParamsPointer,
resultPointer,
nullptr,
nullptr,
1,
1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets);
checkCudaErrors(cudaStreamSynchronize(*stream));
double result = resultPointer[0];
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfoBuffer
* @param dimension
* @param dimensionLength
*/
void NativeOps::execReduce3Double(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParamsVals,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfoBuffer,
Nd4jPointer dimension,
int dimensionLength){
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *yPointer = reinterpret_cast<double *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfoBuffer);
double *extraParamsPointer = reinterpret_cast<double *>(extraParamsVals);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
if (debug && verbose)
printf("D12 opNum:[%i]\n", opNum);
// dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[21], deviceProperties[getDeviceId(extraPointers[2])]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
//dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), (int *) extraPointers[0], yShapeInfoPointer, resultShapeInfoPointer, dimensionLength, sizeof(double), 2);
//dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), (int *) extraPointers[0], yShapeInfoPointer);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[21]);
reduce3Double<<<1,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xStride
* @param result
* @param resultStride
* @param scalar
* @param extraParams
* @param n
*/
void NativeOps::execScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
int xStride,
Nd4jPointer result,
int resultStride,
double scalar,
Nd4jPointer extraParams,
Nd4jIndex n) {
double *xPointer = reinterpret_cast<double *>(x);
double *resultPointer = reinterpret_cast<double *>(result);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
if (debug && verbose)
printf("D13 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[20], deviceProperties[getDeviceId(extraPointers[2])]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[20]);
scalarDouble<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
n,
scalar,
xPointer,
xStride,
extraParamsPointer,
resultPointer,resultStride, allocPointer);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param scalar
* @param extraParams
*/
void NativeOps::execScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
double scalar,
Nd4jPointer extraParams){
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
if (debug && verbose)
printf("D14 opNum:[%i]\n", opNum);
// dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[19], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[19]);
scalarDouble<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
scalar,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer,resultShapeInfoPointer, shape::rank(hostZShapeInfo), allocPointer);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param scalar
* @param extraParams
* @param n
* @param xIndexes
* @param resultIndexes
*/
void NativeOps::execScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
double scalar,
Nd4jPointer extraParams,
Nd4jIndex n,
Nd4jPointer xIndexes,
Nd4jPointer resultIndexes){
double *xPointer = reinterpret_cast<double *>(x);
double *resultPointer = reinterpret_cast<double *>(result);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
int *resultIndexesPointer = reinterpret_cast<int *>(resultIndexes);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
if (debug && verbose)
printf("D15 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[18], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[18]);
scalarDoubleIndexes<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
n,
scalar,
xPointer,
extraParamsPointer,
resultPointer,
resultIndexesPointer, allocPointer);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
*/
double NativeOps::execSummaryStatsScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,bool biasCorrected){
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
if (debug && verbose)
printf("D16 opNum:[%i]\n", opNum);
// dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[17], deviceProperties[getDeviceId(extraPointers[2])]);
double *resultPointer = reinterpret_cast<double *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[17], 1, sizeof(double), 8);
summaryStatsReduceDouble<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer,
nullptr, 0,
nullptr,
1,
1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
checkCudaErrors(cudaStreamSynchronize(*stream));
double result = resultPointer[0];
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execSummaryStatsDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,bool biasCorrected) {
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
if (debug && verbose)
printf("D17 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[17], deviceProperties[getDeviceId(extraPointers[2])]);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[17], 1, sizeof(double), 8);
summaryStatsReduceDouble<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer,
resultShapeInfoPointer, shape::rank(hostZShapeInfo),
nullptr,
1,
1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfoBuffer
* @param dimension
* @param dimensionLength
*/
void NativeOps::execSummaryStatsDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfoBuffer,
Nd4jPointer dimension, int dimensionLength,bool biasCorrected){
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfoBuffer);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
if (debug && verbose)
printf("D18 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[17], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[17], dimensionLength, sizeof(double), 8);
summaryStatsReduceDouble<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer,
resultShapeInfoPointer, shape::rank(hostZShapeInfo),
dimensionPointer,
dimensionLength,
1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xStride
* @param result
* @param resultStride
* @param extraParams
* @param n
*/
void NativeOps::execTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
int xStride,
Nd4jPointer result,
int resultStride,
Nd4jPointer extraParams,
Nd4jIndex n) {
double *xPointer = reinterpret_cast<double *>(dx);
double *resultPointer = reinterpret_cast<double *>(result);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
if (debug && verbose)
printf("D19 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[16], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[16]);
transformDouble<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
n,
xPointer,
xStride,
extraParamsPointer,
resultPointer,resultStride, allocPointer, reductionPointer);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams){
double *xPointer = reinterpret_cast<double *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
if (debug && verbose)
printf("D20 opNum:[%i]\n", opNum);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[1], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
// special device buffer used as scratch space by the special ops handled below
double *specialPointer = reinterpret_cast<double *>(extraPointers[6]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[1]);
int *dimension = (int *) specialPointer;
int *maxDimension = dimension + 1;
int *maxShapeBuffer = (int *) maxDimension + 1;
double * special = (double *) maxShapeBuffer + (MAX_RANK * 2 + 4);
// ops 38..41 (SoftMax, SoftMax derivative, LogSoftMax, IsMax) get special handling: they are composed from reduce, broadcast and transform calls instead of a single kernel
if (opNum >= 38 && opNum <= 41) {
if (shape::isVector(hostXShapeInfo) && opNum != 41) {
// if the input is a vector, we go directly to the op in a single block
int length = shape::length(hostXShapeInfo);
int block = nd4j::math::nd4j_min<int>(256, length);
transformDouble<<< 1, block,launchDims.z + (block * sizeof(double) * 8), *stream >>> (
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer, resultShapeInfoPointer, shape::rank(hostZShapeInfo), allocPointer, reductionPointer);
} else {
// going for blockwise specials
//float *xpf = reinterpret_cast<float *>(dx);
int *shape = shape::shapeOf(hostXShapeInfo);
//printf("Rows num: %i\n", shape[0]);
switch (opNum) {
case 40: // LogSoftMax
case 39: // SoftMax Derivative
case 38: {// softmax
Nd4jPointer tempPointers[16];
tempPointers[0] = extraPointers[0];
tempPointers[1] = extraPointers[1];
tempPointers[2] = extraPointers[2];
tempPointers[3] = extraPointers[3];
tempPointers[4] = extraPointers[4];
tempPointers[5] = extraPointers[5];
tempPointers[6] = extraPointers[6];
tempPointers[7] = extraPointers[7];
tempPointers[8] = extraPointers[8];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
tempPointers[12] = extraPointers[12];
tempPointers[13] = extraPointers[13];
tempPointers[14] = extraPointers[14];
tempPointers[15] = extraPointers[15];
int maxShape[2] = {shape::shapeOf(hostXShapeInfo)[0], 1};
int *hostMaxShapeBuffer = shape::shapeBuffer(2, maxShape);
tempPointers[7] = (Nd4jPointer) hostMaxShapeBuffer;
tempPointers[8] = (Nd4jPointer) hostMaxShapeBuffer;
prepareShapeBuffer<<<1, 1, 128, *stream>>>(dimension, maxDimension, maxShapeBuffer, shape[0]);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
tempPointers[9] = extraPointers[12];
tempPointers[10] = extraPointers[13];
tempPointers[11] = extraPointers[14];
// reduce: max (op 3) along maxDimension
execReduceDouble(tempPointers, 3, dx, xShapeInfo, extraParams, (Nd4jPointer) special,
(Nd4jPointer) maxShapeBuffer, (Nd4jPointer) maxDimension, 1);
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
// broadcast: subtract (op 1) the per-row max
execBroadcastDouble(tempPointers, 1, dx, xShapeInfo, (Nd4jPointer) special,
(Nd4jPointer) maxShapeBuffer, dx, xShapeInfo, (Nd4jPointer) dimension, 1);
// transform: exp (op 3)
execTransformDouble(extraPointers, 3, dx, xShapeInfo, dx, xShapeInfo, extraParams);
tempPointers[8] = tempPointers[7];
tempPointers[9] = extraPointers[12];
tempPointers[10] = extraPointers[13];
tempPointers[11] = extraPointers[14];
// reduce: sum (op 1) of exp(x - max)
execReduceDouble(tempPointers, 1, dx, xShapeInfo, extraParams, (Nd4jPointer) special,
(Nd4jPointer) maxShapeBuffer, (Nd4jPointer) maxDimension, 1);
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
// broadcast: divide (op 3) by the per-row sum
execBroadcastDouble(tempPointers, 3, dx, xShapeInfo, (Nd4jPointer) special,
(Nd4jPointer) maxShapeBuffer, dx, xShapeInfo, (Nd4jPointer) dimension, 1);
// final step: log transform for LogSoftMax (op 40), derivative transform for SoftMax derivative (op 39)
if (opNum == 40)
execTransformDouble(extraPointers, 5, dx, xShapeInfo, dx, xShapeInfo, extraParams);
else if (opNum == 39)
execTransformDouble(extraPointers, 42, dx, xShapeInfo, dx, xShapeInfo, extraParams);
delete[] hostMaxShapeBuffer; // array delete to match the array allocated by shape::shapeBuffer
break;
}
case 41: {
// IsMax along all dimensions
bool scalarCheat = false;
if (extraParamsPointer == nullptr) {
scalarCheat = true;
} else {
//extraParamsPointer == nullptr || (shape::isVector(hostXShapeInfo))
//if (shape::isVector(hostXShapeInfo) && extraParamsPointer[1] == 1) {
// scalarCheat = true;
//}
}
if (scalarCheat) {
//printf("Going for scalar IsMax\n");
int maxIdx = (int) execIndexReduceScalarDouble(extraPointers, 0, dx, xShapeInfo, extraParams);
int targetIdx = 0;
if (shape::order(hostXShapeInfo) == 'c' || (shape::order(hostXShapeInfo) == 'f' && maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1] >= shape::length(hostXShapeInfo)))
targetIdx = maxIdx;
else
targetIdx = maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1];
fillIsMaxDouble<<< 1, 128, 0, *stream >>>(resultPointer, shape::length(hostXShapeInfo), targetIdx);
} else {
// going for dimension-based IsMax
//printf("Going for dimension-based IsMax\n");
int *tadMaxShapeInfo = reinterpret_cast<int *> (extraPointers[10]);
int *tadMaxOffsets = reinterpret_cast<int *> (extraPointers[11]);
int *dimensionPointer = reinterpret_cast<int *> (extraPointers[15]);
// we call for IMax on specified dimension
execIndexReduceDouble(extraPointers, 0, dx, xShapeInfo, extraParams, (Nd4jPointer) special, (Nd4jPointer) hostYShapeInfo, (Nd4jPointer) dimensionPointer, 1);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
// at this point, all IMax indexes are gathered, and we execute
fillDimensionalIsMaxDouble<<<768, 16, funcAttributes[37].sharedSizeBytes, *stream>>>(special, hostYShapeInfo, resultPointer, resultShapeInfoPointer, tadMaxShapeInfo, dimensionPointer, 1, tadMaxOffsets );
checkCudaErrors(cudaStreamSynchronize(*stream));
}
break;
}
default: {
printf("Bad case for transformFloat\n");
break;
}
}
}
} else {
transformDouble<<<launchDims.x, launchDims.y, launchDims.z, *stream>>> (
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer, resultShapeInfoPointer, shape::rank(hostZShapeInfo), allocPointer, reductionPointer);
}
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
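// For reference, the special-ops path above composes SoftMax from the generic kernels.
// Per row x it computes (subtracting the max is the usual numerical-stability trick):
//
//   softmax(x)_i    = exp(x_i - max(x)) / sum_j exp(x_j - max(x))
//   logsoftmax(x)_i = log(softmax(x)_i)
//
// which maps onto the calls above as: reduce max -> broadcast subtract -> transform exp
// -> reduce sum -> broadcast divide -> (transform log for LogSoftMax, or the derivative
// transform for SoftMax derivative). IsMax (op 41) is instead handled via IndexReduce
// plus the fillIsMax / fillDimensionalIsMax kernels.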
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param xIndexes
* @param resultIndexes
*/
void NativeOps::execTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer xIndexes,
Nd4jPointer resultIndexes) {
double *xPointer = reinterpret_cast<double *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
int *resultIndexesPointer = reinterpret_cast<int *>(resultIndexes);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
if (debug && verbose)
printf("D21 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[14], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[14]);
transformDoubleIndexes<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer,
resultIndexesPointer, allocPointer, reductionPointer);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
*/
float NativeOps::execIndexReduceScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
if (debug && verbose)
printf("F1 opNum:[%i]\n", opNum);
// dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[13], deviceProperties[getDeviceId(extraPointers[2])]);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
float *resultPointer = reinterpret_cast<float *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[13], 1, sizeof(float), 2);
if (debug && verbose && launchDims.x == 1)
printf("AF1 opNum:[%i]\n", opNum);
indexReduceFloat<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer,
nullptr, 0,
nullptr,
1,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
checkCudaErrors(cudaStreamSynchronize(*stream));
float result = resultPointer[0];
return result;
}
float NativeOps::execIndexReduceScalarHalf(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams){
nd4j::float16 *xPointer = reinterpret_cast<nd4j::float16 *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
nd4j::float16 *extraParamsPointer = reinterpret_cast<nd4j::float16 *>(extraParams);
if (debug && verbose)
printf("H1 opNum:[%i]\n", opNum);
// dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[13], deviceProperties[getDeviceId(extraPointers[2])]);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
nd4j::float16 *resultPointer = reinterpret_cast<nd4j::float16 *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
nd4j::float16 *reductionPointer = reinterpret_cast<nd4j::float16 *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[13], 1, sizeof(nd4j::float16), 2);
if (debug && verbose && launchDims.x == 1)
printf("AH1 opNum:[%i]\n", opNum);
indexReduceHalf<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer,
nullptr, 0,
nullptr,
1,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
checkCudaErrors(cudaStreamSynchronize(*stream));
float result = (float) resultPointer[0];
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfoBuffer
* @param dimension
* @param dimensionLength
*/
void NativeOps::execIndexReduceFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfoBuffer,
Nd4jPointer dimension,
int dimensionLength){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfoBuffer);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
if (debug && verbose)
printf("F2 opNum:[%i]\n", opNum);
// dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[13], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[13], dimensionLength, sizeof(float), 2);
if (verbose && launchDims.x == 1)
printf("AF2 opNum:[%i]\n", opNum);
indexReduceFloat<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer,
resultShapeInfoPointer, shape::rank(hostZShapeInfo),
dimensionPointer,
dimensionLength,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::execIndexReduceHalf(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfoBuffer,
Nd4jPointer dimension,
int dimensionLength){
nd4j::float16 *xPointer = reinterpret_cast<nd4j::float16 *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
nd4j::float16 *extraParamsPointer = reinterpret_cast<nd4j::float16 *>(extraParams);
nd4j::float16 *resultPointer = reinterpret_cast<nd4j::float16 *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfoBuffer);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
if (debug && verbose)
printf("H2 opNum:[%i]\n", opNum);
// dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[13], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
nd4j::float16 *reductionPointer = reinterpret_cast<nd4j::float16 *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[13], dimensionLength, sizeof(nd4j::float16), 2);
if (verbose && launchDims.x == 1)
printf("AH2 opNum:[%i]\n", opNum);
indexReduceHalf<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer,
resultShapeInfoPointer, shape::rank(hostZShapeInfo),
dimensionPointer,
dimensionLength,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execBroadcastFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer dimension, int dimensionLength){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *yPointer = reinterpret_cast<float *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
if (debug && verbose)
printf("F3 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[12], deviceProperties[getDeviceId(extraPointers[2])]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[12], 1, sizeof(float), 0);
broadcastFloat<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
yPointer,
yShapeInfoPointer, shape::rank(hostYShapeInfo),
resultPointer,
resultShapeInfoPointer, shape::rank(hostZShapeInfo),
dimensionPointer,
dimensionLength, deviceTADShapeInfo, deviceTADOffsets);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::execBroadcastHalf(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer dimension, int dimensionLength){
nd4j::float16 *xPointer = reinterpret_cast<nd4j::float16 *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
nd4j::float16 *yPointer = reinterpret_cast<nd4j::float16 *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
nd4j::float16 *resultPointer = reinterpret_cast<nd4j::float16 *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
if (debug && verbose)
printf("H3 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[12], deviceProperties[getDeviceId(extraPointers[2])]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[12], 1, sizeof(nd4j::float16), 0);
broadcastHalf<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
yPointer,
yShapeInfoPointer, shape::rank(hostYShapeInfo),
resultPointer,
resultShapeInfoPointer, shape::rank(hostZShapeInfo),
dimensionPointer,
dimensionLength, deviceTADShapeInfo, deviceTADOffsets);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xStride
* @param y
* @param yStride
* @param result
* @param resultStride
* @param extraParams
* @param n
*/
void NativeOps::execPairwiseTransformFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
int xStride,
Nd4jPointer y,
int yStride,
Nd4jPointer result,
int resultStride,
Nd4jPointer extraParams, Nd4jIndex n){
float *xPointer = reinterpret_cast<float *>(dx);
float *yPointer = reinterpret_cast<float *>(y);
float *resultPointer = reinterpret_cast<float *>(result);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
if (debug && verbose)
printf("F4 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[11], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
//dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), (int *) extraPointers[0], nullptr, (int *) extraPointers[7], 1, sizeof(float), 0);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[11]);
if (verbose && launchDims.x == 1)
printf("AF4 opNum:[%i], xLength: [%i]\n", opNum, shape::length(hostXShapeInfo));
pairWiseTransformStridedFloat<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>(
opNum,
n,
xPointer,
yPointer,
xStride,
yStride,
extraParamsPointer,
resultPointer,
resultStride, allocationPointer, deviceTADShapeInfo);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::execPairwiseTransformHalf(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
int xStride,
Nd4jPointer y,
int yStride,
Nd4jPointer result,
int resultStride,
Nd4jPointer extraParams, Nd4jIndex n){
nd4j::float16 *xPointer = reinterpret_cast<nd4j::float16 *>(dx);
nd4j::float16 *yPointer = reinterpret_cast<nd4j::float16 *>(y);
nd4j::float16 *resultPointer = reinterpret_cast<nd4j::float16 *>(result);
nd4j::float16 *extraParamsPointer = reinterpret_cast<nd4j::float16 *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
if (debug && verbose)
printf("H4 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[11], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
//dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), (int *) extraPointers[0], nullptr, (int *) extraPointers[7], 1, sizeof(float), 0);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[11]);
if (verbose && launchDims.x == 1)
printf("AH4 opNum:[%i], xLength: [%i]\n", opNum, shape::length(hostXShapeInfo));
pairWiseTransformStridedHalf<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>(
opNum,
n,
xPointer,
yPointer,
xStride,
yStride,
extraParamsPointer,
resultPointer,
resultStride, allocationPointer, deviceTADShapeInfo);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param xIndexes
* @param yIndexes
* @param resultIndexes
*/
void NativeOps::execPairwiseTransformFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer xIndexes,
Nd4jPointer yIndexes,
Nd4jPointer resultIndexes){
float *xPointer = reinterpret_cast<float *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *yPointer = reinterpret_cast<float *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
int *xIndexesPointer = reinterpret_cast<int *>(xIndexes);
int *yIndexesPointer = reinterpret_cast<int *>(yIndexes);
int *resultIndexesPointer = reinterpret_cast<int *>(resultIndexes);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
if (debug && verbose)
printf("F5 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[10], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[10], 1, sizeof(float), 0);
if (verbose && launchDims.x == 1)
printf("AF5 opNum:[%i]\n", opNum);
pairWiseTransformFloatIndex<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>(
opNum,
xPointer,
yPointer,
extraParamsPointer,
resultPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
yShapeInfoPointer, shape::rank(hostYShapeInfo),
resultShapeInfoPointer, shape::rank(hostZShapeInfo),
xIndexesPointer,
yIndexesPointer,
resultIndexesPointer, allocationPointer, deviceTADShapeInfo);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::execPairwiseTransformHalf(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer xIndexes,
Nd4jPointer yIndexes,
Nd4jPointer resultIndexes){
nd4j::float16 *xPointer = reinterpret_cast<nd4j::float16 *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
nd4j::float16 *yPointer = reinterpret_cast<nd4j::float16 *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
nd4j::float16 *resultPointer = reinterpret_cast<nd4j::float16 *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
nd4j::float16 *extraParamsPointer = reinterpret_cast<nd4j::float16 *>(extraParams);
int *xIndexesPointer = reinterpret_cast<int *>(xIndexes);
int *yIndexesPointer = reinterpret_cast<int *>(yIndexes);
int *resultIndexesPointer = reinterpret_cast<int *>(resultIndexes);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
if (debug && verbose)
printf("H5 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[10], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[10], 1, sizeof(nd4j::float16), 0);
if (verbose && launchDims.x == 1)
printf("AH5 opNum:[%i]\n", opNum);
pairWiseTransformHalfIndex<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>(
opNum,
xPointer,
yPointer,
extraParamsPointer,
resultPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
yShapeInfoPointer, shape::rank(hostYShapeInfo),
resultShapeInfoPointer, shape::rank(hostZShapeInfo),
xIndexesPointer,
yIndexesPointer,
resultIndexesPointer, allocationPointer, deviceTADShapeInfo);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
*/
void NativeOps::execPairwiseTransformFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams){
float *xPointer = reinterpret_cast<float *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *yPointer = reinterpret_cast<float *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
if (debug && verbose)
printf("F6 opNum:[%i]\n", opNum);
// dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[9], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
//dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), (int *) extraPointers[0], resultShapeInfoPointer, yShapeInfoPointer, 1, sizeof(float), 0);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[9]);
if (verbose && launchDims.x == 1) {
printf("AF6 opNum:[%i], launchDims.x: [%i], launchDims.y: [%i]\n", opNum, launchDims.x, launchDims.y);
shape::printShapeInfoLinear(hostXShapeInfo);
}
pairWiseTransformFloat<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>(
opNum,
xPointer,
yPointer,
extraParamsPointer,
resultPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
yShapeInfoPointer, shape::rank(hostYShapeInfo),
resultShapeInfoPointer, shape::rank(hostZShapeInfo), allocationPointer, deviceTADShapeInfo);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::execPairwiseTransformHalf(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams){
nd4j::float16 *xPointer = reinterpret_cast<nd4j::float16 *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
nd4j::float16 *yPointer = reinterpret_cast<nd4j::float16 *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
nd4j::float16 *resultPointer = reinterpret_cast<nd4j::float16 *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
nd4j::float16 *extraParamsPointer = reinterpret_cast<nd4j::float16 *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
if (debug && verbose)
printf("H6 opNum:[%i]\n", opNum);
// dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[9], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
//dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), (int *) extraPointers[0], resultShapeInfoPointer, yShapeInfoPointer, 1, sizeof(float), 0);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[9]);
if (verbose && launchDims.x == 1) {
printf("HF6 opNum:[%i], launchDims.x: [%i], launchDims.y: [%i]\n", opNum, launchDims.x, launchDims.y);
shape::printShapeInfoLinear(hostXShapeInfo);
}
pairWiseTransformHalf<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>(
opNum,
xPointer,
yPointer,
extraParamsPointer,
resultPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
yShapeInfoPointer, shape::rank(hostYShapeInfo),
resultShapeInfoPointer, shape::rank(hostZShapeInfo), allocationPointer, deviceTADShapeInfo);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduceFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfo) {
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
if (debug && verbose)
printf("F7 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[8], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[8], 1, sizeof(float), 1);
if (verbose && launchDims.x == 1)
printf("AF7 opNum:[%i]\n", opNum);
reduceScalarFloat<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
nullptr,
1,
reductionPointer, deviceTADShapeInfo);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::execReduceHalf(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfo) {
nd4j::float16 *xPointer = reinterpret_cast<nd4j::float16 *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
nd4j::float16 *resultPointer = reinterpret_cast<nd4j::float16 *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
nd4j::float16 *extraParamsPointer = reinterpret_cast<nd4j::float16 *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
if (debug && verbose)
printf("H7 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[8], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
nd4j::float16 *reductionPointer = reinterpret_cast<nd4j::float16 *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[8], 1, sizeof(nd4j::float16), 1);
if (verbose && launchDims.x == 1)
printf("AH7 opNum:[%i]\n", opNum);
reduceScalarHalf<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
nullptr,
1,
reductionPointer, deviceTADShapeInfo);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execReduceFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer dimension,int dimensionLength){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
if (debug && verbose)
printf("F8 opNum:[%i]\n", opNum);
// dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[8], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
// DO NOT REMOVE COMMENTS OR CODE BELOW.
// [email protected]
// shape::TAD *tad = new shape::TAD();
// tad->init(xShapeInfoPointer, dimensionPointer, dimensionLength);
// tad->setOutputBuffer(allocPointer);
// tad->createTadOnlyShapeInfo();
// shape::printShapeInfo(tad->tadOnlyShapeInfo);
// dim3 getBetterDimensions(int deviceId, int numTads, int tadLength, int xRank, int yRank, int zRank, int dimensionLength, int elementSize, int reduction)
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[8], dimensionLength, sizeof(float), 1);
if (verbose && launchDims.x == 1)
printf("AF8 opNum:[%i]\n", opNum);
if (dimensionLength == 1) {
reduceFloat1D<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
} else if (shape::rank(hostTADShapeInfo) <= 3) {
reduceFloat6D<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
} else {
reduceFloat<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
}
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
//delete tad;
}
void NativeOps::execReduceHalf(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer dimension,int dimensionLength){
nd4j::float16 *xPointer = reinterpret_cast<nd4j::float16 *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
nd4j::float16 *resultPointer = reinterpret_cast<nd4j::float16 *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
nd4j::float16 *extraParamsPointer = reinterpret_cast<nd4j::float16 *>(extraParams);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
if (debug && verbose)
printf("H8 opNum:[%i]\n", opNum);
// dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[8], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
nd4j::float16 *reductionPointer = reinterpret_cast<nd4j::float16 *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[8], dimensionLength, sizeof(nd4j::float16), 1);
if (verbose && launchDims.x == 1)
printf("AH8 opNum:[%i]\n", opNum);
if (dimensionLength == 1) {
reduceHalf1D<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
} else if (shape::rank(hostTADShapeInfo) <= 3) {
reduceHalf6D<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
} else {
reduceHalf<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
}
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
//delete tad;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @return
*/
float NativeOps::execReduceScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
if (debug && verbose)
printf("F9 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[8], deviceProperties[getDeviceId(extraPointers[2])]);
float *resultPointer = reinterpret_cast<float *>(extraPointers[5]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
//dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[8], 1, sizeof(float), 1);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 8, funcAttributes[8]);
if (verbose && launchDims.x == 1)
printf("AF9 opNum:[%i]\n", opNum);
reduceScalarFloat<<< launchDims.x,launchDims.y, launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
nullptr,
nullptr,
1,
reductionPointer, deviceTADShapeInfo
);
checkCudaErrors(cudaStreamSynchronize(*stream));
float result = resultPointer[0];
return result;
}
float NativeOps::execReduceScalarHalf(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams){
nd4j::float16 *xPointer = reinterpret_cast<nd4j::float16 *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
nd4j::float16 *extraParamsPointer = reinterpret_cast<nd4j::float16 *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
if (debug && verbose)
printf("H9 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[8], deviceProperties[getDeviceId(extraPointers[2])]);
nd4j::float16 *resultPointer = reinterpret_cast<nd4j::float16 *>(extraPointers[5]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
nd4j::float16 *reductionPointer = reinterpret_cast<nd4j::float16 *>(extraPointers[4]);
//dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[8], 1, sizeof(float), 1);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 2, funcAttributes[8]);
if (verbose && launchDims.x == 1)
printf("AH9 opNum:[%i]\n", opNum);
reduceScalarHalf<<< launchDims.x,launchDims.y, launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
nullptr,
nullptr,
1,
reductionPointer, deviceTADShapeInfo
);
checkCudaErrors(cudaStreamSynchronize(*stream));
float result = (float) resultPointer[0];
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduce3Float(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParamsVals,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *yPointer = reinterpret_cast<float *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParamsVals);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
if (debug && verbose)
printf("F10 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[7], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
//dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), (int *) extraPointers[0], yShapeInfoPointer, resultShapeInfoPointer, 1, sizeof(float), 2);
//dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), (int *) extraPointers[0], yShapeInfoPointer);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[7]);
if (verbose && launchDims.x == 1)
printf("AF10 opNum:[%i]\n", opNum);
reduce3ScalarFloat<<<1,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
nullptr,
1,
1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::execReduce3Half(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParamsVals,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo){
nd4j::float16 *xPointer = reinterpret_cast<nd4j::float16 *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
nd4j::float16 *yPointer = reinterpret_cast<nd4j::float16 *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
nd4j::float16 *resultPointer = reinterpret_cast<nd4j::float16 *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
nd4j::float16 *extraParamsPointer = reinterpret_cast<nd4j::float16 *>(extraParamsVals);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
if (debug && verbose)
printf("H10 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[7], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
//dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), (int *) extraPointers[0], yShapeInfoPointer, resultShapeInfoPointer, 1, sizeof(float), 2);
//dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), (int *) extraPointers[0], yShapeInfoPointer);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 8, funcAttributes[7]);
if (verbose && launchDims.x == 1)
printf("AH10 opNum:[%i]\n", opNum);
reduce3ScalarHalf<<<1,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
nullptr,
1,
1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
* @return
*/
float NativeOps::execReduce3ScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParamsVals,
Nd4jPointer y,
Nd4jPointer yShapeInfo) {
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *yPointer = reinterpret_cast<float *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParamsVals);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
if (debug && verbose)
printf("F11 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[7], deviceProperties[getDeviceId(extraPointers[2])]);
float *resultPointer = reinterpret_cast<float *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
//dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), (int *) extraPointers[0], yShapeInfoPointer, nullptr, 1, sizeof(float), 2);
//dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), (int *) extraPointers[0], yShapeInfoPointer);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 32, funcAttributes[7]);
if (verbose && launchDims.x == 1)
printf("AF11 opNum:[%i]\n", opNum);
reduce3ScalarFloat<<<1,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
extraParamsPointer,
resultPointer,
nullptr,
nullptr,
1,
1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets);
checkCudaErrors(cudaStreamSynchronize(*stream));
float result = resultPointer[0];
return result;
}
float NativeOps::execReduce3ScalarHalf(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParamsVals,
Nd4jPointer y,
Nd4jPointer yShapeInfo) {
nd4j::float16 *xPointer = reinterpret_cast<nd4j::float16 *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
nd4j::float16 *yPointer = reinterpret_cast<nd4j::float16 *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
nd4j::float16 *extraParamsPointer = reinterpret_cast<nd4j::float16 *>(extraParamsVals);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
if (debug && verbose)
printf("H11 opNum:[%i]\n", opNum);
nd4j::float16 *resultPointer = reinterpret_cast<nd4j::float16 *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[7]);
if (verbose && launchDims.x == 1)
printf("AH11 opNum:[%i]\n", opNum);
reduce3ScalarHalf<<<1,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
extraParamsPointer,
resultPointer,
nullptr,
nullptr,
1,
1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets);
checkCudaErrors(cudaStreamSynchronize(*stream));
float result = (float) resultPointer[0];
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfoBuffer
* @param dimension
* @param dimensionLength
*/
void NativeOps::execReduce3Float(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParamsVals,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfoBuffer,
Nd4jPointer dimension,
int dimensionLength){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *yPointer = reinterpret_cast<float *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfoBuffer);
float *extraParamsPointer = reinterpret_cast<float *>(extraParamsVals);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[7], deviceProperties[getDeviceId(extraPointers[2])]);
if (debug && verbose)
printf("F12 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
//dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), (int *) extraPointers[0], yShapeInfoPointer, resultShapeInfoPointer, dimensionLength, sizeof(float), 2);
//dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), (int *) extraPointers[0], yShapeInfoPointer);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[7]);
if (verbose && launchDims.x == 1)
printf("AF12 opNum:[%i]\n", opNum);
if (shape::isScalar(hostZShapeInfo) || dimensionPointer == nullptr) {
reduce3ScalarFloat<<<1, launchDims.y, launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets);
} else {
reduce3Float<<<1, launchDims.y, launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets);
}
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::execReduce3Half(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParamsVals,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfoBuffer,
Nd4jPointer dimension,
int dimensionLength){
nd4j::float16 *xPointer = reinterpret_cast<nd4j::float16 *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
nd4j::float16 *yPointer = reinterpret_cast<nd4j::float16 *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
nd4j::float16 *resultPointer = reinterpret_cast<nd4j::float16 *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfoBuffer);
nd4j::float16 *extraParamsPointer = reinterpret_cast<nd4j::float16 *>(extraParamsVals);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[7], deviceProperties[getDeviceId(extraPointers[2])]);
if (debug && verbose)
printf("H12 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 8, funcAttributes[7]);
if (verbose && launchDims.x == 1)
printf("AH12 opNum:[%i]\n", opNum);
if (shape::isScalar(hostZShapeInfo) || dimensionPointer == nullptr) {
reduce3ScalarHalf<<<1, launchDims.y, launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets);
} else {
reduce3Half<<<1, launchDims.y, launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets);
}
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xStride
* @param result
* @param resultStride
* @param scalar
* @param extraParams
* @param n
*/
void NativeOps::execScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
int xStride,
Nd4jPointer result,
int resultStride,
double scalar,
Nd4jPointer extraParams,
Nd4jIndex n){
float *xPointer = reinterpret_cast<float *>(x);
float *resultPointer = reinterpret_cast<float *>(result);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
if (debug && verbose)
printf("F13 opNum:[%i]\n", opNum);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[6]);
if (verbose && launchDims.x == 1)
printf("AF13 opNum:[%i]\n", opNum);
scalarFloat<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
n,
scalar,
xPointer,
xStride,
extraParamsPointer,
resultPointer,resultStride, allocPointer);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param scalar
* @param extraParams
*/
void NativeOps::execScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
float scalar,
Nd4jPointer extraParams){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
Nd4jIndex n = shape::length(hostXShapeInfo);
if (debug && verbose)
printf("F14 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[5], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[5]);
if (verbose && launchDims.x == 1)
printf("AF14 opNum:[%i], xLength:[%i]\n", opNum, shape::length(hostXShapeInfo));
scalarFloat<<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(
opNum,
scalar,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer,resultShapeInfoPointer, shape::rank(hostZShapeInfo), allocPointer );
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::execScalarHalf(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
float scalar,
Nd4jPointer extraParams){
nd4j::float16 *xPointer = reinterpret_cast<nd4j::float16 *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
nd4j::float16 *resultPointer = reinterpret_cast<nd4j::float16 *>(result);
nd4j::float16 *extraParamsPointer = reinterpret_cast<nd4j::float16 *>(extraParams);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
Nd4jIndex n = shape::length(hostXShapeInfo);
if (debug && verbose)
printf("H14 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[5], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[5]);
if (verbose && launchDims.x == 1)
printf("AH14 opNum:[%i], xLength:[%i]\n", opNum, shape::length(hostXShapeInfo));
scalarHalf<<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(
opNum,
scalar,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer,resultShapeInfoPointer, shape::rank(hostZShapeInfo), allocPointer );
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param scalar
* @param extraParams
* @param xIndexes
* @param resultIndexes
*/
void NativeOps::execScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
double scalar,
Nd4jPointer extraParams,
Nd4jPointer xIndexes,
Nd4jPointer resultIndexes){
float *xPointer = reinterpret_cast<float *>(x);
float *resultPointer = reinterpret_cast<float *>(result);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
int *resultIndexesPointer = reinterpret_cast<int *>(resultIndexes);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
Nd4jIndex n = shape::length(hostXShapeInfo);
if (debug && verbose)
printf("F15 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[4], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[4]);
if (verbose && launchDims.x == 1)
printf("AF15 opNum:[%i]\n", opNum);
scalarFloatIndexes<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
n,
scalar,
xPointer,
extraParamsPointer,
resultPointer,
resultIndexesPointer, allocPointer);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param biasCorrected
* @return
*/
float NativeOps::execSummaryStatsScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,bool biasCorrected){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
if (debug && verbose)
printf("F16 opNum:[%i]\n", opNum);
// dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[3], deviceProperties[getDeviceId(extraPointers[2])]);
float *resultPointer = reinterpret_cast<float *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[3], 1, sizeof(float), 8);
if (verbose && launchDims.x == 1)
printf("AF16 opNum:[%i]\n", opNum);
summaryStatsReduceFloat<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer,
nullptr, 0,
nullptr,
1,
1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
checkCudaErrors(cudaStreamSynchronize(*stream));
float result = resultPointer[0];
return result;
}
float NativeOps::execSummaryStatsScalarHalf(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,bool biasCorrected){
nd4j::float16 *xPointer = reinterpret_cast<nd4j::float16 *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
nd4j::float16 *extraParamsPointer = reinterpret_cast<nd4j::float16 *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
if (debug && verbose)
printf("H16 opNum:[%i]\n", opNum);
// dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[3], deviceProperties[getDeviceId(extraPointers[2])]);
nd4j::float16 *resultPointer = reinterpret_cast<nd4j::float16 *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
nd4j::float16 *reductionPointer = reinterpret_cast<nd4j::float16 *>(extraPointers[4]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[3], 1, sizeof(nd4j::float16), 8);
if (verbose && launchDims.x == 1)
printf("AH16 opNum:[%i]\n", opNum);
summaryStatsReduceHalf<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer,
nullptr, 0,
nullptr,
1,
1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
checkCudaErrors(cudaStreamSynchronize(*stream));
float result = (float) resultPointer[0];
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
* @param biasCorrected
*/
void NativeOps::execSummaryStatsFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,bool biasCorrected){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
if (debug && verbose)
printf("F17 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[3], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[3], 1, sizeof(float), 8);
if (verbose && launchDims.x == 1)
printf("AF17 opNum:[%i]\n", opNum);
summaryStatsReduceFloat<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer,
resultShapeInfoPointer, shape::rank(hostZShapeInfo),
nullptr,
1,
1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::execSummaryStatsHalf(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,bool biasCorrected){
nd4j::float16 *xPointer = reinterpret_cast<nd4j::float16 *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
nd4j::float16 *resultPointer = reinterpret_cast<nd4j::float16 *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
nd4j::float16 *extraParamsPointer = reinterpret_cast<nd4j::float16 *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
if (debug && verbose)
printf("H17 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[3], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
nd4j::float16 *reductionPointer = reinterpret_cast<nd4j::float16 *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[3], 1, sizeof(nd4j::float16), 8);
if (verbose && launchDims.x == 1)
printf("AH17 opNum:[%i]\n", opNum);
summaryStatsReduceHalf<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer,
resultShapeInfoPointer, shape::rank(hostZShapeInfo),
nullptr,
1,
1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfoBuffer
* @param dimension
* @param dimensionLength
*/
void NativeOps::execSummaryStatsFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfoBuffer,
Nd4jPointer dimension,
int dimensionLength,bool biasCorrected){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfoBuffer);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
if (debug && verbose)
printf("F18 opNum:[%i]\n", opNum);
// dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[3], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[3], dimensionLength, sizeof(float), 8);
if (verbose && launchDims.x == 1)
printf("AF18 opNum:[%i]\n", opNum);
summaryStatsReduceFloat<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer,
resultShapeInfoPointer, shape::rank(hostZShapeInfo),
dimensionPointer,
dimensionLength,
1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::execSummaryStatsHalf(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfoBuffer,
Nd4jPointer dimension,
int dimensionLength,bool biasCorrected){
nd4j::float16 *xPointer = reinterpret_cast<nd4j::float16 *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
nd4j::float16 *resultPointer = reinterpret_cast<nd4j::float16 *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfoBuffer);
nd4j::float16 *extraParamsPointer = reinterpret_cast<nd4j::float16 *>(extraParams);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
if (debug && verbose)
printf("H18 opNum:[%i]\n", opNum);
// dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[3], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
nd4j::float16 *reductionPointer = reinterpret_cast<nd4j::float16 *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[3], dimensionLength, sizeof(nd4j::float16), 8);
if (verbose && launchDims.x == 1)
printf("AH18 opNum:[%i]\n", opNum);
summaryStatsReduceHalf<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer,
resultShapeInfoPointer, shape::rank(hostZShapeInfo),
dimensionPointer,
dimensionLength,
1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xStride
* @param result
* @param resultStride
* @param extraParams
* @param n
*/
void NativeOps::execTransformFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
int xStride,
Nd4jPointer result,
int resultStride,
Nd4jPointer extraParams,
Nd4jIndex n) {
float *xPointer = reinterpret_cast<float *>(dx);
float *resultPointer = reinterpret_cast<float *>(result);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
if (debug && verbose)
printf("F19 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[2], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[2]);
if (verbose && launchDims.x == 1)
printf("AF19 opNum:[%i], xLength: [%i]\n", opNum, shape::length(hostXShapeInfo));
transformFloat<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
n,
xPointer,
xStride,
extraParamsPointer,
resultPointer,resultStride, allocPointer, reductionPointer);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::execTransformHalf(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
int xStride,
Nd4jPointer result,
int resultStride,
Nd4jPointer extraParams,
Nd4jIndex n) {
nd4j::float16 *xPointer = reinterpret_cast<nd4j::float16 *>(dx);
nd4j::float16 *resultPointer = reinterpret_cast<nd4j::float16 *>(result);
nd4j::float16 *extraParamsPointer = reinterpret_cast<nd4j::float16 *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
if (debug && verbose)
printf("H19 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[2], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
nd4j::float16 *reductionPointer = reinterpret_cast<nd4j::float16 *>(extraPointers[4]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[2]);
if (verbose && launchDims.x == 1)
printf("AH19 opNum:[%i], xLength: [%i]\n", opNum, shape::length(hostXShapeInfo));
transformHalf<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
n,
xPointer,
xStride,
extraParamsPointer,
resultPointer,resultStride, allocPointer, reductionPointer);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execTransformFloat(Nd4jPointer *extraPointers,int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams) {
float *xPointer = reinterpret_cast<float *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
if (debug && verbose)
printf("F20 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[1], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
// special pointer for special buffer for special ops
float *specialPointer = reinterpret_cast<float *>(extraPointers[6]);
int *dimension = (int *) specialPointer;
int *maxDimension = dimension + 1;
int *maxShapeBuffer = (int *) maxDimension + 1;
float * special = (float *) maxShapeBuffer + (MAX_RANK * 2 + 4);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[1]);
if (verbose && launchDims.x == 1)
printf("AF20 opNum:[%i]\n", opNum);
// simple trick to get workaround over reductions into scalar
if (opNum >= 38 && opNum <= 41) {
if (shape::isVector(hostXShapeInfo) && opNum != 41) {
// if that's vector, we just go directly to op in 1 block
int length = shape::length(hostXShapeInfo);
int block = nd4j::math::nd4j_min<int>(length, 256);
transformFloat <<< 1, block, launchDims.z + (block * sizeof(float) * 4), *stream >>> (
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer, resultShapeInfoPointer, shape::rank(hostZShapeInfo), allocPointer, reductionPointer);
} else {
// going for blockwise specials
//float *xpf = reinterpret_cast<float *>(dx);
int *shape = shape::shapeOf(hostXShapeInfo);
switch (opNum) {
case 40: // LogSoftMax
case 39: // SoftMax Derivative
case 38: {// softmax
Nd4jPointer tempPointers[16];
tempPointers[0] = extraPointers[0];
tempPointers[1] = extraPointers[1];
tempPointers[2] = extraPointers[2];
tempPointers[3] = extraPointers[3];
tempPointers[4] = extraPointers[4];
tempPointers[5] = extraPointers[5];
tempPointers[6] = extraPointers[6];
tempPointers[7] = extraPointers[7];
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
tempPointers[12] = extraPointers[12];
tempPointers[13] = extraPointers[13];
tempPointers[14] = extraPointers[14];
tempPointers[15] = extraPointers[15];
int maxShape[2] = {shape::shapeOf(hostXShapeInfo)[0], 1};
int *hostMaxShapeBuffer = shape::shapeBuffer(2, maxShape);
tempPointers[7] = (Nd4jPointer) hostMaxShapeBuffer;
tempPointers[8] = (Nd4jPointer) hostMaxShapeBuffer;
prepareShapeBuffer <<< 1, 1, 128, *stream >>> (dimension, maxDimension, maxShapeBuffer, shape[0]);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
//shape::printShapeInfo(maxShapeBuffer);
tempPointers[9] = extraPointers[12];
tempPointers[10] = extraPointers[13];
tempPointers[11] = extraPointers[14];
// max 3
execReduceFloat(tempPointers, 3, dx, xShapeInfo, extraParams, (Nd4jPointer) special,
(Nd4jPointer) maxShapeBuffer, (Nd4jPointer) maxDimension, 1);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
// sub 1
execBroadcastFloat(tempPointers, 1, dx, xShapeInfo, (Nd4jPointer) special,
(Nd4jPointer) maxShapeBuffer, dx, xShapeInfo, (Nd4jPointer) dimension, 1);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
// exp 3
execTransformFloat(extraPointers, 3, dx, xShapeInfo, dx, xShapeInfo, extraParams);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
tempPointers[8] = tempPointers[7];
tempPointers[9] = extraPointers[12];
tempPointers[10] = extraPointers[13];
tempPointers[11] = extraPointers[14];
//sum 1
execReduceFloat(tempPointers, 1, dx, xShapeInfo, extraParams, (Nd4jPointer) special,
(Nd4jPointer) maxShapeBuffer, (Nd4jPointer) maxDimension, 1);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
// divide 3
execBroadcastFloat(tempPointers, 3, dx, xShapeInfo, (Nd4jPointer) special,
(Nd4jPointer) maxShapeBuffer, dx, xShapeInfo, (Nd4jPointer) dimension, 1);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
// log 3
if (opNum == 40)
execTransformFloat(extraPointers, 5, dx, xShapeInfo, dx, xShapeInfo, extraParams);
else if (opNum == 39)
execTransformFloat(extraPointers, 42, dx, xShapeInfo, dx, xShapeInfo, extraParams);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
delete[] hostMaxShapeBuffer;
break;
}
case 41: {
// IsMax along all dimensions
int *dimensionHostPointer = reinterpret_cast<int *> (extraPointers[16]);
bool scalarCheat = false;
if (extraParamsPointer == nullptr) {
scalarCheat = true;
} else {
/* //extraParamsPointer == nullptr || (shape::isVector(hostXShapeInfo))
if (shape::isVector(hostXShapeInfo) && dimensionHostPointer[0] == 1) {
scalarCheat = true;
}*/
}
if (scalarCheat) {
//printf("Going for scalar IsMax\n");
int maxIdx = (int) execIndexReduceScalarFloat(extraPointers, 0, dx, xShapeInfo, extraParams);
int targetIdx = 0;
if (shape::order(hostXShapeInfo) == 'c' || (shape::order(hostXShapeInfo) == 'f' && maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1] >= shape::length(hostXShapeInfo)))
targetIdx = maxIdx;
else
targetIdx = maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1];
fillIsMaxFloat<<< 1, 128, 1536, *stream >>>(resultPointer, shape::length(hostXShapeInfo), targetIdx);
} else {
// going for dimension-based IsMax
//printf("Going for dimension-based IsMax\n");
int *tadMaxShapeInfo = reinterpret_cast<int *> (extraPointers[10]);
int *tadMaxOffsets = reinterpret_cast<int *> (extraPointers[11]);
int *dimensionPointer = reinterpret_cast<int *> (extraPointers[15]);
// we call for IMax on specified dimension
execIndexReduceFloat(extraPointers, 0, dx, xShapeInfo, extraParams, (Nd4jPointer) special, (Nd4jPointer) hostYShapeInfo, (Nd4jPointer) dimensionPointer, 1);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
// at this point, all IMax indexes are gathered, and we execute
fillDimensionalIsMaxFloat<<<768, 16, funcAttributes[36].sharedSizeBytes, *stream>>>(special, hostYShapeInfo, resultPointer, resultShapeInfoPointer, tadMaxShapeInfo, dimensionPointer, 1, tadMaxOffsets );
checkCudaErrors(cudaStreamSynchronize(*stream));
}
break;
}
default: {
printf("Bad case for transformFloat\n");
break;
}
}
}
} else {
transformFloat <<<launchDims.x, launchDims.y, launchDims.z, *stream>>> (
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer, resultShapeInfoPointer, shape::rank(hostZShapeInfo), allocPointer, reductionPointer);
}
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
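// Summary of the blockwise softmax path above (opNum 38 = SoftMax, 39 = SoftMax derivative,
// 40 = LogSoftMax): there is no dedicated kernel; the result is composed from the generic
// primitives in this file, using the "special" scratch buffer and the one-element
// maxDimension carved out of extraPointers[6]:
//
//     max = execReduce(op 3)     along maxDimension     // row-wise max
//     x   = execBroadcast(op 1)  x - max                // subtract
//     x   = execTransform(op 3)  exp(x)                 // exponentiate
//     sum = execReduce(op 1)     along maxDimension     // row-wise sum
//     x   = execBroadcast(op 3)  x / sum                // normalize
//     x   = execTransform(op 5)  for LogSoftMax, or execTransform(op 42) for the derivative
//
// The same sequence is repeated for the half-precision variant below.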
void NativeOps::execTransformHalf(Nd4jPointer *extraPointers,int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams) {
nd4j::float16 *xPointer = reinterpret_cast<nd4j::float16 *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
nd4j::float16 *resultPointer = reinterpret_cast<nd4j::float16 *>(result);
nd4j::float16 *extraParamsPointer = reinterpret_cast<nd4j::float16 *>(extraParams);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
if (debug && verbose)
printf("H20 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[1], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
nd4j::float16 *reductionPointer = reinterpret_cast<nd4j::float16 *>(extraPointers[4]);
// special pointer for special buffer for special ops
nd4j::float16 *specialPointer = reinterpret_cast<nd4j::float16 *>(extraPointers[6]);
int *dimension = (int *) specialPointer;
int *maxDimension = dimension + 1;
int *maxShapeBuffer = (int *) maxDimension + 1;
nd4j::float16 * special = (nd4j::float16 *) maxShapeBuffer + (MAX_RANK * 2 + 4);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[1]);
if (verbose && launchDims.x == 1)
printf("AH20 opNum:[%i]\n", opNum);
// simple trick to get workaround over reductions into scalar
if (opNum >= 38 && opNum <= 41) {
if (shape::isVector(hostXShapeInfo) && opNum != 41) {
// if that's vector, we just go directly to op in 1 block
int length = shape::length(hostXShapeInfo);
int block = nd4j::math::nd4j_min<int>(length, 256);
transformHalf<<< 1, block, launchDims.z + (block * sizeof(float) * 4), *stream >>> (
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer, resultShapeInfoPointer, shape::rank(hostZShapeInfo), allocPointer, reductionPointer);
} else {
// going for blockwise specials
//float *xpf = reinterpret_cast<float *>(dx);
int *shape = shape::shapeOf(hostXShapeInfo);
switch (opNum) {
case 40: // LogSoftMax
case 39: // SoftMax Derivative
case 38: {// softmax
Nd4jPointer tempPointers[16];
tempPointers[0] = extraPointers[0];
tempPointers[1] = extraPointers[1];
tempPointers[2] = extraPointers[2];
tempPointers[3] = extraPointers[3];
tempPointers[4] = extraPointers[4];
tempPointers[5] = extraPointers[5];
tempPointers[6] = extraPointers[6];
tempPointers[7] = extraPointers[7];
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
tempPointers[12] = extraPointers[12];
tempPointers[13] = extraPointers[13];
tempPointers[14] = extraPointers[14];
tempPointers[15] = extraPointers[15];
int maxShape[2] = {shape::shapeOf(hostXShapeInfo)[0], 1};
int *hostMaxShapeBuffer = shape::shapeBuffer(2, maxShape);
tempPointers[7] = (Nd4jPointer) hostMaxShapeBuffer;
tempPointers[8] = (Nd4jPointer) hostMaxShapeBuffer;
prepareShapeBuffer <<< 1, 1, 128, *stream >>> (dimension, maxDimension, maxShapeBuffer, shape[0]);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
//shape::printShapeInfo(maxShapeBuffer);
tempPointers[9] = extraPointers[12];
tempPointers[10] = extraPointers[13];
tempPointers[11] = extraPointers[14];
// max 3
execReduceHalf(tempPointers, 3, dx, xShapeInfo, extraParams, (Nd4jPointer) special,
(Nd4jPointer) maxShapeBuffer, (Nd4jPointer) maxDimension, 1);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
// sub 1
execBroadcastHalf(tempPointers, 1, dx, xShapeInfo, (Nd4jPointer) special,
(Nd4jPointer) maxShapeBuffer, dx, xShapeInfo, (Nd4jPointer) dimension, 1);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
// exp 3
execTransformHalf(extraPointers, 3, dx, xShapeInfo, dx, xShapeInfo, extraParams);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
tempPointers[8] = tempPointers[7];
tempPointers[9] = extraPointers[12];
tempPointers[10] = extraPointers[13];
tempPointers[11] = extraPointers[14];
//sum 1
execReduceHalf(tempPointers, 1, dx, xShapeInfo, extraParams, (Nd4jPointer) special,
(Nd4jPointer) maxShapeBuffer, (Nd4jPointer) maxDimension, 1);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
// divide 3
execBroadcastHalf(tempPointers, 3, dx, xShapeInfo, (Nd4jPointer) special,
(Nd4jPointer) maxShapeBuffer, dx, xShapeInfo, (Nd4jPointer) dimension, 1);
if (opNum == 40) {
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
execTransformHalf(tempPointers, 47, dx, xShapeInfo, dx, xShapeInfo, extraParams);
}
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
// log 3
if (opNum == 40)
execTransformHalf(extraPointers, 5, dx, xShapeInfo, dx, xShapeInfo, extraParams);
else if (opNum == 39)
execTransformHalf(extraPointers, 42, dx, xShapeInfo, dx, xShapeInfo, extraParams);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
delete[] hostMaxShapeBuffer;
break;
}
case 41: {
// IsMax along all dimensions
int *dimensionHostPointer = reinterpret_cast<int *> (extraPointers[16]);
bool scalarCheat = false;
if (extraParamsPointer == nullptr) {
scalarCheat = true;
} else {
/* //extraParamsPointer == nullptr || (shape::isVector(hostXShapeInfo))
if (shape::isVector(hostXShapeInfo) && dimensionHostPointer[0] == 1) {
scalarCheat = true;
}*/
}
if (scalarCheat) {
//printf("Going for scalar IsMax\n");
int maxIdx = (int) execIndexReduceScalarHalf(extraPointers, 0, dx, xShapeInfo, extraParams);
int targetIdx = 0;
if (shape::order(hostXShapeInfo) == 'c' || (shape::order(hostXShapeInfo) == 'f' && maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1] >= shape::length(hostXShapeInfo)))
targetIdx = maxIdx;
else
targetIdx = maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1];
fillIsMaxHalf<<< 1, 128, 1536, *stream >>>(resultPointer, shape::length(hostXShapeInfo), targetIdx);
} else {
// going for dimension-based IsMax
//printf("Going for dimension-based IsMax\n");
int *tadMaxShapeInfo = reinterpret_cast<int *> (extraPointers[10]);
int *tadMaxOffsets = reinterpret_cast<int *> (extraPointers[11]);
int *dimensionPointer = reinterpret_cast<int *> (extraPointers[15]);
// we call for IMax on specified dimension
execIndexReduceHalf(extraPointers, 0, dx, xShapeInfo, extraParams, (Nd4jPointer) special, (Nd4jPointer) hostYShapeInfo, (Nd4jPointer) dimensionPointer, 1);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
// at this point, all IMax indexes are gathered, and we execute
fillDimensionalIsMaxHalf<<<128, 64, funcAttributes[36].sharedSizeBytes, *stream>>>(special, hostYShapeInfo, resultPointer, resultShapeInfoPointer, tadMaxShapeInfo, dimensionPointer, 1, tadMaxOffsets );
checkCudaErrors(cudaStreamSynchronize(*stream));
}
break;
}
default: {
printf("Bad case for transformHalf\n");
break;
}
}
}
} else {
transformHalf<<<launchDims.x, launchDims.y, launchDims.z, *stream>>> (
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer, resultShapeInfoPointer, shape::rank(hostZShapeInfo), allocPointer, reductionPointer);
}
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execTransformFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer xIndexes,
Nd4jPointer resultIndexes) {
float *xPointer = reinterpret_cast<float *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
int *resultIndexesPointer = reinterpret_cast<int *>(resultIndexes);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
if (debug && verbose)
printf("F21 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[0], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[0]);
if (verbose && launchDims.x == 1)
printf("AF21 opNum:[%i]\n", opNum);
transformFloatIndexes<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer,
resultIndexesPointer, allocPointer, reductionPointer);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::execTransformHalf(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer xIndexes,
Nd4jPointer resultIndexes) {
nd4j::float16 *xPointer = reinterpret_cast<nd4j::float16 *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
nd4j::float16 *resultPointer = reinterpret_cast<nd4j::float16 *>(result);
nd4j::float16 *extraParamsPointer = reinterpret_cast<nd4j::float16 *>(extraParams);
int *resultIndexesPointer = reinterpret_cast<int *>(resultIndexes);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
if (debug && verbose)
printf("H21 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[0], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
nd4j::float16 *reductionPointer = reinterpret_cast<nd4j::float16 *>(extraPointers[4]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[0]);
if (verbose && launchDims.x == 1)
printf("AH21 opNum:[%i]\n", opNum);
transformHalfIndexes<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer, shape::rank(hostXShapeInfo),
extraParamsPointer,
resultPointer,
resultIndexesPointer, allocPointer, reductionPointer);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
template <typename T>
__device__ void flattenKernelGeneric(int dOffset,
char order,
T *result,
int *resultShapeInfo,
T *input,
int *inputShapeInfo, int *allocationPointer) {
__shared__ UnifiedSharedMemory *manager;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
manager = new(shmem) UnifiedSharedMemory((int *) shmem);
manager->init(sizeof(UnifiedSharedMemory), 4, 4, sizeof(shape::TAD), 2);
}
__syncthreads();
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int *zShape = shape::shapeOf(resultShapeInfo);
int *zStride = shape::stride(resultShapeInfo);
int *yShape = shape::shapeOf(inputShapeInfo);
int *yStride = shape::stride(inputShapeInfo);
char yOrder = shape::order(inputShapeInfo);
int len = shape::length(inputShapeInfo);
int resultEWS = shape::elementWiseStride(resultShapeInfo);
int inputEWS = shape::elementWiseStride(inputShapeInfo);
if (yOrder == order) {
if (resultEWS >= 1 && inputEWS >= 1) {
for (int i = tid; i < len; i+= gridDim.x * blockDim.x) {
result[i * resultEWS + dOffset] = input[i * inputEWS];
}
} else {
int rank = shape::rank(inputShapeInfo);
/*
long allocSize = sizeof(int) * rank;
int *coord = shape::cuMalloc(allocationPointer, allocSize, manager);
*/
int coord[MAX_RANK];
if(order == 'f') {
for(int i = tid; i < len; i+= gridDim.x * blockDim.x) {
shape::ind2sub(rank,yShape,i,coord);
int offset = shape::getOffset(0,yShape,yStride,coord,rank);
result[i + dOffset] = input[offset];
}
}
else {
for(int i = tid; i < len; i+= gridDim.x * blockDim.x) {
shape::ind2subC(rank,yShape,i,coord);
int offset = shape::getOffset(0,yShape,yStride,coord,rank);
result[i + dOffset] = input[offset];
}
}
/*
if (rank > MAX_COORD && tid * allocSize > PREALLOC_SIZE - allocSize) {
free(coord);
}
*/
}
} else {
int rank = shape::rank(inputShapeInfo);
/*
long allocSize = sizeof(int) * rank;
int *coord = shape::cuMalloc(allocationPointer, allocSize, manager);
*/
int coord[MAX_RANK];
if(order == 'f') {
for(int i = tid; i < len; i+= gridDim.x * blockDim.x) {
shape::ind2sub(rank,yShape,i,coord);
int offset = shape::getOffset(0,yShape,yStride,coord,rank);
result[i+dOffset] = input[offset];
}
}
else {
for(int i = tid; i < len; i+= gridDim.x * blockDim.x) {
shape::ind2subC(rank,yShape,i,coord);
int offset = shape::getOffset(0,yShape,yStride,coord,rank);
result[i+dOffset] = input[offset];
}
}
/*
if (rank > MAX_COORD && tid * allocSize > PREALLOC_SIZE - allocSize) {
free(coord);
}*/
}
}
extern "C" __global__ void flattenKernelDouble(int offset,
char order,
double *result,
int *resultShapeInfo,
double *input,
int *inputShapeInfo, int *allocationPointer) {
flattenKernelGeneric<double>(
offset,
order, result,
resultShapeInfo,
input,
inputShapeInfo,
allocationPointer);
}
extern "C" __global__ void flattenKernelFloat(int offset,
char order,
float *result,
int *resultShapeInfo,
float *input,
int *inputShapeInfo, int *allocationPointer) {
flattenKernelGeneric<float>(
offset,
order,
result,
resultShapeInfo,
input,
inputShapeInfo,
allocationPointer);
}
extern "C" __global__ void flattenKernelHalf(int offset,
char order,
nd4j::float16 *result,
int *resultShapeInfo,
nd4j::float16 *input,
int *inputShapeInfo, int *allocationPointer) {
flattenKernelGeneric<nd4j::float16>(
offset,
order,
result,
resultShapeInfo,
input,
inputShapeInfo,
allocationPointer);
}
/**
* Append an input array
* to the end of a flat array
* in a particular order
* @param offset the offset of the array to start at
* @param order the order
* @param result the result array
* @param resultShapeInfo the shape info for the array
* @param input the input for the array
* @param inputShapeInfo the shape information for that array
*/
void NativeOps::flattenFloat(
Nd4jPointer *extraPointers,
int offset,
char order,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer input,
Nd4jPointer inputShapeInfo) {
float *xPointer = reinterpret_cast<float *>(result);
int *xShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
float *yPointer = reinterpret_cast<float *>(input);
int *yShapeInfoPointer = reinterpret_cast<int *>(inputShapeInfo);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[5], deviceProperties[getDeviceId(extraPointers[2])]);
if (debug && verbose)
printf("F22 opNum:[7]\n");
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostYShapeInfo), 2, funcAttributes[30]);
if (verbose && launchDims.x == 1)
printf("AF222 opNum:[7]\n");
flattenKernelFloat<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>(offset, order, xPointer, xShapeInfoPointer, yPointer, yShapeInfoPointer, allocPointer);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::flattenHalf(
Nd4jPointer *extraPointers,
int offset,
char order,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer input,
Nd4jPointer inputShapeInfo) {
nd4j::float16 *xPointer = reinterpret_cast<nd4j::float16 *>(result);
int *xShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
nd4j::float16 *yPointer = reinterpret_cast<nd4j::float16 *>(input);
int *yShapeInfoPointer = reinterpret_cast<int *>(inputShapeInfo);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[5], deviceProperties[getDeviceId(extraPointers[2])]);
if (debug && verbose)
printf("H22 opNum:[7]\n");
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostYShapeInfo), 2, funcAttributes[30]);
if (verbose && launchDims.x == 1)
printf("AH222 opNum:[7]\n");
flattenKernelHalf<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>(offset, order, xPointer, xShapeInfoPointer, yPointer, yShapeInfoPointer, allocPointer);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
* Append an input array
* to the end of a flat array
* in a particular order
* @param offset the offset of the array to start at
* @param order the order
* @param result the result array
* @param resultShapeInfo the shape info for the array
* @param input the input for the array
* @param inputShapeInfo the shape information for that array
*/
void NativeOps::flattenDouble(
Nd4jPointer *extraPointers,
int offset,
char order,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer input,
Nd4jPointer inputShapeInfo) {
double *xPointer = reinterpret_cast<double *>(result);
int *xShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
double *yPointer = reinterpret_cast<double *>(input);
int *yShapeInfoPointer = reinterpret_cast<int *>(inputShapeInfo);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
if (debug && verbose)
printf("D30 opNum:[7]\n");
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[5], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostYShapeInfo), 2, funcAttributes[34]);
flattenKernelDouble<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>(offset, order, xPointer, xShapeInfoPointer, yPointer, yShapeInfoPointer, allocPointer);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
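// Hypothetical host-side usage sketch for the flatten* helpers above: flattening several
// device arrays into one flat buffer by advancing the offset by each input's length.
// The names (ops, extraPointers, flatBuffer, flatShapeInfo, inputs, inputShapes, lengths,
// numInputs) are illustrative assumptions, not part of this file.
//
//     NativeOps ops;
//     int offset = 0;
//     for (int i = 0; i < numInputs; i++) {
//         ops.flattenFloat(extraPointers, offset, 'c',
//                          flatBuffer, flatShapeInfo,
//                          inputs[i], inputShapes[i]);
//         offset += lengths[i];   // element count of input i, known on the host
//     }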
void NativeOps::enableP2P(bool enable) {
if (enable == allowedP2P)
return;
int curDevice = 0;
cudaGetDevice(&curDevice);
int devCnt = 0;
cudaGetDeviceCount(&devCnt);
if (devCnt > 1) {
for (int x = 0; x < devCnt; x++) {
for (int y = 0; y < devCnt; y++) {
if (x == y)
continue;
int canAccess = 0;
cudaSetDevice(x);
cudaDeviceCanAccessPeer(&canAccess, x , y);
if (canAccess) {
if (enable) {
cudaDeviceEnablePeerAccess(y, 0);
} else {
cudaDeviceDisablePeerAccess(y);
}
} else
printf("Peer access [%i] -> [%i] isn't possible\n", x, y);
}
}
cudaSetDevice(0);
}
allowedP2P = enable;
cudaSetDevice(curDevice);
}
void NativeOps::initializeDevicesAndFunctions() {
int devCnt = 0;
cudaGetDeviceCount(&devCnt);
deviceProperties = new cudaDeviceProp[devCnt];
for (int i = 0; i < devCnt; i++) {
cudaSetDevice(i);
cudaGetDeviceProperties(&deviceProperties[i], i);
cudaDeviceSetLimit(cudaLimitStackSize, 4096);
}
cudaSetDevice(0);
enableP2P(allowedP2P);
cudaFuncGetAttributes(&funcAttributes[0], (void *)transformFloatIndexes);
void (*transformFloatPointer1)(int opNum, float *dy,int *shapeInfo, int xRank, float *params, float *result,int *resultShapeInfo, int zRank, int *allocationPointer, float *reductionPointer) = transformFloat;
cudaFuncGetAttributes(&funcAttributes[1], transformFloatPointer1);
void (*transformFloatPointer2)(int opNum, Nd4jIndex n, float *dy, int incy, float *params, float *result,int resultStride, int *allocationPointer, float *reductionPointer) = transformFloat;
cudaFuncGetAttributes(&funcAttributes[2], transformFloatPointer2);
cudaFuncGetAttributes(&funcAttributes[3], (void *)summaryStatsReduceFloat);
cudaFuncGetAttributes(&funcAttributes[4], (void *)scalarFloatIndexes);
void (*scalarFloatPointer1)(int opNum, float dx,float *dy, int *shapeInfo, int xRank, float *params, float *result,int *resultShapeInfo, int zRank, int *allocPointer) = scalarFloat;
cudaFuncGetAttributes(&funcAttributes[5], scalarFloatPointer1);
void (*scalarFloatPointer2)(int opNum, Nd4jIndex n,float dx, float *dy, int incy, float *params, float *result,int resultStride, int *allocPointer) = scalarFloat;
cudaFuncGetAttributes(&funcAttributes[6], scalarFloatPointer2);
cudaFuncGetAttributes(&funcAttributes[7], reduce3Float);
cudaFuncGetAttributes(&funcAttributes[8], reduceFloat);
// printf("reduceFloat regs: [%i], static shmem: [%i]\n", funcAttributes[8].numRegs, funcAttributes[8].sharedSizeBytes);
cudaFuncGetAttributes(&funcAttributes[28], reduceFloat1D);
// printf("reduceFloat1D regs: [%i], static shmem: [%i]\n", funcAttributes[28].numRegs, funcAttributes[28].sharedSizeBytes);
cudaFuncGetAttributes(&funcAttributes[29], reduceFloat6D);
// printf("reduceFloat6D regs: [%i], static shmem: [%i]\n", funcAttributes[29].numRegs, funcAttributes[29].sharedSizeBytes);
cudaFuncGetAttributes(&funcAttributes[30], flattenKernelFloat);
cudaFuncGetAttributes(&funcAttributes[31], concatKernelFloat);
cudaFuncGetAttributes(&funcAttributes[9], pairWiseTransformFloat);
cudaFuncGetAttributes(&funcAttributes[10], pairWiseTransformFloatIndex);
cudaFuncGetAttributes(&funcAttributes[11], pairWiseTransformStridedFloat);
cudaFuncGetAttributes(&funcAttributes[12], broadcastFloat);
cudaFuncGetAttributes(&funcAttributes[13], indexReduceFloat);
///////////////////////////////////////// Doubles are separate, just in case of...
cudaFuncGetAttributes(&funcAttributes[14], transformDoubleIndexes);
void (*transformDoublePointer1)(int opNum, double *dy, int *shapeInfo, int xRank, double *params, double *result,int *resultShapeInfo, int zRank, int *allocationPointer, double *reductionPointer) = transformDouble;
cudaFuncGetAttributes(&funcAttributes[15], transformDoublePointer1);
void (*transformDoublePointer2)(int opNum, Nd4jIndex n, double *dy, int incy, double *params, double *result,int resultStride, int *allocationPointer, double *reductionPointer) = transformDouble;
cudaFuncGetAttributes(&funcAttributes[16], transformDoublePointer2);
cudaFuncGetAttributes(&funcAttributes[17], summaryStatsReduceDouble);
cudaFuncGetAttributes(&funcAttributes[18], scalarDoubleIndexes);
void (*scalarDoublePointer1)(int opNum, double dx,double *dy, int *shapeInfo, int xRank, double *params, double *result,int *resultShapeInfo, int zRank, int *allocPointer) = scalarDouble;
cudaFuncGetAttributes(&funcAttributes[19], scalarDoublePointer1);
void (*scalarDoublePointer2)(int opNum, Nd4jIndex n,double dx, double *dy, int incy, double *params, double *result,int resultStride, int *allocPointer) = scalarDouble;
cudaFuncGetAttributes(&funcAttributes[20], scalarDoublePointer2);
cudaFuncGetAttributes(&funcAttributes[21], reduce3Double);
cudaFuncGetAttributes(&funcAttributes[22], reduceDouble);
cudaFuncGetAttributes(&funcAttributes[23], pairWiseTransformDouble);
cudaFuncGetAttributes(&funcAttributes[24], pairWiseTransformDoubleIndex);
cudaFuncGetAttributes(&funcAttributes[25], pairWiseTransformStridedDouble);
cudaFuncGetAttributes(&funcAttributes[26], broadcastDouble);
cudaFuncGetAttributes(&funcAttributes[27], indexReduceDouble);
cudaFuncGetAttributes(&funcAttributes[32], reduceDouble1D);
cudaFuncGetAttributes(&funcAttributes[33], reduceDouble6D);
cudaFuncGetAttributes(&funcAttributes[34], flattenKernelDouble);
cudaFuncGetAttributes(&funcAttributes[35], concatKernelDouble);
cudaFuncGetAttributes(&funcAttributes[36], fillDimensionalIsMaxFloat);
cudaFuncGetAttributes(&funcAttributes[37], fillDimensionalIsMaxDouble);
cudaFuncGetAttributes(&funcAttributes[38], concatKernelScalarFloat);
cudaFuncGetAttributes(&funcAttributes[39], concatKernelScalarDouble);
cudaFuncGetAttributes(&funcAttributes[40], concatKernelVStackFloat);
cudaFuncGetAttributes(&funcAttributes[41], concatKernelVStackDouble);
cudaFuncGetAttributes(&funcAttributes[42], concatKernelHStackFloat);
cudaFuncGetAttributes(&funcAttributes[43], concatKernelHStackDouble);
/////////////////////////
cudaFuncGetAttributes(&funcAttributes[44], averagingKernelHalf);
cudaFuncGetAttributes(&funcAttributes[45], averagingKernelFloat);
cudaFuncGetAttributes(&funcAttributes[46], averagingKernelDouble);
}
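// Note: funcAttributes[] caches cudaFuncAttributes (register usage and static shared memory)
// per kernel, queried once above via cudaFuncGetAttributes and reused by the launch-parameter
// helpers (getReduceLaunchParams / getFlatLaunchParams / getBasicLaunchParams) when sizing
// grids, blocks and dynamic shared memory.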
/**
* This method acquires a memory chunk of the requested size on the host side
*
* @param memorySize memory size, in bytes
* @param flags optional parameter
* @return pointer to the allocated chunk, or 0L on failure
*/
Nd4jPointer NativeOps::mallocHost(Nd4jIndex memorySize, int flags) {
Nd4jPointer pointer;
// cudaHostAllocMapped |cudaHostAllocPortable
cudaError_t res = cudaHostAlloc((void **)&pointer, memorySize, cudaHostAllocDefault);
if (res != 0)
pointer = 0L;
return pointer;
}
/**
* This method acquires a memory chunk of the requested size on the specified device
*
* @param memorySize memory size, in bytes
* @param ptrToDeviceId pointer to deviceId. For CUDA that's just an int, for OpenCL that's a pointer to device_id, etc.
* @param flags optional parameter
* @return pointer to the allocated chunk, or 0L on failure
*/
Nd4jPointer NativeOps::mallocDevice(Nd4jIndex memorySize, Nd4jPointer ptrToDeviceId, int flags) {
Nd4jPointer pointer;
cudaError_t res = cudaMalloc((void **)&pointer, memorySize);
if (res != 0)
pointer = 0L;
return pointer;
}
/**
* This method releases previously allocated host memory space
*
* @param pointer pointer that'll be freed
*/
int NativeOps::freeHost(Nd4jPointer pointer) {
cudaError_t res = cudaFreeHost((void *) pointer);
if (res != 0)
return 0L;
return 1L;
}
/**
* This method releases previously allocated memory space on device
*
* @param pointer pointer that'll be freed
* @param ptrToDeviceId pointer to deviceId.
*/
int NativeOps::freeDevice(Nd4jPointer pointer, Nd4jPointer ptrToDeviceId) {
cudaError_t res = cudaFree((void *)pointer);
if (res != 0)
return 0L;
return 1L;
}
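// Hypothetical host-side usage sketch for the allocation helpers above; "ops", "devId" and
// the buffer size are illustrative assumptions. Both malloc* methods return 0L on failure.
//
//     NativeOps ops;
//     Nd4jPointer host = ops.mallocHost(1024 * sizeof(float), 0);
//     Nd4jPointer dev  = ops.mallocDevice(1024 * sizeof(float), devId, 0);
//     if (host == 0L || dev == 0L) {
//         // handle allocation failure
//     }
//     // ... use the buffers ...
//     ops.freeHost(host);
//     ops.freeDevice(dev, devId);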
Nd4jPointer NativeOps::createContext() {
return 0L;
}
Nd4jPointer NativeOps::createStream() {
Nd4jPointer nativeStream = 0;
cudaError_t result = cudaStreamCreate((cudaStream_t *) &nativeStream);
checkCudaErrors(result);
if (result != 0)
return 0L;
else return nativeStream;
}
Nd4jPointer NativeOps::createEvent() {
Nd4jPointer nativeEvent= 0;
cudaError_t result = cudaEventCreateWithFlags((cudaEvent_t *) &nativeEvent, cudaEventDisableTiming);
checkCudaErrors(result);
if (result != 0)
return 0L;
else return nativeEvent;
}
Nd4jPointer NativeOps::createBlasHandle() {
Nd4jPointer nativeHandle= 0;
cublasStatus_t result = cublasCreate((cublasHandle_t *) &nativeHandle);
if (result != 0)
return 0L;
else return nativeHandle;
}
int NativeOps::registerEvent(Nd4jPointer event, Nd4jPointer stream) {
cudaEvent_t *pEvent = reinterpret_cast<cudaEvent_t *>(&event);
cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(&stream);
cudaError_t result = cudaEventRecord(*pEvent, *pStream);
checkCudaErrors(result);
if (result != 0)
return 0L;
else return 1;
}
int NativeOps::setBlasStream(Nd4jPointer handle, Nd4jPointer stream) {
cublasHandle_t *pHandle = reinterpret_cast<cublasHandle_t *>(&handle);
cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(&stream);
cublasStatus_t result = cublasSetStream(*pHandle, *pStream);
if (result != 0)
return 0L;
else return 1L;
}
int NativeOps::setDevice(Nd4jPointer ptrToDeviceId) {
int deviceId = getDeviceId(ptrToDeviceId);
cudaError_t result = cudaSetDevice(deviceId);
checkCudaErrors(result);
if (result != 0)
return 0L;
else return 1;
}
Nd4jIndex NativeOps::getDeviceFreeMemory(Nd4jPointer ptrToDeviceId) {
int device = getDeviceId(ptrToDeviceId);
if (device >= 0) {
setDevice(ptrToDeviceId);
}
size_t memFree = 0;
size_t memTotal = 0;
cudaMemGetInfo(&memFree, &memTotal);
return (Nd4jIndex) memFree;
}
Nd4jIndex NativeOps::getDeviceTotalMemory(Nd4jPointer ptrToDeviceId) {
int device = getDeviceId(ptrToDeviceId);
if (device >= 0) {
setDevice(ptrToDeviceId);
}
size_t memFree = 0;
size_t memTotal = 0;
cudaMemGetInfo(&memFree, &memTotal);
return (Nd4jIndex) memTotal;
}
int NativeOps::memcpy(Nd4jPointer dst, Nd4jPointer src, Nd4jIndex size, int flags, Nd4jPointer reserved) {
return memcpyAsync(dst, src, size, flags, reserved);
}
int NativeOps::memcpyAsync(Nd4jPointer dst, Nd4jPointer src, Nd4jIndex size, int flags, Nd4jPointer reserved) {
cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(&reserved);
cudaMemcpyKind kind;
if (debug)
checkCudaErrors(cudaStreamSynchronize(*pStream));
switch (flags) {
case 0: {
kind = cudaMemcpyHostToHost;
}
break;
case 1: {
kind = cudaMemcpyHostToDevice;
}
break;
case 2: {
kind = cudaMemcpyDeviceToHost;
}
break;
case 3: {
kind = cudaMemcpyDeviceToDevice;
}
break;
default: {
// unknown direction: bail out instead of using an uninitialized cudaMemcpyKind
printf("UNDEFINED MEMCPY!\n");
return 0L;
}
}
cudaError_t result = cudaMemcpyAsync((void *) dst, (const void *) src, (size_t) size, kind, *pStream);
checkCudaErrors(result);
if (result != 0) {
printf("Failed on [%lu] -> [%lu], size: [%i], direction: [%i], result: [%i]\n", src, dst, size, flags, (int) result );
return 0L;
}
else return 1;
}
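// Hypothetical usage sketch for memcpy/memcpyAsync above: "flags" selects the copy direction
// (0 = host->host, 1 = host->device, 2 = device->host, 3 = device->device) and "reserved"
// carries the cudaStream_t used for the asynchronous copy. "ops", "devBuffer", "hostBuffer",
// "byteCount" and "stream" are illustrative names.
//
//     ops.memcpyAsync(devBuffer, hostBuffer, byteCount, 1 /* host -> device */, stream);
//     ops.streamSynchronize(stream);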
int NativeOps::memset(Nd4jPointer dst, int value, Nd4jIndex size, int flags, Nd4jPointer reserved) {
//cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(&reserved);
cudaError_t result = cudaMemset((void *) dst, value, (size_t) size);
checkCudaErrors(result);
if (result != 0)
return 0L;
else return 1;
}
int NativeOps::memsetAsync(Nd4jPointer dst, int value, Nd4jIndex size, int flags, Nd4jPointer reserved) {
cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(&reserved);
cudaError_t result = cudaMemsetAsync((void *) dst, value, (size_t) size, *pStream);
checkCudaErrors(result);
if (result != 0)
return 0L;
else return 1;
}
int NativeOps::destroyEvent(Nd4jPointer event) {
cudaEvent_t *pEvent = reinterpret_cast<cudaEvent_t *>(&event);
cudaError_t result = cudaEventDestroy(*pEvent);
checkCudaErrors(result);
if (result != 0)
return 0L;
else return 1;
}
int NativeOps::streamSynchronize(Nd4jPointer stream) {
cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(&stream);
cudaError_t result = cudaStreamSynchronize(*pStream);
checkCudaErrors(result);
if (result != 0)
return 0L;
else return 1L;
}
int NativeOps::eventSynchronize(Nd4jPointer event) {
cudaEvent_t *pEvent = reinterpret_cast<cudaEvent_t *>(&event);
cudaError_t result = cudaEventSynchronize(*pEvent);
checkCudaErrors(result);
if (result != 0)
return 0L;
else return 1L;
}
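// Hypothetical lifecycle sketch for the stream/event helpers above; each returns 0L on
// failure. "ops" is an illustrative name.
//
//     Nd4jPointer stream = ops.createStream();
//     Nd4jPointer event  = ops.createEvent();
//     // ... enqueue asynchronous work on the stream ...
//     ops.registerEvent(event, stream);   // cudaEventRecord on that stream
//     ops.eventSynchronize(event);        // block until the recorded work has completed
//     ops.destroyEvent(event);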
int NativeOps::getAvailableDevices() {
int devCnt = 0;
cudaGetDeviceCount(&devCnt);
return devCnt;
}
void NativeOps::enableDebugMode(bool reallyEnable) {
debug = reallyEnable;
}
void NativeOps::setGridLimit(int gridSize) {
if (gridSize > 1024)
gridSize = 1024;
if (gridSize < 1)
gridSize = 1;
blockLimit = gridSize;
}
int NativeOps::ompGetMaxThreads() {
return maxThreads;
}
int NativeOps::ompGetNumThreads() {
return maxThreads;
}
void NativeOps::setOmpNumThreads(int threads) {
if (threads > 1024)
threads = 1024;
if (threads < 32)
threads = 32;
maxThreads = threads;
}
void NativeOps::enableVerboseMode(bool reallyEnable) {
verbose = reallyEnable;
}
/**
* Concatenate multiple arrays of the same shape together
* along a particular dimension
*/
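// The concatFloat/concatHalf/concatDouble implementations below choose between four kernels
// based on the host-side shape info:
//   - every input is a scalar                                                -> concatKernelScalar*
//   - dimension == 0, hostXShapeInfo is rank-2 and 'c'-ordered, and every input
//     is a 'c'-ordered vector with positive element-wise stride              -> concatKernelVStack*
//   - dimension == 1, hostXShapeInfo is a vector, and every input is a vector
//     with positive element-wise stride                                      -> concatKernelHStack*
//   - anything else                                                          -> generic concatKernel*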
void NativeOps::concatFloat(
Nd4jPointer *extraPointers,
int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer *tadPointers,
Nd4jPointer *offsetPointers) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
float *resultData = reinterpret_cast<float *>(result);
int *resultShape = reinterpret_cast<int *>(resultShapeInfo);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int **hostShapePointers = reinterpret_cast<int **>(extraPointers[9]);
// numArrays will be used as the number of TADs, so each block processes one input
int smem = 0;
bool isVstack = false;
bool isScalar = true;
bool isHstack = false;
for (int i = 0; i < numArrays; i++) {
if (!shape::isScalar(hostShapePointers[i])) {
isScalar = false;
break;
}
}
if (!isScalar && dimension == 0 && shape::rank(hostXShapeInfo) == 2 && shape::order(hostXShapeInfo) == 'c' ) {
isVstack = true;
for (int i = 0; i < numArrays; i++) {
if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0 || shape::order(hostShapePointers[i]) != 'c') {
isVstack = false;
break;
}
}
}
if (!isScalar && !isVstack && dimension == 1 && shape::isVector(hostXShapeInfo)) {
isHstack = true;
for (int i = 0; i < numArrays; i++) {
if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0) {
isHstack = false;
break;
}
}
}
if (isScalar) {
if (debug && verbose)
printf("Going scalar concat\n");
smem = funcAttributes[38].sharedSizeBytes;
concatKernelScalarFloat<<< 128, 128, smem, *stream>>> (dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], resultData, resultShape, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]);
} else if (isVstack) {
if (debug && verbose)
printf("Going VStack concat\n");
smem = funcAttributes[40].sharedSizeBytes;
concatKernelVStackFloat<<< 128, 128, smem, *stream>>> (dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], resultData, resultShape, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]);
} else if (isHstack) {
if (debug && verbose)
printf("Going HStack concat\n");
smem = funcAttributes[42].sharedSizeBytes;
concatKernelHStackFloat<<< 128, 128, smem, *stream>>> (dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], resultData, resultShape, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]);
} else {
if (debug && verbose)
printf("Going generic concat\n");
smem = nd4j::math::nd4j_max<int>(funcAttributes[31].sharedSizeBytes + 768, 1280);
concatKernelFloat<<< 128, 128, smem, *stream>>> (dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], resultData, resultShape, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]);
}
if (debug && verbose)
printf("sharedMemory requested for concatFloat: [%i], registers: [%i]\n", smem, funcAttributes[31].numRegs);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::concatHalf(
Nd4jPointer *extraPointers,
int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer *tadPointers,
Nd4jPointer *offsetPointers) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
nd4j::float16 *resultData = reinterpret_cast<nd4j::float16 *>(result);
int *resultShape = reinterpret_cast<int *>(resultShapeInfo);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int **hostShapePointers = reinterpret_cast<int **>(extraPointers[9]);
// numArrays will be used as the number of TADs, so each block processes one input
int smem = 0;
bool isVstack = false;
bool isScalar = true;
bool isHstack = false;
for (int i = 0; i < numArrays; i++) {
if (!shape::isScalar(hostShapePointers[i])) {
isScalar = false;
break;
}
}
if (!isScalar && dimension == 0 && shape::rank(hostXShapeInfo) == 2 && shape::order(hostXShapeInfo) == 'c' ) {
isVstack = true;
for (int i = 0; i < numArrays; i++) {
if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0 || shape::order(hostShapePointers[i]) != 'c') {
isVstack = false;
break;
}
}
}
if (!isScalar && !isVstack && dimension == 1 && shape::isVector(hostXShapeInfo)) {
isHstack = true;
for (int i = 0; i < numArrays; i++) {
if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0) {
isHstack = false;
break;
}
}
}
if (isScalar) {
if (debug && verbose)
printf("Going scalar concat\n");
smem = funcAttributes[38].sharedSizeBytes;
concatKernelScalarHalf<<< 128, 128, smem, *stream>>> (dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], resultData, resultShape, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]);
} else if (isVstack) {
if (debug && verbose)
printf("Going VStack concat\n");
smem = funcAttributes[40].sharedSizeBytes;
concatKernelVStackHalf<<< 128, 128, smem, *stream>>> (dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], resultData, resultShape, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]);
} else if (isHstack) {
if (debug && verbose)
printf("Going HStack concat\n");
smem = funcAttributes[42].sharedSizeBytes;
concatKernelHStackHalf<<< 128, 128, smem, *stream>>> (dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], resultData, resultShape, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]);
} else {
if (debug && verbose)
printf("Going generic concat\n");
smem = nd4j::math::nd4j_max<int>(funcAttributes[31].sharedSizeBytes + 768, 1280);
concatKernelHalf<<< 128, 128, smem, *stream>>> (dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], resultData, resultShape, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]);
}
if (debug && verbose)
printf("sharedMemory requested for concatHalf: [%i], registers: [%i]\n", smem, funcAttributes[31].numRegs);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
* Concatenate multiple arrays of the same shape together
* along a particular dimension
*/
void NativeOps::concatDouble(
Nd4jPointer *extraPointers,
int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer *tadPointers,
Nd4jPointer *offsetPointers) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
double *resultData = reinterpret_cast<double *>(result);
int *resultShape = reinterpret_cast<int *>(resultShapeInfo);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int **hostShapePointers = reinterpret_cast<int **>(extraPointers[9]);
// numArrays will be used as the number of TADs, so each block processes one input
int smem = 0;
bool isVstack = false;
bool isScalar = true;
bool isHstack = false;
for (int i = 0; i < numArrays; i++) {
if (!shape::isScalar(hostShapePointers[i])) {
isScalar = false;
break;
}
}
if (!isScalar && dimension == 0 && shape::rank(hostXShapeInfo) == 2 && shape::order(hostXShapeInfo) == 'c' ) {
isVstack = true;
for (int i = 0; i < numArrays; i++) {
if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0 || shape::order(hostShapePointers[i]) != 'c') {
isVstack = false;
break;
}
}
}
if (!isScalar && !isVstack && dimension == 1 && shape::isVector(hostXShapeInfo)) {
isHstack = true;
for (int i = 0; i < numArrays; i++) {
if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0) {
isHstack = false;
break;
}
}
}
if (isScalar) {
if (debug && verbose)
printf("Going scalar concat\n");
smem = funcAttributes[39].sharedSizeBytes;
concatKernelScalarDouble<<< 128, 128, smem, *stream>>> (dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], resultData, resultShape, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]);
} else if (isVstack) {
if (debug && verbose)
printf("Going VStack concat\n");
smem = funcAttributes[41].sharedSizeBytes;
concatKernelVStackDouble<<< 128, 128, smem, *stream>>> (dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], resultData, resultShape, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]);
} else if (isHstack) {
if (debug && verbose)
printf("Going HStack concat\n");
smem = funcAttributes[43].sharedSizeBytes;
concatKernelHStackDouble<<< 128, 128, smem, *stream>>> (dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], resultData, resultShape, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]);
} else {
if (debug && verbose)
printf("Going generic concat\n");
smem = nd4j::math::nd4j_max<int>(funcAttributes[35].sharedSizeBytes + 768, 1280);
concatKernelDouble<<< 128, 128, smem, *stream>>> (dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], resultData, resultShape, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]);
}
if (debug && verbose)
printf("sharedMemory requested for concatDouble: [%i], registers: [%i]\n", smem, funcAttributes[35].numRegs);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
* This method builds TAD-only shape info and TAD offsets for the given shape and dimension(s)
* on the host, and copies them into the provided target and offsets buffers.
*/
void NativeOps::tadOnlyShapeInfo(Nd4jPointer xShapeInfo, Nd4jPointer dimension, int dimensionLength, Nd4jPointer targetBuffer, Nd4jPointer offsetsBuffer) {
int *hostXShapeInfo = reinterpret_cast<int *>(xShapeInfo);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
int *target = reinterpret_cast<int *>(targetBuffer);
int *offsets = reinterpret_cast<int *>(offsetsBuffer);
shape::TAD *tad = new shape::TAD();
tad->init(hostXShapeInfo, dimensionPointer, dimensionLength);
//tad->setOutputBuffer(target);
tad->createTadOnlyShapeInfo();
tad->createOffsets();
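    // Sizing note (assumption inferred from the copies below): targetBuffer must hold the
    // usual nd4j shapeInfo layout [rank, shape[rank], stride[rank], offset, elementWiseStride,
    // order], i.e. rank*2 + 4 ints, and offsetsBuffer must hold one int per TAD.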
std::memcpy((void *) target, tad->tadOnlyShapeInfo, (tad->tadOnlyShapeInfo[0] * 2 + 4) * sizeof(int));
std::memcpy((void *) offsets, tad->tadOffsets, tad->numTads * sizeof(int));
/*
shape::printShapeInfoLinear(hostXShapeInfo);
shape::printShapeInfoLinear(tad->tadOnlyShapeInfo);
shape::printShapeInfoLinear(target);
*/
delete tad;
}
int NativeOps::memcpyConstantAsync(Nd4jIndex dst, Nd4jPointer src, Nd4jIndex size, int flags, Nd4jPointer reserved) {
cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(&reserved);
cudaMemcpyKind kind;
if (debug)
checkCudaErrors(cudaStreamSynchronize(*pStream));
switch (flags) {
case 0: {
kind = cudaMemcpyHostToHost;
}
break;
case 1: {
kind = cudaMemcpyHostToDevice;
}
break;
case 2: {
kind = cudaMemcpyDeviceToHost;
            }
            break;
case 3: {
kind = cudaMemcpyDeviceToDevice;
}
break;
}
//cudaError_t result = cudaMemcpyAsync((void *) dst, (const void *) src, (size_t) size, kind, *pStream);
cudaError_t result = cudaMemcpyToSymbolAsync(deviceConstantMemory, (const void *) src, size, dst, kind, *pStream);
checkCudaErrors(result);
if (result != 0) {
printf("Symbol failed on [%lu] -> [%lu], size: [%i], direction: [%i]\n", src, dst, size, flags );
return 0L;
}
else return 1;
}
Nd4jPointer NativeOps::getConstantSpace() {
Nd4jPointer dConstAddr;
    cudaError_t result = cudaGetSymbolAddress((void **)&dConstAddr, deviceConstantMemory);
    checkCudaErrors(result);
    return dConstAddr;
}
void NativeOps::pullRowsHalf(Nd4jPointer *extraPointers, Nd4jPointer x, Nd4jPointer xShapeInfo, Nd4jPointer z, Nd4jPointer zShapeInfo, int n, Nd4jPointer indexes, Nd4jPointer tadShapeInfo, Nd4jPointer tadOffsets) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
nd4j::float16 *xBuffer = reinterpret_cast<nd4j::float16 *>(x);
nd4j::float16 *zBuffer = reinterpret_cast<nd4j::float16 *>(z);
int *zShape = reinterpret_cast<int *>(zShapeInfo);
int *xShape = reinterpret_cast<int *>(xShapeInfo);
int *index = reinterpret_cast<int *>(indexes);
int *tadOnlyShapeInfo = reinterpret_cast<int *>(tadShapeInfo);
int *tadOffset = reinterpret_cast<int *>(tadOffsets);
pullRowsKernelHalf<<<32, 32, 1024, *stream>>>(xBuffer, xShape, zBuffer, zShape, n, index, tadOnlyShapeInfo, tadOffset);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::pullRowsFloat(Nd4jPointer *extraPointers, Nd4jPointer x, Nd4jPointer xShapeInfo, Nd4jPointer z, Nd4jPointer zShapeInfo, int n, Nd4jPointer indexes, Nd4jPointer tadShapeInfo, Nd4jPointer tadOffsets) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
float *xBuffer = reinterpret_cast<float *>(x);
float *zBuffer = reinterpret_cast<float *>(z);
int *zShape = reinterpret_cast<int *>(zShapeInfo);
int *xShape = reinterpret_cast<int *>(xShapeInfo);
int *index = reinterpret_cast<int *>(indexes);
int *tadOnlyShapeInfo = reinterpret_cast<int *>(tadShapeInfo);
int *tadOffset = reinterpret_cast<int *>(tadOffsets);
pullRowsKernelFloat<<<32, 32, 1024, *stream>>>(xBuffer, xShape, zBuffer, zShape, n, index, tadOnlyShapeInfo, tadOffset);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::pullRowsDouble(Nd4jPointer *extraPointers, Nd4jPointer x, Nd4jPointer xShapeInfo, Nd4jPointer z, Nd4jPointer zShapeInfo, int n, Nd4jPointer indexes, Nd4jPointer tadShapeInfo, Nd4jPointer tadOffsets) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
double *xBuffer = reinterpret_cast<double *>(x);
double *zBuffer = reinterpret_cast<double *>(z);
int *zShape = reinterpret_cast<int *>(zShapeInfo);
int *xShape = reinterpret_cast<int *>(xShapeInfo);
int *index = reinterpret_cast<int *>(indexes);
int *tadOnlyShapeInfo = reinterpret_cast<int *>(tadShapeInfo);
int *tadOffset = reinterpret_cast<int *>(tadOffsets);
pullRowsKernelDouble<<<32, 32, 1024, *stream>>>(xBuffer, xShape, zBuffer, zShape, n, index, tadOnlyShapeInfo, tadOffset);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::convertHalfsToFloats(Nd4jPointer *extraPointers, Nd4jPointer dx, int n, Nd4jPointer dz) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
half *x = reinterpret_cast<half *>(dx);
float *z = reinterpret_cast<float *>(dz);
kernelHalfsToFloats<<<32, 32, 1024, *stream>>>(x, n, z);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::convertHalfsToDoubles(Nd4jPointer *extraPointers, Nd4jPointer dx, int n, Nd4jPointer dz) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
half *x = reinterpret_cast<half *>(dx);
double *z = reinterpret_cast<double *>(dz);
kernelHalfsToDoubles<<<32, 32, 1024, *stream>>>(x, n, z);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::convertDoublesToHalfs(Nd4jPointer *extraPointers, Nd4jPointer dx, int n, Nd4jPointer dz) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
double *x = reinterpret_cast<double *>(dx);
half *z = reinterpret_cast<half *>(dz);
kernelDoublesToHalfs<<<32, 32, 1024, *stream>>>(x, n, z);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::convertFloatsToHalfs(Nd4jPointer *extraPointers, Nd4jPointer dx, int n, Nd4jPointer dz) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
float *x = reinterpret_cast<float *>(dx);
half *z = reinterpret_cast<half *>(dz);
kernelFloatsToHalfs<<<32, 32, 1024, *stream>>>(x, n, z);
if (debug)
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::averageHalf(Nd4jPointer *extras, Nd4jPointer dx, Nd4jPointer dz, int n, Nd4jIndex length, bool propagate) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
nd4j::float16 **x = reinterpret_cast<nd4j::float16 **>(dx);
nd4j::float16 *z = reinterpret_cast<nd4j::float16 *>(dz);
if (debug && verbose)
printf("averageHalf called\n");
dim3 launchDims = getBasicLaunchParams(getDeviceId(extras[2]), length, sizeof(nd4j::float16), funcAttributes[44]);
averagingKernelHalf<<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, z, n, length, propagate);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::averageFloat(Nd4jPointer *extras, Nd4jPointer dx, Nd4jPointer dz, int n, Nd4jIndex length, bool propagate) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
float **x = reinterpret_cast<float **>(dx);
float *z = reinterpret_cast<float *>(dz);
if (debug && verbose)
printf("averageFloat called\n");
dim3 launchDims = getBasicLaunchParams(getDeviceId(extras[2]), length, sizeof(float), funcAttributes[45]);
averagingKernelFloat<<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, z, n, length, propagate);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::averageDouble(Nd4jPointer *extras, Nd4jPointer dx, Nd4jPointer dz, int n, Nd4jIndex length, bool propagate) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
double **x = reinterpret_cast<double **>(dx);
double *z = reinterpret_cast<double *>(dz);
if (debug && verbose)
printf("averageDouble called\n");
dim3 launchDims = getBasicLaunchParams(getDeviceId(extras[2]), length, sizeof(double), funcAttributes[46]);
averagingKernelDouble<<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, z, n, length, propagate);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
|
f33bbf0b681a31a5e18ae760dbc9bca040ef6176.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "compute/div/div_internal.h"
namespace magmadnn {
namespace internal {
template <typename T>
__global__ void tensor_div_tensor_full_device(T *a, T *b, T *out, unsigned int size) {
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int stride = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < size; i += stride) {
if (b[i] == (T) 0) continue;
out[i] = a[i] / b[i];
}
}
template <typename T>
void tensor_div_tensor_full_device(Tensor<T> *a, Tensor<T> *b, Tensor<T> *out) {
unsigned int size = out->get_size();
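    /* Launch note: a single block of `size` threads is used here, which assumes size stays
       within the device limit (typically 1024 threads per block); the grid-stride loop in the
       kernel would equally support a multi-block launch for larger tensors. */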
hipLaunchKernelGGL(( tensor_div_tensor_full_device) , dim3(1), dim3(size) , 0, 0, a->get_ptr(), b->get_ptr(), out->get_ptr(), size);
}
template void tensor_div_tensor_full_device(Tensor<int> *a, Tensor<int> *b, Tensor<int> *out);
template void tensor_div_tensor_full_device(Tensor<float> *a, Tensor<float> *b, Tensor<float> *out);
template void tensor_div_tensor_full_device(Tensor<double> *a, Tensor<double> *b, Tensor<double> *out);
template <typename T>
__global__ void kernel_tensor_div_scalar_full_device(T *a, T scalar, T *out, unsigned int size) {
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int stride = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < size; i += stride) {
out[i] = a[i] / scalar;
}
}
template <typename T>
void tensor_div_scalar_full_device(Tensor<T> *a, T scalar, Tensor<T> *out) {
if (scalar == (T) 0) return;
unsigned int size = out->get_size();
hipLaunchKernelGGL(( kernel_tensor_div_scalar_full_device) , dim3(1), dim3(size) , 0, 0, a->get_ptr(), scalar, out->get_ptr(), size);
}
template void tensor_div_scalar_full_device(Tensor<int> *a, int scalar, Tensor<int> *out);
template void tensor_div_scalar_full_device(Tensor<float> *a, float scalar, Tensor<float> *out);
template void tensor_div_scalar_full_device(Tensor<double> *a, double scalar, Tensor<double> *out);
template <typename T>
__global__ void kernel_scalar_div_tensor_full_device(T scalar, T *a, T *out, unsigned int size) {
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int stride = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < size; i += stride) {
if (a[i] == (T) 0) continue;
out[i] = scalar / a[i];
}
}
template <typename T>
void scalar_div_tensor_full_device(T scalar, Tensor<T> *a, Tensor<T> *out) {
unsigned int size = out->get_size();
hipLaunchKernelGGL(( kernel_scalar_div_tensor_full_device) , dim3(1), dim3(size) , 0, 0, scalar, a->get_ptr(), out->get_ptr(), size);
}
template void scalar_div_tensor_full_device(int scalar, Tensor<int> *b, Tensor<int> *out);
template void scalar_div_tensor_full_device(float scalar, Tensor<float> *b, Tensor<float> *out);
template void scalar_div_tensor_full_device(double scalar, Tensor<double> *b, Tensor<double> *out);
} // namespace internal
} // namespace magmadnn
|
f33bbf0b681a31a5e18ae760dbc9bca040ef6176.cu
|
#include "compute/div/div_internal.h"
namespace magmadnn {
namespace internal {
template <typename T>
__global__ void tensor_div_tensor_full_device(T *a, T *b, T *out, unsigned int size) {
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int stride = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < size; i += stride) {
if (b[i] == (T) 0) continue;
out[i] = a[i] / b[i];
}
}
template <typename T>
void tensor_div_tensor_full_device(Tensor<T> *a, Tensor<T> *b, Tensor<T> *out) {
unsigned int size = out->get_size();
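    /* Launch note: a single block of `size` threads is used here, which assumes size stays
       within the device limit (typically 1024 threads per block); the grid-stride loop in the
       kernel would equally support a multi-block launch for larger tensors. */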
tensor_div_tensor_full_device <<< 1, size >>> (a->get_ptr(), b->get_ptr(), out->get_ptr(), size);
}
template void tensor_div_tensor_full_device(Tensor<int> *a, Tensor<int> *b, Tensor<int> *out);
template void tensor_div_tensor_full_device(Tensor<float> *a, Tensor<float> *b, Tensor<float> *out);
template void tensor_div_tensor_full_device(Tensor<double> *a, Tensor<double> *b, Tensor<double> *out);
template <typename T>
__global__ void kernel_tensor_div_scalar_full_device(T *a, T scalar, T *out, unsigned int size) {
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int stride = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < size; i += stride) {
out[i] = a[i] / scalar;
}
}
template <typename T>
void tensor_div_scalar_full_device(Tensor<T> *a, T scalar, Tensor<T> *out) {
if (scalar == (T) 0) return;
unsigned int size = out->get_size();
kernel_tensor_div_scalar_full_device <<< 1, size >>> (a->get_ptr(), scalar, out->get_ptr(), size);
}
template void tensor_div_scalar_full_device(Tensor<int> *a, int scalar, Tensor<int> *out);
template void tensor_div_scalar_full_device(Tensor<float> *a, float scalar, Tensor<float> *out);
template void tensor_div_scalar_full_device(Tensor<double> *a, double scalar, Tensor<double> *out);
template <typename T>
__global__ void kernel_scalar_div_tensor_full_device(T scalar, T *a, T *out, unsigned int size) {
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int stride = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < size; i += stride) {
if (a[i] == (T) 0) continue;
out[i] = scalar / a[i];
}
}
template <typename T>
void scalar_div_tensor_full_device(T scalar, Tensor<T> *a, Tensor<T> *out) {
unsigned int size = out->get_size();
kernel_scalar_div_tensor_full_device <<< 1, size >>> (scalar, a->get_ptr(), out->get_ptr(), size);
}
template void scalar_div_tensor_full_device(int scalar, Tensor<int> *b, Tensor<int> *out);
template void scalar_div_tensor_full_device(float scalar, Tensor<float> *b, Tensor<float> *out);
template void scalar_div_tensor_full_device(double scalar, Tensor<double> *b, Tensor<double> *out);
} // namespace op
} // namespace internal
|
8146d7edfb635971c62d24afc6e93edfe1be9d98.hip
|
// !!! This is a file automatically generated by hipify!!!
//#include <iostream>
//#include <fstream>
//#include <iomanip>
//#include <string>
//
//#include <cmath>
//#include <cstdio>
//
//#include <hip/hip_runtime.h>
//#include <device_launch_parameters.h>
//
////using namespace std;
//using std::ifstream;
//using std::string;
//using std::cout;
//using std::endl;
//using std::ios;
//using std::setiosflags;
//using std::setprecision;
//
////#define length 8
//#define PI 3.14159265
//#define length 4
//#define block_len 16
//
//hipError_t dctWithCuda_1(const double *d, double *D);
//
//hipError_t dctWithCuda_2(const double *f, double *F);
//
///*__global__ void dct(float *f, float *F){
// int tidy = blockIdx.x*blockDim.x + threadIdx.x;
// int tidx = blockIdx.y*blockDim.y + threadIdx.y;
// int index = tidx*len + tidy;
// float tmp;
// float beta,alfa;
// if(tidx == 0)
// beta = sqrt(1.0/length);
// else
// beta = sqrt(2.0/length);
// if(tidy == 0)
// alfa = sqrt(1.0/length);
// else
// alfa = sqrt(2.0/length);
// if(tidx<length && tidy<length){
// for(i=0; i<length; i++){
// int x = i/length;
// int y = i%length;
// tmp+=((int)data[i])*cos((2*x+1)*tidx*PI/(2.0*length))*
// cos((2*y+1)*tidy*PI/(2.0*length));
// }
// F[index]=(float)alfa*beta*tmp;
// }
// }
//*/
//
//__global__ void dct_1(const double *f, double *F) {
// int bid = blockIdx.x;
// //int tid = threadIdx.x;
// int i, j;
// //double data[length]={0.0};
// double tmp;
// //printf("length = %d\n", length);
// if (bid < length)
// {
// //__shared__
// double data[length];
// for (i = 0; i < length; i++)
// {
// data[i] = f[bid * length + i]; //load row data from f.
// }
// __syncthreads();
// for (i = 0; i < length; i++)
// {
// if (i == 0)
// {
// tmp = (double) (1.0 / sqrt(1.0 * length));
// F[bid * length + i] = 0.0; //why use F[bid]? Do transpose at the same time.
// for (j = 0; j < length; j++)
// F[bid * length + i] += data[j];
// F[bid * length] *= tmp;
// }
// else
// {
// tmp = (double) (sqrt(2.0 / (1.0 * length)));
// for (i = 1; i < length; i++)
// {
// F[bid * length + i] = 0.0;
// for (j = 0; j < length; j++)
// {
// F[bid * length + i] +=
// (double) (data[j] * cos((2 * j + 1) * i * PI / (2 * length)));
// }
// F[bid * length + i] *= tmp;
// }
// }
// }
//// __syncthreads();
//// if (bid == 0)
//// {
//// for (int k = 0; k < length; k++)
//// {
//// for (int l = 0; l < length; l++)
//// {
//// printf("%lf\t", F[k * length + l]);
//// }
//// printf("\n");
//// }
//// printf("\n");
//// }
//
// __syncthreads();
// for (i = 0; i < length; i++)
// {
// data[i] = F[i * length + bid];
// }
// __syncthreads();
// for (i = 0; i < length; i++)
// {
// if (i == 0)
// {
// tmp = (double) (1.0 / sqrt(1.0 * length));
// F[bid] = 0;
// for (j = 0; j < length; j++)
// F[bid] += data[j];
// F[bid] *= tmp;
// }
// else
// {
// tmp = (double) (sqrt(2.0 / (1.0 * length)));
// for (i = 1; i < length; i++)
// {
// F[i * length + bid] = 0;
// for (j = 0; j < length; j++)
// {
// F[i * length + bid] +=
// (double) (data[j] * cos((2 * j + 1) * i * PI / (2 * length)));
// }
// F[i * length + bid] *= tmp;
// }
// }
// }
// __syncthreads();
// }
//}
//
//__global__ void dct_2(const double *f, double *F) {
// int tidy = blockIdx.x * blockDim.x + threadIdx.x;
// int tidx = blockIdx.y * blockDim.y + threadIdx.y;
// int index = tidx * length + tidy;
// int i;
// double tmp;
// double beta, alfa;
// if (tidx == 0)
// beta = sqrt(1.0 / length);
// else
// beta = sqrt(2.0 / length);
// if (tidy == 0)
// alfa = sqrt(1.0 / length);
// else
// alfa = sqrt(2.0 / length);
// if (tidx < length && tidy < length) {
// for (i = 0; i < length * length; i++) {
// int x = i / length;
// int y = i % length;
// tmp += ((double) f[i])
// * cos((2 * x + 1) * tidx * PI / (2.0 * length))
// * cos((2 * y + 1) * tidy * PI / (2.0 * length));
// }
// F[index] = (double) alfa * beta * tmp;
// }
//}
//
//int main() {
// ifstream infile("/home/zhujian/cuda-workspace/dct_10.16/gradient.txt");
// int i = 0;
// string line;
// double f[length * length] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
// 14, 15 };
// double F[length * length] = { 0.0 };
// while(i<length*length){
// if(getline(infile, line)){
//
// f[i] = atof(line.c_str());
// cout<<"f[i]: "<<f[i]<<endl;
// }
// i++;
// }
// cout << "before" << endl;
// for (i = 0; i < length * length; i++)
// {
// cout << f[i] << " ";
// if ((i + 1) % length == 0)
// cout << endl;
// }
// cout << endl;
//
// for (i = 0; i < length * length; i++)
// {
// cout << F[i] << " ";
// if ((i + 1) % length == 0)
// cout << endl;
// }
// cout << endl;
//
//
// /*
// * execute dct_1
// */
//
// hipError_t cudaStatus = dctWithCuda_1(f, F);
// if (cudaStatus != hipSuccess)
// {
// fprintf(stderr, "dctWithCuda_1 failed!");
// return 1;
// }
//
// cout << "after" << endl;
// for (i = 0; i < length * length; i++)
// {
// cout << setiosflags(ios::right) << f[i] << "\t";
// if ((i + 1) % length == 0)
// cout << endl;
// }
//
// cout << endl;
// for (i = 0; i < length * length; i++)
// {
//
// /* GPU can't calculate floating number precisely
// * 0 will be a very small floating number.
// * so print this numbers with 7 digits after decimal point
// */
// cout << setiosflags(ios::right)
// << setiosflags(ios::fixed) << setprecision(7)
// << F[i] << "\t";
// if ((i + 1) % length == 0)
// cout << endl;
// }
// return 0;
//
//}
//
//hipError_t dctWithCuda_1(const double *d, double *D) {
// double *dev_d = 0;
// double *dev_D = 0;
// hipError_t cudaStatus;
//
// cudaStatus = hipSetDevice(0);
// if (cudaStatus != hipSuccess) {
// fprintf(stderr,
// "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
// goto Error;
// }
//
// cudaStatus = hipMalloc((void**) &dev_d, length * length * sizeof(double));
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMalloc failed!");
// goto Error;
// }
//
// cudaStatus = hipMalloc((void**) &dev_D, length * length * sizeof(double));
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMalloc failed!");
// goto Error;
// }
//
// //copy input vectors from host memory to GPU buffers.
// cudaStatus = hipMemcpy(dev_d, d, length * length * sizeof(double),
// hipMemcpyHostToDevice);
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMemcpy-- failed");
// goto Error;
// }
// //launch a kernel on the GPU
// dct_1<<<length, 1>>>(dev_d, dev_D);
//
// cudaStatus = hipDeviceSynchronize();
// if (cudaStatus != hipSuccess) {
// fprintf(stderr,
// "hipDeviceSynchronize returned error code %d after launching addKernel!\n",
// cudaStatus);
// goto Error;
// }
//
// cudaStatus = hipMemcpy(D, dev_D, length * length * sizeof(double),
// hipMemcpyDeviceToHost);
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMemcpy failed!");
// goto Error;
// }
// Error: hipFree(dev_d);
// hipFree(dev_D);
// return cudaStatus;
//}
//
//hipError_t dctWithCuda_2(const double *d, double *D) {
// double *dev_d = 0;
// double *dev_D = 0;
// hipError_t cudaStatus;
//
// cudaStatus = hipSetDevice(0);
// if (cudaStatus != hipSuccess) {
// fprintf(stderr,
// "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
// goto Error;
// }
//
// cudaStatus = hipMalloc((void**) &dev_d, length * sizeof(double));
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMalloc failed!");
// goto Error;
// }
//
// cudaStatus = hipMalloc((void**) &dev_D, length * sizeof(double));
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMalloc failed!");
// goto Error;
// }
//
// //copy input vectors from host memory to GPU buffers.
// cudaStatus = hipMemcpy(dev_d, d, length * sizeof(double),
// hipMemcpyHostToDevice);
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMalloc failed");
// goto Error;
// }
//
// //launch a kernel on the GPU
// dct_2<<<1, (length / block_len) * (length / block_len),
// block_len * block_len>>>(dev_d, dev_D);
//
// cudaStatus = hipDeviceSynchronize();
// if (cudaStatus != hipSuccess) {
// fprintf(stderr,
// "hipDeviceSynchronize returned error code %d after launching addKernel!\n",
// cudaStatus);
// goto Error;
// }
//
// cudaStatus = hipMemcpy(D, dev_D, length * length * sizeof(double),
// hipMemcpyDeviceToHost);
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMemcpy failed!");
// goto Error;
// }
// Error: hipFree(dev_d);
// hipFree(dev_D);
//
// return cudaStatus;
//}
//
|
8146d7edfb635971c62d24afc6e93edfe1be9d98.cu
|
//#include <iostream>
//#include <fstream>
//#include <iomanip>
//#include <string>
//
//#include <cmath>
//#include <cstdio>
//
//#include <cuda_runtime.h>
//#include <device_launch_parameters.h>
//
////using namespace std;
//using std::ifstream;
//using std::string;
//using std::cout;
//using std::endl;
//using std::ios;
//using std::setiosflags;
//using std::setprecision;
//
////#define length 8
//#define PI 3.14159265
//#define length 4
//#define block_len 16
//
//cudaError_t dctWithCuda_1(const double *d, double *D);
//
//cudaError_t dctWithCuda_2(const double *f, double *F);
//
///*__global__ void dct(float *f, float *F){
// int tidy = blockIdx.x*blockDim.x + threadIdx.x;
// int tidx = blockIdx.y*blockDim.y + threadIdx.y;
// int index = tidx*len + tidy;
// float tmp;
// float beta,alfa;
// if(tidx == 0)
// beta = sqrt(1.0/length);
// else
// beta = sqrt(2.0/length);
// if(tidy == 0)
// alfa = sqrt(1.0/length);
// else
// alfa = sqrt(2.0/length);
// if(tidx<length && tidy<length){
// for(i=0; i<length; i++){
// int x = i/length;
// int y = i%length;
// tmp+=((int)data[i])*cos((2*x+1)*tidx*PI/(2.0*length))*
// cos((2*y+1)*tidy*PI/(2.0*length));
// }
// F[index]=(float)alfa*beta*tmp;
// }
// }
//*/
//
//__global__ void dct_1(const double *f, double *F) {
// int bid = blockIdx.x;
// //int tid = threadIdx.x;
// int i, j;
// //double data[length]={0.0};
// double tmp;
// //printf("length = %d\n", length);
// if (bid < length)
// {
// //__shared__
// double data[length];
// for (i = 0; i < length; i++)
// {
// data[i] = f[bid * length + i]; //load row data from f.
// }
// __syncthreads();
// for (i = 0; i < length; i++)
// {
// if (i == 0)
// {
// tmp = (double) (1.0 / sqrt(1.0 * length));
// F[bid * length + i] = 0.0; //why use F[bid]? Do transpose at the same time.
// for (j = 0; j < length; j++)
// F[bid * length + i] += data[j];
// F[bid * length] *= tmp;
// }
// else
// {
// tmp = (double) (sqrt(2.0 / (1.0 * length)));
// for (i = 1; i < length; i++)
// {
// F[bid * length + i] = 0.0;
// for (j = 0; j < length; j++)
// {
// F[bid * length + i] +=
// (double) (data[j] * cos((2 * j + 1) * i * PI / (2 * length)));
// }
// F[bid * length + i] *= tmp;
// }
// }
// }
//// __syncthreads();
//// if (bid == 0)
//// {
//// for (int k = 0; k < length; k++)
//// {
//// for (int l = 0; l < length; l++)
//// {
//// printf("%lf\t", F[k * length + l]);
//// }
//// printf("\n");
//// }
//// printf("\n");
//// }
//
// __syncthreads();
// for (i = 0; i < length; i++)
// {
// data[i] = F[i * length + bid];
// }
// __syncthreads();
// for (i = 0; i < length; i++)
// {
// if (i == 0)
// {
// tmp = (double) (1.0 / sqrt(1.0 * length));
// F[bid] = 0;
// for (j = 0; j < length; j++)
// F[bid] += data[j];
// F[bid] *= tmp;
// }
// else
// {
// tmp = (double) (sqrt(2.0 / (1.0 * length)));
// for (i = 1; i < length; i++)
// {
// F[i * length + bid] = 0;
// for (j = 0; j < length; j++)
// {
// F[i * length + bid] +=
// (double) (data[j] * cos((2 * j + 1) * i * PI / (2 * length)));
// }
// F[i * length + bid] *= tmp;
// }
// }
// }
// __syncthreads();
// }
//}
//
//__global__ void dct_2(const double *f, double *F) {
// int tidy = blockIdx.x * blockDim.x + threadIdx.x;
// int tidx = blockIdx.y * blockDim.y + threadIdx.y;
// int index = tidx * length + tidy;
// int i;
// double tmp;
// double beta, alfa;
// if (tidx == 0)
// beta = sqrt(1.0 / length);
// else
// beta = sqrt(2.0 / length);
// if (tidy == 0)
// alfa = sqrt(1.0 / length);
// else
// alfa = sqrt(2.0 / length);
// if (tidx < length && tidy < length) {
// for (i = 0; i < length * length; i++) {
// int x = i / length;
// int y = i % length;
// tmp += ((double) f[i])
// * cos((2 * x + 1) * tidx * PI / (2.0 * length))
// * cos((2 * y + 1) * tidy * PI / (2.0 * length));
// }
// F[index] = (double) alfa * beta * tmp;
// }
//}
//
//int main() {
// ifstream infile("/home/zhujian/cuda-workspace/dct_10.16/gradient.txt");
// int i = 0;
// string line;
// double f[length * length] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
// 14, 15 };
// double F[length * length] = { 0.0 };
// while(i<length*length){
// if(getline(infile, line)){
//
// f[i] = atof(line.c_str());
// cout<<"f[i]: "<<f[i]<<endl;
// }
// i++;
// }
// cout << "before" << endl;
// for (i = 0; i < length * length; i++)
// {
// cout << f[i] << " ";
// if ((i + 1) % length == 0)
// cout << endl;
// }
// cout << endl;
//
// for (i = 0; i < length * length; i++)
// {
// cout << F[i] << " ";
// if ((i + 1) % length == 0)
// cout << endl;
// }
// cout << endl;
//
//
// /*
// * execute dct_1
// */
//
// cudaError_t cudaStatus = dctWithCuda_1(f, F);
// if (cudaStatus != cudaSuccess)
// {
// fprintf(stderr, "dctWithCuda_1 failed!");
// return 1;
// }
//
// cout << "after" << endl;
// for (i = 0; i < length * length; i++)
// {
// cout << setiosflags(ios::right) << f[i] << "\t";
// if ((i + 1) % length == 0)
// cout << endl;
// }
//
// cout << endl;
// for (i = 0; i < length * length; i++)
// {
//
// /* GPU can't calculate floating number precisely
// * 0 will be a very small floating number.
// * so print this numbers with 7 digits after decimal point
// */
// cout << setiosflags(ios::right)
// << setiosflags(ios::fixed) << setprecision(7)
// << F[i] << "\t";
// if ((i + 1) % length == 0)
// cout << endl;
// }
// return 0;
//
//}
//
//cudaError_t dctWithCuda_1(const double *d, double *D) {
// double *dev_d = 0;
// double *dev_D = 0;
// cudaError_t cudaStatus;
//
// cudaStatus = cudaSetDevice(0);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr,
// "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
// goto Error;
// }
//
// cudaStatus = cudaMalloc((void**) &dev_d, length * length * sizeof(double));
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMalloc failed!");
// goto Error;
// }
//
// cudaStatus = cudaMalloc((void**) &dev_D, length * length * sizeof(double));
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMalloc failed!");
// goto Error;
// }
//
// //copy input vectors from host memory to GPU buffers.
// cudaStatus = cudaMemcpy(dev_d, d, length * length * sizeof(double),
// cudaMemcpyHostToDevice);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMemcpy-- failed");
// goto Error;
// }
// //launch a kernel on the GPU
// dct_1<<<length, 1>>>(dev_d, dev_D);
//
// cudaStatus = cudaThreadSynchronize();
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr,
// "cudaThreadSynchronize returned error code %d after launching addKernel!\n",
// cudaStatus);
// goto Error;
// }
//
// cudaStatus = cudaMemcpy(D, dev_D, length * length * sizeof(double),
// cudaMemcpyDeviceToHost);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMemcpy failed!");
// goto Error;
// }
// Error: cudaFree(dev_d);
// cudaFree(dev_D);
// return cudaStatus;
//}
//
//cudaError_t dctWithCuda_2(const double *d, double *D) {
// double *dev_d = 0;
// double *dev_D = 0;
// cudaError_t cudaStatus;
//
// cudaStatus = cudaSetDevice(0);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr,
// "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
// goto Error;
// }
//
// cudaStatus = cudaMalloc((void**) &dev_d, length * sizeof(double));
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMalloc failed!");
// goto Error;
// }
//
// cudaStatus = cudaMalloc((void**) &dev_D, length * sizeof(double));
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMalloc failed!");
// goto Error;
// }
//
// //copy input vectors from host memory to GPU buffers.
// cudaStatus = cudaMemcpy(dev_d, d, length * sizeof(double),
// cudaMemcpyHostToDevice);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMalloc failed");
// goto Error;
// }
//
// //launch a kernel on the GPU
// dct_2<<<1, (length / block_len) * (length / block_len),
// block_len * block_len>>>(dev_d, dev_D);
//
// cudaStatus = cudaThreadSynchronize();
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr,
// "cudaThreadSynchronize returned error code %d after launching addKernel!\n",
// cudaStatus);
// goto Error;
// }
//
// cudaStatus = cudaMemcpy(D, dev_D, length * length * sizeof(double),
// cudaMemcpyDeviceToHost);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMemcpy failed!");
// goto Error;
// }
// Error: cudaFree(dev_d);
// cudaFree(dev_D);
//
// return cudaStatus;
//}
//
|
f0cc873c5f88acc559f4fa96a8c1750df0f930ea.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <exception>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
#include <thrust/extrema.h>
#include <thrust/count.h>
#include <thrust/remove.h>
#include <thrust/sort.h>
#include <thrust/unique.h>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include "NVStrings.h"
#include "NVStringsImpl.h"
#include "ipc_transfer.h"
#include "custring_view.cuh"
#include "StringsStatistics.h"
#include "unicode/is_flags.h"
#include "util.h"
#ifdef __INTELLISENSE__
unsigned int atomicAdd(unsigned int* address, unsigned int val);
#endif
// ctor and dtor are private to control the memory allocation in a single shared-object module
NVStrings::NVStrings(unsigned int count)
{
pImpl = new NVStringsImpl(count);
}
NVStrings::NVStrings()
{
pImpl = new NVStringsImpl(0);
}
NVStrings::NVStrings(const NVStrings& strsIn)
{
NVStrings& strs = (NVStrings&)strsIn;
unsigned int count = strs.size();
pImpl = new NVStringsImpl(count);
if( count )
{
std::vector<NVStrings*> strslist;
strslist.push_back(&strs);
NVStrings_copy_strings(pImpl,strslist);
}
}
NVStrings& NVStrings::operator=(const NVStrings& strsIn)
{
delete pImpl;
NVStrings& strs = (NVStrings&)strsIn;
unsigned int count = strs.size();
pImpl = new NVStringsImpl(count);
if( count )
{
std::vector<NVStrings*> strslist;
strslist.push_back(&strs);
NVStrings_copy_strings(pImpl,strslist);
}
return *this;
}
NVStrings::~NVStrings()
{
delete pImpl;
}
NVStrings* NVStrings::create_from_array( const char** strs, unsigned int count)
{
NVStrings* rtn = new NVStrings(count);
if( count )
NVStrings_init_from_strings(rtn->pImpl,strs,count);
return rtn;
}
NVStrings* NVStrings::create_from_index(std::pair<const char*,size_t>* strs, unsigned int count, bool devmem, sorttype stype)
{
NVStrings* rtn = new NVStrings(count);
if( !count )
return rtn;
int rc = NVStrings_init_from_indexes(rtn->pImpl,strs,count,devmem,stype);
if( rc )
{
// cannot make any other CUDA calls if IllegalAddress error occurs
if( rc==(int)hipErrorIllegalAddress )
throw std::invalid_argument("nvstrings::create_from_index bad_device_ptr");
else
{
delete rtn;
throw std::runtime_error("nvstrings::create_from_index runtime_error");
}
}
return rtn;
}
NVStrings* NVStrings::create_from_offsets(const char* strs, int count, const int* offsets, const unsigned char* nullbitmask, int nulls)
{
NVStrings* rtn = new NVStrings(count);
if( count )
NVStrings_init_from_offsets(rtn->pImpl,strs,count,offsets,nullbitmask,nulls);
return rtn;
}
NVStrings* NVStrings::create_from_strings( std::vector<NVStrings*> strs )
{
unsigned int count = 0;
for( auto itr=strs.begin(); itr!=strs.end(); itr++ )
count += (*itr)->size();
NVStrings* rtn = new NVStrings(count);
if( count )
NVStrings_copy_strings(rtn->pImpl,strs);
return rtn;
}
NVStrings* NVStrings::create_from_ipc( nvstrings_ipc_transfer& ipc )
{
unsigned count = ipc.count;
NVStrings* rtn = new NVStrings(count);
if( count==0 )
return rtn;
rtn->pImpl->setMemoryHandle(ipc.getMemoryPtr(),ipc.size);
custring_view_array strings = (custring_view_array)ipc.getStringsPtr();
// copy the pointers so they can be fixed up
hipError_t err = hipMemcpy(rtn->pImpl->getStringsPtr(),strings,count*sizeof(custring_view*),hipMemcpyDeviceToDevice);
hipIpcCloseMemHandle((void *) strings);
if( err!=hipSuccess )
printCudaError(err,"nvs-create-ipc");
// fix up the pointers for this context
NVStrings_fixup_pointers(rtn->pImpl,ipc.base_address);
return rtn;
}
NVStrings* NVStrings::create_from_csv( const char* csvfile, unsigned int column, unsigned int lines, sorttype stype, bool nullIsEmpty)
{
unsigned int flags = nullIsEmpty ? CSV_NULL_IS_EMPTY : 0;
if( stype & NVStrings::length )
flags |= CSV_SORT_LENGTH;
if( stype & NVStrings::name )
flags |= CSV_SORT_NAME;
std::string fpath = csvfile;
return createFromCSV(fpath,column,lines,flags);
}
void NVStrings::destroy(NVStrings* inst)
{
delete inst;
}
size_t NVStrings::memsize() const
{
return pImpl->bufferSize;
}
NVStrings* NVStrings::copy()
{
unsigned int count = size();
NVStrings* rtn = new NVStrings(count);
if( count )
{
std::vector<NVStrings*> strslist;
strslist.push_back(this);
NVStrings_copy_strings(rtn->pImpl,strslist);
}
return rtn;
}
//
void NVStrings::print( int start, int end, int maxwidth, const char* delimiter )
{
unsigned int count = size();
if( end < 0 || end > count )
end = count;
if( start < 0 )
start = 0;
if( start >= end )
return;
count = end - start;
//
auto execpol = rmm::exec_policy(0);
custring_view** d_strings = pImpl->getStringsPtr();
rmm::device_vector<size_t> lens(count,0);
size_t* d_lens = lens.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(start), end,
[d_strings, start, maxwidth, d_lens] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
int len = dstr->size();
if( maxwidth > 0 )
len = dstr->byte_offset_for(maxwidth);
d_lens[idx-start] = len +1; // include null-terminator;
});
// allocate large device buffer to hold all the strings
size_t msize = thrust::reduce(execpol->on(0),lens.begin(),lens.end());
if( msize==0 )
{
printf("all %d strings are null\n",count);
return;
}
char* d_buffer = nullptr;
RMM_ALLOC(&d_buffer,msize,0);
// convert lengths to offsets
rmm::device_vector<size_t> offsets(count,0);
thrust::exclusive_scan(execpol->on(0),lens.begin(),lens.end(),offsets.begin());
size_t* d_offsets = offsets.data().get();
// copy strings into single buffer
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(start), end,
[d_strings, start, maxwidth, d_offsets, d_lens, d_buffer] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
size_t offset = d_offsets[idx-start];
char* optr = d_buffer + offset;
if( dstr )
{
dstr->copy(optr,maxwidth);
size_t len = d_lens[idx-start];
//memcpy(optr,dstr->data(),len-1);
*(optr+len-1) = 0;
}
});
//
//hipDeviceSynchronize();
// copy strings to host
char* h_buffer = new char[msize];
hipMemcpy(h_buffer, d_buffer, msize, hipMemcpyDeviceToHost);
RMM_FREE(d_buffer,0);
// print strings to stdout
thrust::host_vector<custring_view*> h_strings(*(pImpl->pList)); // just for checking nulls
thrust::host_vector<size_t> h_lens(lens);
char* hstr = h_buffer;
for( int idx=0; idx < count; ++idx )
{
printf("%d:",idx);
if( !h_strings[idx] )
printf("<null>");
else
printf("[%s]",hstr);
printf("%s",delimiter);
hstr += h_lens[idx];
}
    delete[] h_buffer;
}
//
int NVStrings::to_host(char** list, int start, int end)
{
unsigned int count = size();
if( end < 0 || end > count )
end = count;
if( start >= end )
return 0;
count = end - start;
// compute size of specified strings
auto execpol = rmm::exec_policy(0);
rmm::device_vector<size_t> lens(count,0);
size_t* d_lens = lens.data().get();
custring_view** d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(start), end,
[d_strings, start, d_lens] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
d_lens[idx-start] = dstr->size()+1; // include space for null terminator
});
hipError_t err = hipSuccess;
size_t msize = thrust::reduce(execpol->on(0),lens.begin(),lens.end());
if( msize==0 )
{
memset(list,0,count*sizeof(char*));
return 0; // every string is null so we are done
}
// allocate device memory to copy strings temporarily
char* d_buffer = nullptr;
rmmError_t rerr = RMM_ALLOC(&d_buffer,msize,0);
if( rerr != RMM_SUCCESS )
{
fprintf(stderr,"nvs-to_host: RM_ALLOC(%p,%lu)=%d\n", d_buffer,msize,(int)rerr);
//printCudaError(err);
return (int)err;
}
// convert lengths to offsets
rmm::device_vector<size_t> offsets(count,0);
thrust::exclusive_scan(execpol->on(0),lens.begin(),lens.end(),offsets.begin());
size_t* d_offsets = offsets.data().get();
// copy strings into temporary buffer
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(start), end,
[d_strings, start, d_offsets, d_buffer] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
size_t offset = d_offsets[idx-start];
char* optr = d_buffer + offset;
if( dstr )
{
int len = dstr->size();
memcpy(optr,dstr->data(),len);
*(optr + len) = 0;
}
});
//
//err = hipDeviceSynchronize();
//if( err != hipSuccess )
//{
// printCudaError(err,"nvs-to_host: copying strings device to device");
// RMM_FREE(d_buffer,0);
// return (int)err;
//}
// copy strings to host
char* h_buffer = new char[msize];
err = hipMemcpy(h_buffer, d_buffer, msize, hipMemcpyDeviceToHost);
RMM_FREE(d_buffer,0); // done with device buffer
if( err != hipSuccess )
{
printCudaError(err, "nvs-to_host: copying strings device to host");
        delete[] h_buffer;
return (int)err;
}
// Deserialization host memory to memory provided by the caller
thrust::host_vector<custring_view*> h_strings(*(pImpl->pList)); // just for checking nulls
thrust::host_vector<size_t> h_offsets(offsets);
h_offsets.push_back(msize); // include size as last offset
for( unsigned int idx=0; idx < count; ++idx )
{
if( h_strings[idx]==0 )
{
list[idx] = 0;
continue;
}
size_t offset = h_offsets[idx];
size_t len = h_offsets[idx+1] - offset;
const char* p_data = h_buffer + offset;
//char* h_data = new char[len]; // make memory on the host
//h_data[len-1] = 0; // null terminate for the caller
//memcpy(h_data, p_data, len-1);
//list[idx] = h_data;
if( list[idx] )
memcpy(list[idx], p_data, len-1);
}
    delete[] h_buffer;
return 0;
}
// build a string-index from this instance's strings
int NVStrings::create_index(std::pair<const char*,size_t>* strs, bool bdevmem )
{
unsigned int count = size();
if( count==0 )
return 0;
auto execpol = rmm::exec_policy(0);
rmm::device_vector< thrust::pair<const char*,size_t> > indexes(count);
thrust::pair<const char*,size_t>* d_indexes = indexes.data().get();
custring_view_array d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_indexes] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
{
d_indexes[idx].first = (const char*)dstr->data();
d_indexes[idx].second = (size_t)dstr->size();
}
else
{
d_indexes[idx].first = nullptr;
d_indexes[idx].second = 0;
}
});
hipError_t err = hipSuccess; //hipDeviceSynchronize();
if( bdevmem )
err = hipMemcpy( strs, indexes.data().get(), count * sizeof(std::pair<const char*,size_t>), hipMemcpyDeviceToDevice );
else
err = hipMemcpy( strs, indexes.data().get(), count * sizeof(std::pair<const char*,size_t>), hipMemcpyDeviceToHost );
if( err != hipSuccess )
{
printCudaError(err,"nvs-create_index");
return (int)err;
}
return 0;
}
//
int NVStrings::create_custring_index( custring_view** strs, bool bdevmem )
{
unsigned int count = size();
if( count==0 )
return 0;
custring_view_array d_strings = pImpl->getStringsPtr();
if( bdevmem )
hipMemcpy( strs, d_strings, count * sizeof(custring_view*), hipMemcpyDeviceToDevice );
else
hipMemcpy( strs, d_strings, count * sizeof(custring_view*), hipMemcpyDeviceToHost );
return 0;
}
// copy strings into memory provided
int NVStrings::create_offsets( char* strs, int* offsets, unsigned char* nullbitmask, bool bdevmem )
{
unsigned int count = size();
if( count==0 )
return 0;
if( strs==0 || offsets==0 )
return 0;
auto execpol = rmm::exec_policy(0);
// first compute offsets/nullbitmask
int* d_offsets = offsets;
unsigned char* d_nulls = nullbitmask;
if( !bdevmem )
{
RMM_ALLOC(&d_offsets,(count+1)*sizeof(int),0);
if( nullbitmask )
{
RMM_ALLOC(&d_nulls,((count+7)/8)*sizeof(unsigned char),0);
hipMemset(d_nulls,0,((count+7)/8));
}
}
//
rmm::device_vector<int> sizes(count+1,0);
int* d_sizes = sizes.data().get();
custring_view** d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_sizes] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
d_sizes[idx] = (int)dstr->size();
});
// ^^^-- these two for-each-n's can likely be combined --vvv
if( d_nulls )
{
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), (count+7)/8,
[d_strings, count, d_nulls] __device__(unsigned int idx){
unsigned int ndx = idx * 8;
unsigned char nb = 0;
for( int i=0; i<8; ++i )
{
nb = nb >> 1;
if( ndx+i < count )
{
custring_view* dstr = d_strings[ndx+i];
if( dstr )
nb |= 128;
}
}
d_nulls[idx] = nb;
});
}
//
thrust::exclusive_scan(execpol->on(0),d_sizes, d_sizes+(count+1), d_offsets);
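    // d_sizes holds count+1 entries (the trailing element stays 0), so after the exclusive scan
    // d_offsets[i] is the byte offset of string i and d_offsets[count] is the total byte count.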
// compute/allocate of memory
size_t totalbytes = thrust::reduce(execpol->on(0), d_sizes, d_sizes+count);
char* d_strs = strs;
if( !bdevmem )
RMM_ALLOC(&d_strs,totalbytes,0);
// shuffle strings into memory
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_strs, d_offsets] __device__(unsigned int idx){
char* buffer = d_strs + d_offsets[idx];
custring_view* dstr = d_strings[idx];
if( dstr )
memcpy(buffer,dstr->data(),dstr->size());
});
// copy memory to parameters (if necessary)
if( !bdevmem )
{
hipMemcpy(offsets,d_offsets,(count+1)*sizeof(int),hipMemcpyDeviceToHost);
hipMemcpy(strs,d_strs,totalbytes,hipMemcpyDeviceToHost);
if( nullbitmask )
hipMemcpy(nullbitmask,d_nulls,((count+7)/8)*sizeof(unsigned char),hipMemcpyDeviceToHost);
}
return 0;
}
int NVStrings::create_ipc_transfer( nvstrings_ipc_transfer& ipc )
{
ipc.setStrsHandle(pImpl->getStringsPtr(),pImpl->getMemoryPtr(),size());
ipc.setMemHandle(pImpl->getMemoryPtr(),pImpl->getMemorySize());
return 0;
}
// fills in a bitarray with 0 for null values and 1 for non-null values
// if emptyIsNull=true, empty strings will have bit values of 0 as well
unsigned int NVStrings::set_null_bitarray( unsigned char* bitarray, bool emptyIsNull, bool devmem )
{
unsigned int count = size();
if( count==0 )
return 0;
auto execpol = rmm::exec_policy(0);
unsigned int size = (count + 7)/8; // round up to byte align
unsigned char* d_bitarray = bitarray;
if( !devmem )
RMM_ALLOC(&d_bitarray,size,0);
// count nulls in range for return value
custring_view** d_strings = pImpl->getStringsPtr();
unsigned int ncount = thrust::count_if(execpol->on(0), d_strings, d_strings + count,
[emptyIsNull] __device__ (custring_view*& dstr) { return (dstr==0) || (emptyIsNull && !dstr->size()); });
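    // ncount is the number of entries treated as null (including empty strings when
    // emptyIsNull is set); it is what this method returns after filling the bitmask.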
// fill in the bitarray
// the bitmask is in arrow format which means for each byte
// the null indicator is in bit position right-to-left: 76543210
// logic sets the high-bit and shifts to the right
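    // Example (illustrative, emptyIsNull=false): for strings {"a", NULL, "b", "c", NULL, "d", "e", "f"}
    // the first byte comes out as 0b11101101, i.e. bit i is set when string i is non-null.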
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), size,
[d_strings, count, emptyIsNull, d_bitarray] __device__(unsigned int byteIdx){
unsigned char byte = 0; // set one byte per thread -- init to all nulls
for( unsigned int i=0; i < 8; ++i )
{
unsigned int idx = i + (byteIdx*8); // compute d_strings index
byte = byte >> 1; // shift until we are done
if( idx < count ) // check boundary
{
custring_view* dstr = d_strings[idx];
if( dstr && (!emptyIsNull || dstr->size()) )
byte |= 128; // string is not null, set high bit
}
}
d_bitarray[byteIdx] = byte;
});
//
//hipError_t err = hipDeviceSynchronize();
//if( err != hipSuccess )
//{
// fprintf(stderr,"nvs-set_null_bitarray(%p,%d,%d) size=%u\n",bitarray,(int)emptyIsNull,(int)devmem,count);
// printCudaError(err);
//}
//
if( !devmem )
{
hipMemcpy(bitarray,d_bitarray,size,hipMemcpyDeviceToHost);
RMM_FREE(d_bitarray,0);
}
return ncount;
}
// set int array with position of null strings
unsigned int NVStrings::get_nulls( unsigned int* array, bool emptyIsNull, bool devmem )
{
unsigned int count = size();
if( count==0 )
return 0;
auto execpol = rmm::exec_policy(0);
rmm::device_vector<int> darray(count,-1);
int* d_array = darray.data().get();
custring_view** d_strings = pImpl->getStringsPtr();
//unsigned int ncount = thrust::count_if(execpol->on(0), d_strings, d_strings + count,
// [emptyIsNull] __device__ (custring_view*& dstr) { return (dstr==0) || (emptyIsNull && !dstr->size()); });
// fill in the array
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, emptyIsNull, d_array] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr && (!emptyIsNull || dstr->size()) )
d_array[idx] = -1; // not null
else
d_array[idx] = idx; // null
});
//
//hipError_t err = hipDeviceSynchronize();
//if( err != hipSuccess )
//{
// fprintf(stderr,"nvs-get_nulls(%p,%d,%d) size=%u\n",array,(int)emptyIsNull,(int)devmem,count);
// printCudaError(err);
//}
// compact out the negative values
int* newend = thrust::remove_if(execpol->on(0), d_array, d_array + count, [] __device__ (int val) {return val<0;});
unsigned int ncount = (unsigned int)(newend - d_array);
//
hipError_t err = hipSuccess;
if( array )
{
if( devmem )
err = hipMemcpy(array,d_array,sizeof(int)*ncount,hipMemcpyDeviceToDevice);
else
err = hipMemcpy(array,d_array,sizeof(int)*ncount,hipMemcpyDeviceToHost);
}
if( err != hipSuccess )
{
fprintf(stderr,"nvs-get_nulls(%p,%d,%d) size=%u\n",array,(int)emptyIsNull,(int)devmem,count);
printCudaError(err);
}
return ncount;
}
// number of strings in this instance
unsigned int NVStrings::size() const
{
return (unsigned int)pImpl->pList->size();
}
struct statistics_attrs
{
custring_view_array d_strings;
unsigned char* d_flags;
size_t* d_values;
unsigned int d_mask;
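    // d_mask selects one character class from the unicode flags table; the call sites below
    // pass 16 (whitespace), 4 (digits), 32 (uppercase) and 64 (lowercase).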
statistics_attrs( custring_view_array strings, unsigned char* flags, size_t* values, unsigned int mask )
: d_strings(strings), d_flags(flags), d_values(values), d_mask(mask) {}
__device__ void operator()(unsigned int idx)
{
custring_view* dstr = d_strings[idx];
size_t spaces = 0;
if( dstr )
{
for( auto itr = dstr->begin(); itr != dstr->end(); itr++ )
{
unsigned int uni = u82u(*itr);
unsigned int flg = uni <= 0x00FFFF ? d_flags[uni] : 0;
spaces += (size_t)((flg & d_mask)>0);
}
}
d_values[idx] = spaces;
}
};
void NVStrings::compute_statistics(StringsStatistics& stats)
{
unsigned int count = size();
memset((void*)&stats,0,sizeof(stats));
if( count==0 )
return;
stats.total_strings = count;
auto execpol = rmm::exec_policy(0);
size_t stringsmem = pImpl->getMemorySize();
size_t ptrsmem = pImpl->pList->size() * sizeof(custring_view*);
stats.total_memory = stringsmem + ptrsmem;
custring_view_array d_strings = pImpl->getStringsPtr();
rmm::device_vector<size_t> values(count,0);
size_t* d_values = values.data().get();
// bytes
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_values] __device__ (unsigned int idx) {
custring_view* dstr = d_strings[idx];
d_values[idx] = dstr ? dstr->size() : 0;
});
stats.bytes_max = *thrust::max_element(execpol->on(0), values.begin(), values.end());
stats.bytes_min = *thrust::min_element(execpol->on(0), values.begin(), values.end());
stats.total_bytes = thrust::reduce(execpol->on(0), values.begin(), values.end());
stats.bytes_avg = stats.total_bytes / count;
// chars
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_values] __device__ (unsigned int idx) {
custring_view* dstr = d_strings[idx];
d_values[idx] = dstr ? dstr->chars_count() : 0;
});
stats.chars_max = *thrust::max_element(execpol->on(0), values.begin(), values.end());
stats.chars_min = *thrust::min_element(execpol->on(0), values.begin(), values.end());
stats.total_chars = thrust::reduce(execpol->on(0), values.begin(), values.end());
    stats.chars_avg = stats.total_chars / count;
// memory
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_values] __device__ (unsigned int idx) {
custring_view* dstr = d_strings[idx];
d_values[idx] = dstr ? dstr->alloc_size() : 0;
});
stats.mem_max = *thrust::max_element(execpol->on(0), values.begin(), values.end());
stats.mem_min = *thrust::min_element(execpol->on(0), values.begin(), values.end());
size_t mem_total = thrust::reduce(execpol->on(0), values.begin(), values.end());
stats.mem_avg = mem_total / count;
// attrs
unsigned char* d_flags = get_unicode_flags();
// spaces
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
statistics_attrs(d_strings, d_flags, d_values, 16));
stats.whitespace_count = thrust::reduce(execpol->on(0), values.begin(), values.end());
// digits
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
statistics_attrs(d_strings, d_flags, d_values, 4));
stats.digits_count = thrust::reduce(execpol->on(0), values.begin(), values.end());
// uppercase
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
statistics_attrs(d_strings, d_flags, d_values, 32));
stats.uppercase_count = thrust::reduce(execpol->on(0), values.begin(), values.end());
// lowercase
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
statistics_attrs(d_strings, d_flags, d_values, 64));
stats.lowercase_count = thrust::reduce(execpol->on(0), values.begin(), values.end());
// count strings
stats.total_nulls = thrust::count_if(execpol->on(0), d_strings, d_strings + count,
[] __device__ (custring_view* dstr) { return dstr==0; });
stats.total_empty = thrust::count_if(execpol->on(0), d_strings, d_strings + count,
[] __device__ (custring_view* dstr) { return dstr && dstr->empty(); });
// unique strings
{
// make a copy of the pointers so we can sort them
rmm::device_vector<custring_view*> sortcopy(*(pImpl->pList));
custring_view_array d_sortcopy = sortcopy.data().get();
thrust::sort(execpol->on(0), d_sortcopy, d_sortcopy+count,
[] __device__ (custring_view*& lhs, custring_view*& rhs) {
return (lhs && rhs) ? (lhs->compare(*rhs) < 0): rhs!=0;
});
auto nend = thrust::unique(execpol->on(0), d_sortcopy, d_sortcopy+count,
[] __device__ (custring_view* lhs, custring_view* rhs) {
if( lhs==0 || rhs==0 )
return lhs==rhs;
return lhs->compare(*rhs)==0;
});
stats.unique_strings = (size_t)(nend - d_sortcopy);
}
// histogram the characters
{
unsigned int uset_count = 0x010000;
rmm::device_vector<unsigned int> charset(uset_count,0);
unsigned int* d_charset = charset.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_charset] __device__ (unsigned int idx) {
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
for( auto itr = dstr->begin(); itr != dstr->end(); itr++ )
{
unsigned int uni = u82u(*itr);
if( uni <= 0x00FFFF )
atomicAdd(&(d_charset[uni]),1);
}
});
rmm::device_vector<thrust::pair<unsigned int, unsigned int> > charcounts(uset_count);
thrust::pair<unsigned int,unsigned int>* d_charcounts = charcounts.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), uset_count,
[d_charset, d_charcounts] __device__ (unsigned int idx) {
unsigned int val = d_charset[idx];
if( val )
{
d_charcounts[idx].first = u2u8(idx);
d_charcounts[idx].second = val;
}
else
{
d_charcounts[idx].first = 0;
d_charcounts[idx].second = 0;
}
});
auto nend = thrust::remove_if(execpol->on(0), d_charcounts, d_charcounts + uset_count,
[] __device__ (thrust::pair<unsigned int,unsigned int> cc) { return cc.first==0; });
// allocate host memory
size_t elems = (size_t)(nend - d_charcounts);
std::vector<std::pair<unsigned int, unsigned int> > hcharcounts(elems);
// copy d_charcounts to host memory
hipMemcpy(hcharcounts.data(),d_charcounts,elems*sizeof(std::pair<unsigned int,unsigned int>),hipMemcpyDeviceToHost);
// copy hcharcounts to stats.char_counts;
stats.char_counts.reserve(uset_count);
stats.char_counts.swap(hcharcounts);
}
}
|
f0cc873c5f88acc559f4fa96a8c1750df0f930ea.cu
|
/*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <exception>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
#include <thrust/extrema.h>
#include <thrust/count.h>
#include <thrust/remove.h>
#include <thrust/sort.h>
#include <thrust/unique.h>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include "NVStrings.h"
#include "NVStringsImpl.h"
#include "ipc_transfer.h"
#include "custring_view.cuh"
#include "StringsStatistics.h"
#include "unicode/is_flags.h"
#include "util.h"
#ifdef __INTELLISENSE__
unsigned int atomicAdd(unsigned int* address, unsigned int val);
#endif
// ctor and dtor are private to control the memory allocation in a single shared-object module
NVStrings::NVStrings(unsigned int count)
{
pImpl = new NVStringsImpl(count);
}
NVStrings::NVStrings()
{
pImpl = new NVStringsImpl(0);
}
NVStrings::NVStrings(const NVStrings& strsIn)
{
NVStrings& strs = (NVStrings&)strsIn;
unsigned int count = strs.size();
pImpl = new NVStringsImpl(count);
if( count )
{
std::vector<NVStrings*> strslist;
strslist.push_back(&strs);
NVStrings_copy_strings(pImpl,strslist);
}
}
NVStrings& NVStrings::operator=(const NVStrings& strsIn)
{
delete pImpl;
NVStrings& strs = (NVStrings&)strsIn;
unsigned int count = strs.size();
pImpl = new NVStringsImpl(count);
if( count )
{
std::vector<NVStrings*> strslist;
strslist.push_back(&strs);
NVStrings_copy_strings(pImpl,strslist);
}
return *this;
}
NVStrings::~NVStrings()
{
delete pImpl;
}
NVStrings* NVStrings::create_from_array( const char** strs, unsigned int count)
{
NVStrings* rtn = new NVStrings(count);
if( count )
NVStrings_init_from_strings(rtn->pImpl,strs,count);
return rtn;
}
NVStrings* NVStrings::create_from_index(std::pair<const char*,size_t>* strs, unsigned int count, bool devmem, sorttype stype)
{
NVStrings* rtn = new NVStrings(count);
if( !count )
return rtn;
int rc = NVStrings_init_from_indexes(rtn->pImpl,strs,count,devmem,stype);
if( rc )
{
// cannot make any other CUDA calls if IllegalAddress error occurs
if( rc==(int)cudaErrorIllegalAddress )
throw std::invalid_argument("nvstrings::create_from_index bad_device_ptr");
else
{
delete rtn;
throw std::runtime_error("nvstrings::create_from_index runtime_error");
}
}
return rtn;
}
NVStrings* NVStrings::create_from_offsets(const char* strs, int count, const int* offsets, const unsigned char* nullbitmask, int nulls)
{
NVStrings* rtn = new NVStrings(count);
if( count )
NVStrings_init_from_offsets(rtn->pImpl,strs,count,offsets,nullbitmask,nulls);
return rtn;
}
NVStrings* NVStrings::create_from_strings( std::vector<NVStrings*> strs )
{
unsigned int count = 0;
for( auto itr=strs.begin(); itr!=strs.end(); itr++ )
count += (*itr)->size();
NVStrings* rtn = new NVStrings(count);
if( count )
NVStrings_copy_strings(rtn->pImpl,strs);
return rtn;
}
NVStrings* NVStrings::create_from_ipc( nvstrings_ipc_transfer& ipc )
{
unsigned count = ipc.count;
NVStrings* rtn = new NVStrings(count);
if( count==0 )
return rtn;
rtn->pImpl->setMemoryHandle(ipc.getMemoryPtr(),ipc.size);
custring_view_array strings = (custring_view_array)ipc.getStringsPtr();
// copy the pointers so they can be fixed up
cudaError_t err = cudaMemcpy(rtn->pImpl->getStringsPtr(),strings,count*sizeof(custring_view*),cudaMemcpyDeviceToDevice);
cudaIpcCloseMemHandle((void *) strings);
if( err!=cudaSuccess )
printCudaError(err,"nvs-create-ipc");
// fix up the pointers for this context
NVStrings_fixup_pointers(rtn->pImpl,ipc.base_address);
return rtn;
}
NVStrings* NVStrings::create_from_csv( const char* csvfile, unsigned int column, unsigned int lines, sorttype stype, bool nullIsEmpty)
{
unsigned int flags = nullIsEmpty ? CSV_NULL_IS_EMPTY : 0;
if( stype & NVStrings::length )
flags |= CSV_SORT_LENGTH;
if( stype & NVStrings::name )
flags |= CSV_SORT_NAME;
std::string fpath = csvfile;
return createFromCSV(fpath,column,lines,flags);
}
void NVStrings::destroy(NVStrings* inst)
{
delete inst;
}
size_t NVStrings::memsize() const
{
return pImpl->bufferSize;
}
NVStrings* NVStrings::copy()
{
unsigned int count = size();
NVStrings* rtn = new NVStrings(count);
if( count )
{
std::vector<NVStrings*> strslist;
strslist.push_back(this);
NVStrings_copy_strings(rtn->pImpl,strslist);
}
return rtn;
}
//
void NVStrings::print( int start, int end, int maxwidth, const char* delimiter )
{
unsigned int count = size();
if( end < 0 || end > count )
end = count;
if( start < 0 )
start = 0;
if( start >= end )
return;
count = end - start;
//
auto execpol = rmm::exec_policy(0);
custring_view** d_strings = pImpl->getStringsPtr();
rmm::device_vector<size_t> lens(count,0);
size_t* d_lens = lens.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(start), count,
[d_strings, start, maxwidth, d_lens] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
int len = dstr->size();
if( maxwidth > 0 )
len = dstr->byte_offset_for(maxwidth);
d_lens[idx-start] = len +1; // include null-terminator;
});
// allocate large device buffer to hold all the strings
size_t msize = thrust::reduce(execpol->on(0),lens.begin(),lens.end());
if( msize==0 )
{
printf("all %d strings are null\n",count);
return;
}
char* d_buffer = nullptr;
RMM_ALLOC(&d_buffer,msize,0);
// convert lengths to offsets
rmm::device_vector<size_t> offsets(count,0);
thrust::exclusive_scan(execpol->on(0),lens.begin(),lens.end(),offsets.begin());
size_t* d_offsets = offsets.data().get();
// copy strings into single buffer
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(start), count,
[d_strings, start, maxwidth, d_offsets, d_lens, d_buffer] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
size_t offset = d_offsets[idx-start];
char* optr = d_buffer + offset;
if( dstr )
{
dstr->copy(optr,maxwidth);
size_t len = d_lens[idx-start];
//memcpy(optr,dstr->data(),len-1);
*(optr+len-1) = 0;
}
});
//
//cudaDeviceSynchronize();
// copy strings to host
char* h_buffer = new char[msize];
cudaMemcpy(h_buffer, d_buffer, msize, cudaMemcpyDeviceToHost);
RMM_FREE(d_buffer,0);
// print strings to stdout
thrust::host_vector<custring_view*> h_strings(*(pImpl->pList)); // just for checking nulls
thrust::host_vector<size_t> h_lens(lens);
char* hstr = h_buffer;
for( int idx=0; idx < count; ++idx )
{
printf("%d:",idx);
if( !h_strings[idx] )
printf("<null>");
else
printf("[%s]",hstr);
printf("%s",delimiter);
hstr += h_lens[idx];
}
delete[] h_buffer;
}
//
int NVStrings::to_host(char** list, int start, int end)
{
unsigned int count = size();
if( end < 0 || end > count )
end = count;
if( start >= end )
return 0;
count = end - start;
// compute size of specified strings
auto execpol = rmm::exec_policy(0);
rmm::device_vector<size_t> lens(count,0);
size_t* d_lens = lens.data().get();
custring_view** d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(start), count,
[d_strings, start, d_lens] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
d_lens[idx-start] = dstr->size()+1; // include space for null terminator
});
cudaError_t err = cudaSuccess;
size_t msize = thrust::reduce(execpol->on(0),lens.begin(),lens.end());
if( msize==0 )
{
memset(list,0,count*sizeof(char*));
return 0; // every string is null so we are done
}
// allocate device memory to copy strings temporarily
char* d_buffer = nullptr;
rmmError_t rerr = RMM_ALLOC(&d_buffer,msize,0);
if( rerr != RMM_SUCCESS )
{
fprintf(stderr,"nvs-to_host: RMM_ALLOC(%p,%lu)=%d\n", d_buffer,msize,(int)rerr);
//printCudaError(err);
return (int)rerr; // report the allocation failure instead of cudaSuccess
}
// convert lengths to offsets
rmm::device_vector<size_t> offsets(count,0);
thrust::exclusive_scan(execpol->on(0),lens.begin(),lens.end(),offsets.begin());
size_t* d_offsets = offsets.data().get();
// copy strings into temporary buffer
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(start), count,
[d_strings, start, d_offsets, d_buffer] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
size_t offset = d_offsets[idx-start];
char* optr = d_buffer + offset;
if( dstr )
{
int len = dstr->size();
memcpy(optr,dstr->data(),len);
*(optr + len) = 0;
}
});
//
//err = cudaDeviceSynchronize();
//if( err != cudaSuccess )
//{
// printCudaError(err,"nvs-to_host: copying strings device to device");
// RMM_FREE(d_buffer,0);
// return (int)err;
//}
// copy strings to host
char* h_buffer = new char[msize];
err = cudaMemcpy(h_buffer, d_buffer, msize, cudaMemcpyDeviceToHost);
RMM_FREE(d_buffer,0); // done with device buffer
if( err != cudaSuccess )
{
printCudaError(err, "nvs-to_host: copying strings device to host");
delete[] h_buffer;
return (int)err;
}
// Deserialize host memory into the memory provided by the caller
thrust::host_vector<custring_view*> h_strings(*(pImpl->pList)); // just for checking nulls
thrust::host_vector<size_t> h_offsets(offsets);
h_offsets.push_back(msize); // include size as last offset
for( unsigned int idx=0; idx < count; ++idx )
{
if( h_strings[idx]==0 )
{
list[idx] = 0;
continue;
}
size_t offset = h_offsets[idx];
size_t len = h_offsets[idx+1] - offset;
const char* p_data = h_buffer + offset;
//char* h_data = new char[len]; // make memory on the host
//h_data[len-1] = 0; // null terminate for the caller
//memcpy(h_data, p_data, len-1);
//list[idx] = h_data;
if( list[idx] )
memcpy(list[idx], p_data, len-1);
}
delete[] h_buffer;
return 0;
}
// build a string-index from this instance's strings
int NVStrings::create_index(std::pair<const char*,size_t>* strs, bool bdevmem )
{
unsigned int count = size();
if( count==0 )
return 0;
auto execpol = rmm::exec_policy(0);
rmm::device_vector< thrust::pair<const char*,size_t> > indexes(count);
thrust::pair<const char*,size_t>* d_indexes = indexes.data().get();
custring_view_array d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_indexes] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
{
d_indexes[idx].first = (const char*)dstr->data();
d_indexes[idx].second = (size_t)dstr->size();
}
else
{
d_indexes[idx].first = nullptr;
d_indexes[idx].second = 0;
}
});
cudaError_t err = cudaSuccess; //cudaDeviceSynchronize();
if( bdevmem )
err = cudaMemcpy( strs, indexes.data().get(), count * sizeof(std::pair<const char*,size_t>), cudaMemcpyDeviceToDevice );
else
err = cudaMemcpy( strs, indexes.data().get(), count * sizeof(std::pair<const char*,size_t>), cudaMemcpyDeviceToHost );
if( err != cudaSuccess )
{
printCudaError(err,"nvs-create_index");
return (int)err;
}
return 0;
}
//
int NVStrings::create_custring_index( custring_view** strs, bool bdevmem )
{
unsigned int count = size();
if( count==0 )
return 0;
custring_view_array d_strings = pImpl->getStringsPtr();
if( bdevmem )
cudaMemcpy( strs, d_strings, count * sizeof(custring_view*), cudaMemcpyDeviceToDevice );
else
cudaMemcpy( strs, d_strings, count * sizeof(custring_view*), cudaMemcpyDeviceToHost );
return 0;
}
// copy strings into memory provided
int NVStrings::create_offsets( char* strs, int* offsets, unsigned char* nullbitmask, bool bdevmem )
{
unsigned int count = size();
if( count==0 )
return 0;
if( strs==0 || offsets==0 )
return 0;
auto execpol = rmm::exec_policy(0);
// first compute offsets/nullbitmask
int* d_offsets = offsets;
unsigned char* d_nulls = nullbitmask;
if( !bdevmem )
{
RMM_ALLOC(&d_offsets,(count+1)*sizeof(int),0);
if( nullbitmask )
{
RMM_ALLOC(&d_nulls,((count+7)/8)*sizeof(unsigned char),0);
cudaMemset(d_nulls,0,((count+7)/8));
}
}
//
rmm::device_vector<int> sizes(count+1,0);
int* d_sizes = sizes.data().get();
custring_view** d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_sizes] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
d_sizes[idx] = (int)dstr->size();
});
// ^^^-- these two for-each-n's can likely be combined --vvv
if( d_nulls )
{
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), (count+7)/8,
[d_strings, count, d_nulls] __device__(unsigned int idx){
unsigned int ndx = idx * 8;
unsigned char nb = 0;
for( int i=0; i<8; ++i )
{
nb = nb >> 1;
if( ndx+i < count )
{
custring_view* dstr = d_strings[ndx+i];
if( dstr )
nb |= 128;
}
}
d_nulls[idx] = nb;
});
}
//
thrust::exclusive_scan(execpol->on(0),d_sizes, d_sizes+(count+1), d_offsets);
// compute total size and allocate device memory if necessary
size_t totalbytes = thrust::reduce(execpol->on(0), d_sizes, d_sizes+count);
char* d_strs = strs;
if( !bdevmem )
RMM_ALLOC(&d_strs,totalbytes,0);
// shuffle strings into memory
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_strs, d_offsets] __device__(unsigned int idx){
char* buffer = d_strs + d_offsets[idx];
custring_view* dstr = d_strings[idx];
if( dstr )
memcpy(buffer,dstr->data(),dstr->size());
});
// copy memory to parameters (if necessary)
if( !bdevmem )
{
cudaMemcpy(offsets,d_offsets,(count+1)*sizeof(int),cudaMemcpyDeviceToHost);
cudaMemcpy(strs,d_strs,totalbytes,cudaMemcpyDeviceToHost);
if( nullbitmask )
cudaMemcpy(nullbitmask,d_nulls,((count+7)/8)*sizeof(unsigned char),cudaMemcpyDeviceToHost);
}
return 0;
}
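// [Editor's note] Illustrative sketch only -- not part of the original source, and untested.
// The comment inside create_offsets above notes that the two for-each-n passes (string sizes
// and null-mask bytes) can likely be combined; one way is a single pass over byte indexes in
// which each thread fills eight size entries and one null-mask byte, e.g.
//   thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0),
//                      (count+7)/8, sizes_and_nulls_sketch{d_strings,d_sizes,d_nulls,count});
struct sizes_and_nulls_sketch
{
    custring_view_array d_strings;
    int* d_sizes;
    unsigned char* d_nulls; // may be null if no bitmask was requested
    unsigned int count;
    __device__ void operator()(unsigned int byteIdx)
    {
        unsigned char nb = 0;
        for( int i=0; i < 8; ++i )
        {
            nb = nb >> 1;                       // same right-shift packing as above
            unsigned int idx = (byteIdx*8) + i;
            if( idx >= count )
                continue;
            custring_view* dstr = d_strings[idx];
            d_sizes[idx] = dstr ? (int)dstr->size() : 0;
            if( dstr )
                nb |= 128;                      // non-null: set the high bit
        }
        if( d_nulls )
            d_nulls[byteIdx] = nb;
    }
};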
int NVStrings::create_ipc_transfer( nvstrings_ipc_transfer& ipc )
{
ipc.setStrsHandle(pImpl->getStringsPtr(),pImpl->getMemoryPtr(),size());
ipc.setMemHandle(pImpl->getMemoryPtr(),pImpl->getMemorySize());
return 0;
}
// fills in a bitarray with 0 for null values and 1 for non-null values
// if emptyIsNull=true, empty strings will have bit values of 0 as well
unsigned int NVStrings::set_null_bitarray( unsigned char* bitarray, bool emptyIsNull, bool devmem )
{
unsigned int count = size();
if( count==0 )
return 0;
auto execpol = rmm::exec_policy(0);
unsigned int size = (count + 7)/8; // round up to byte align
unsigned char* d_bitarray = bitarray;
if( !devmem )
RMM_ALLOC(&d_bitarray,size,0);
// count nulls in range for return value
custring_view** d_strings = pImpl->getStringsPtr();
unsigned int ncount = thrust::count_if(execpol->on(0), d_strings, d_strings + count,
[emptyIsNull] __device__ (custring_view*& dstr) { return (dstr==0) || (emptyIsNull && !dstr->size()); });
// fill in the bitarray
// the bitmask is in arrow format which means for each byte
// the null indicator is in bit position right-to-left: 76543210
// logic sets the high-bit and shifts to the right
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), size,
[d_strings, count, emptyIsNull, d_bitarray] __device__(unsigned int byteIdx){
unsigned char byte = 0; // set one byte per thread -- init to all nulls
for( unsigned int i=0; i < 8; ++i )
{
unsigned int idx = i + (byteIdx*8); // compute d_strings index
byte = byte >> 1; // shift until we are done
if( idx < count ) // check boundary
{
custring_view* dstr = d_strings[idx];
if( dstr && (!emptyIsNull || dstr->size()) )
byte |= 128; // string is not null, set high bit
}
}
d_bitarray[byteIdx] = byte;
});
//
//cudaError_t err = cudaDeviceSynchronize();
//if( err != cudaSuccess )
//{
// fprintf(stderr,"nvs-set_null_bitarray(%p,%d,%d) size=%u\n",bitarray,(int)emptyIsNull,(int)devmem,count);
// printCudaError(err);
//}
//
if( !devmem )
{
cudaMemcpy(bitarray,d_bitarray,size,cudaMemcpyDeviceToHost);
RMM_FREE(d_bitarray,0);
}
return ncount;
}
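// [Editor's note] Illustrative host-side sketch only -- not part of the original NVStrings API.
// It mirrors the bit-packing used in the device lambda above for the arrow-style null mask:
// bit i of each byte corresponds to string (byteIdx*8 + i); the loop sets the high bit for a
// valid entry and shifts right, so the first string lands in bit 0.
// Example: validity {1,0,1,1,0,0,0,0} packs to binary 00001101 = 0x0D.
static unsigned char pack_null_byte_example( const bool* valid, unsigned int count, unsigned int byteIdx )
{
    unsigned char byte = 0;
    for( unsigned int i=0; i < 8; ++i )
    {
        unsigned int idx = i + (byteIdx*8);
        byte = byte >> 1;            // shift previously packed bits toward bit 0
        if( (idx < count) && valid[idx] )
            byte |= 128;             // set high bit for a non-null entry
    }
    return byte;
}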
// set int array with position of null strings
unsigned int NVStrings::get_nulls( unsigned int* array, bool emptyIsNull, bool devmem )
{
unsigned int count = size();
if( count==0 )
return 0;
auto execpol = rmm::exec_policy(0);
rmm::device_vector<int> darray(count,-1);
int* d_array = darray.data().get();
custring_view** d_strings = pImpl->getStringsPtr();
//unsigned int ncount = thrust::count_if(execpol->on(0), d_strings, d_strings + count,
// [emptyIsNull] __device__ (custring_view*& dstr) { return (dstr==0) || (emptyIsNull && !dstr->size()); });
// fill in the array
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, emptyIsNull, d_array] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr && (!emptyIsNull || dstr->size()) )
d_array[idx] = -1; // not null
else
d_array[idx] = idx; // null
});
//
//cudaError_t err = cudaDeviceSynchronize();
//if( err != cudaSuccess )
//{
// fprintf(stderr,"nvs-get_nulls(%p,%d,%d) size=%u\n",array,(int)emptyIsNull,(int)devmem,count);
// printCudaError(err);
//}
// compact out the negative values
int* newend = thrust::remove_if(execpol->on(0), d_array, d_array + count, [] __device__ (int val) {return val<0;});
unsigned int ncount = (unsigned int)(newend - d_array);
//
cudaError_t err = cudaSuccess;
if( array )
{
if( devmem )
err = cudaMemcpy(array,d_array,sizeof(int)*ncount,cudaMemcpyDeviceToDevice);
else
err = cudaMemcpy(array,d_array,sizeof(int)*ncount,cudaMemcpyDeviceToHost);
}
if( err != cudaSuccess )
{
fprintf(stderr,"nvs-get_nulls(%p,%d,%d) size=%u\n",array,(int)emptyIsNull,(int)devmem,count);
printCudaError(err);
}
return ncount;
}
// number of strings in this instance
unsigned int NVStrings::size() const
{
return (unsigned int)pImpl->pList->size();
}
struct statistics_attrs
{
custring_view_array d_strings;
unsigned char* d_flags;
size_t* d_values;
unsigned int d_mask;
statistics_attrs( custring_view_array strings, unsigned char* flags, size_t* values, unsigned int mask )
: d_strings(strings), d_flags(flags), d_values(values), d_mask(mask) {}
__device__ void operator()(unsigned int idx)
{
custring_view* dstr = d_strings[idx];
size_t spaces = 0;
if( dstr )
{
for( auto itr = dstr->begin(); itr != dstr->end(); itr++ )
{
unsigned int uni = u82u(*itr);
unsigned int flg = uni <= 0x00FFFF ? d_flags[uni] : 0;
spaces += (size_t)((flg & d_mask)>0);
}
}
d_values[idx] = spaces;
}
};
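// [Editor's note] Added comment, not part of the original source: the d_mask values passed to
// the statistics_attrs functor above by compute_statistics() below select bits from the unicode
// character-flags table (unicode/is_flags.h). Per the usage comments there, 16 counts whitespace,
// 4 digits, 32 uppercase and 64 lowercase characters.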
void NVStrings::compute_statistics(StringsStatistics& stats)
{
unsigned int count = size();
memset((void*)&stats,0,sizeof(stats));
if( count==0 )
return;
stats.total_strings = count;
auto execpol = rmm::exec_policy(0);
size_t stringsmem = pImpl->getMemorySize();
size_t ptrsmem = pImpl->pList->size() * sizeof(custring_view*);
stats.total_memory = stringsmem + ptrsmem;
custring_view_array d_strings = pImpl->getStringsPtr();
rmm::device_vector<size_t> values(count,0);
size_t* d_values = values.data().get();
// bytes
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_values] __device__ (unsigned int idx) {
custring_view* dstr = d_strings[idx];
d_values[idx] = dstr ? dstr->size() : 0;
});
stats.bytes_max = *thrust::max_element(execpol->on(0), values.begin(), values.end());
stats.bytes_min = *thrust::min_element(execpol->on(0), values.begin(), values.end());
stats.total_bytes = thrust::reduce(execpol->on(0), values.begin(), values.end());
stats.bytes_avg = stats.total_bytes / count;
// chars
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_values] __device__ (unsigned int idx) {
custring_view* dstr = d_strings[idx];
d_values[idx] = dstr ? dstr->chars_count() : 0;
});
stats.chars_max = *thrust::max_element(execpol->on(0), values.begin(), values.end());
stats.chars_min = *thrust::min_element(execpol->on(0), values.begin(), values.end());
stats.total_chars = thrust::reduce(execpol->on(0), values.begin(), values.end());
stats.chars_avg = stats.total_chars / count;
// memory
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_values] __device__ (unsigned int idx) {
custring_view* dstr = d_strings[idx];
d_values[idx] = dstr ? dstr->alloc_size() : 0;
});
stats.mem_max = *thrust::max_element(execpol->on(0), values.begin(), values.end());
stats.mem_min = *thrust::min_element(execpol->on(0), values.begin(), values.end());
size_t mem_total = thrust::reduce(execpol->on(0), values.begin(), values.end());
stats.mem_avg = mem_total / count;
// attrs
unsigned char* d_flags = get_unicode_flags();
// spaces
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
statistics_attrs(d_strings, d_flags, d_values, 16));
stats.whitespace_count = thrust::reduce(execpol->on(0), values.begin(), values.end());
// digits
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
statistics_attrs(d_strings, d_flags, d_values, 4));
stats.digits_count = thrust::reduce(execpol->on(0), values.begin(), values.end());
// uppercase
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
statistics_attrs(d_strings, d_flags, d_values, 32));
stats.uppercase_count = thrust::reduce(execpol->on(0), values.begin(), values.end());
// lowercase
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
statistics_attrs(d_strings, d_flags, d_values, 64));
stats.lowercase_count = thrust::reduce(execpol->on(0), values.begin(), values.end());
// count strings
stats.total_nulls = thrust::count_if(execpol->on(0), d_strings, d_strings + count,
[] __device__ (custring_view* dstr) { return dstr==0; });
stats.total_empty = thrust::count_if(execpol->on(0), d_strings, d_strings + count,
[] __device__ (custring_view* dstr) { return dstr && dstr->empty(); });
// unique strings
{
// make a copy of the pointers so we can sort them
rmm::device_vector<custring_view*> sortcopy(*(pImpl->pList));
custring_view_array d_sortcopy = sortcopy.data().get();
thrust::sort(execpol->on(0), d_sortcopy, d_sortcopy+count,
[] __device__ (custring_view*& lhs, custring_view*& rhs) {
return (lhs && rhs) ? (lhs->compare(*rhs) < 0): rhs!=0;
});
auto nend = thrust::unique(execpol->on(0), d_sortcopy, d_sortcopy+count,
[] __device__ (custring_view* lhs, custring_view* rhs) {
if( lhs==0 || rhs==0 )
return lhs==rhs;
return lhs->compare(*rhs)==0;
});
stats.unique_strings = (size_t)(nend - d_sortcopy);
}
// histogram the characters
{
unsigned int uset_count = 0x010000;
rmm::device_vector<unsigned int> charset(uset_count,0);
unsigned int* d_charset = charset.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_charset] __device__ (unsigned int idx) {
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
for( auto itr = dstr->begin(); itr != dstr->end(); itr++ )
{
unsigned int uni = u82u(*itr);
if( uni <= 0x00FFFF )
atomicAdd(&(d_charset[uni]),1);
}
});
rmm::device_vector<thrust::pair<unsigned int, unsigned int> > charcounts(uset_count);
thrust::pair<unsigned int,unsigned int>* d_charcounts = charcounts.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), uset_count,
[d_charset, d_charcounts] __device__ (unsigned int idx) {
unsigned int val = d_charset[idx];
if( val )
{
d_charcounts[idx].first = u2u8(idx);
d_charcounts[idx].second = val;
}
else
{
d_charcounts[idx].first = 0;
d_charcounts[idx].second = 0;
}
});
auto nend = thrust::remove_if(execpol->on(0), d_charcounts, d_charcounts + uset_count,
[] __device__ (thrust::pair<unsigned int,unsigned int> cc) { return cc.first==0; });
// allocate host memory
size_t elems = (size_t)(nend - d_charcounts);
std::vector<std::pair<unsigned int, unsigned int> > hcharcounts(elems);
// copy d_charcounts to host memory
cudaMemcpy(hcharcounts.data(),d_charcounts,elems*sizeof(std::pair<unsigned int,unsigned int>),cudaMemcpyDeviceToHost);
// copy hcharcounts to stats.char_counts;
stats.char_counts.reserve(uset_count);
stats.char_counts.swap(hcharcounts);
}
}
|
ded0b9ec13be8620d5ae10b267bd66d53f8b4c5b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <random>
#include <ctime>
#define MATRIX_SIZE 900
#define MATRIX_MAX_NUMBER 15
#define BLOCK_SIZE 100
typedef double matrix[MATRIX_SIZE+1][MATRIX_SIZE+1];
matrix A;
matrix A_GPUresult;
double b[MATRIX_SIZE];
//double b[MATRIX_SIZE];
//double y[MATRIX_SIZE];
__host__
void generateMatrix();
//__host__
//void generateVectors();
__host__
void printMatrix(matrix mat);
__host__
void solveOnCPU();
__host__
bool solveOnGPU();
__host__
void generateMatrix()
{
for (int i = 0; i < MATRIX_SIZE; i++)
{
for (int j = 0; j < MATRIX_SIZE+1; j++)
{
if(i == j)
A[i][j] = (double)(rand() % MATRIX_MAX_NUMBER) + 5.0;
else
A[i][j] = (double)(rand() % MATRIX_MAX_NUMBER) + 1.0;
}
}
}
//__host__
//void generateVectors()
//{
// for (int i = 0; i < MATRIX_SIZE; i++)
// {
// b[i] = 2.0;
// y[i] = 1.0;
// }
//}
__host__
void printMatrix(matrix mat)
{
for (int i = 0; i < MATRIX_SIZE + 1; i++)
{
std::cout << "[";
for (int j = 0; j < MATRIX_SIZE; j++)
std::cout << " " << mat[i][j] << ",";
std::cout << " " << mat[i][MATRIX_SIZE] << " ]\n";
}
}
__host__
void solveOnCPU()
{
// k = Will need to be separate kernel calls
// The integer k would fit PERFECTLY as a symbol/constant
// j = To be replaced with threads in parallel
// i = Amount of work each thread needs to do per row
// *********************
// FIRST KERNEL : Solve the bottom triangle
// *********************
// k = The row being processed at the moment
// Let's call it the "selected row"
for (int k = 0; k < MATRIX_SIZE; k++)
{
// *********************
// FIRST STEP : Dividing the selected row
// *********************
// temp = The leftmost element that we will be dividing the entire row with
// If an entire MATRIX_SIZE can fit in a block, temp does not have to be
// allocated in Shared Memory. Otherwise, we have to.
/*SHARED*/ double temp = A[k][k];
// selectedRow = pointer to the selected row. This will be stored in Shared Memory
// once implemented as parallell code.
/*SHARED*/ double *selectedRow = A[k];
// j = selecting each element on the row
// j = k : No point processing elements before k; they're 0
for (int j = k; j < MATRIX_SIZE + 1; j++)
{
selectedRow[j] /= temp;
}
// There is a big risk that A[k][k] doesn't become 1, which would be very troublesome
// *********************
// SECOND STEP : Subtract all other rows!
// *********************
// i = Row we want to do subtraction on at the moment
// i = k + 1 : do all rows underneath the selected row
for (int i = k + 1; i < MATRIX_SIZE; i++)
{
// temp = the leftmost element (that isn't a 0)
temp = A[i][k];
// j = selecting each element on the row
for (int j = k; j < MATRIX_SIZE + 1; j++)
{
A[i][j] -= selectedRow[j] * temp;
}
}
}
// Now the bottom half the matrix is solved
//printMatrix();
// This is where the sequential and parallel versions split up implementation-wise.
// In parallel I intend to transpose the matrix in the first part
// to make use of memory bursting.
// In sequential, this would probably not speed up the implementation;
// it would rather slow it down because of unnecessary memory writing.
// *********************
// SECOND KERNEL : Solve the top triangle
// *********************
// j = What column we're on
// j = MATRIX_SIZE - 1 : to start from the column rightmost (not the vector column)
// j > 0 : Don't do the one leftmost, it's just 1/1
for (int j = MATRIX_SIZE - 1; j > 0; j--)
{
// i = What row we're on
// i = 0; i < j : subtract the solved value x_j (stored in A[j][MATRIX_SIZE]) times the
// coefficient A[i][j] from the right-hand side of every row i above row j.
for (int i = 0; i < j; i++)
{
/*std::cout << "i: " << i << ", j: " << j << std::endl;
for (int ii = 0; ii < MATRIX_SIZE; ii++)
{
std::cout << "[ ";
for (int jj = 0; jj < MATRIX_SIZE+1; jj++)
{
if (ii == i && jj == MATRIX_SIZE)
std::cout << "T ";
else if (ii == j && jj == MATRIX_SIZE)
std::cout << "A ";
else if (ii == i && jj == j)
std::cout << "M ";
else
std::cout << "- ";
}
std::cout << "]\n";
}
std::cout << std::endl;*/
A[i][MATRIX_SIZE] -= A[j][MATRIX_SIZE] * A[i][j];
}
}
//std::cout << std::endl;
//for (int i = 0; i < MATRIX_SIZE; i++)
//{
// b[i] = A[i][MATRIX_SIZE];
// //std::cout << b[i] << ", ";
//}
//std::cout << std::endl;
}
int main()
{
srand((unsigned int)time(NULL));
int totalFail = 0;
for (int j = 0; j < 1000; j++)
{
generateMatrix();
//generateVectors();
//printMatrix(A);
//std::cout << std::endl << std::endl;
solveOnGPU();
//printMatrix(A_GPUresult);
//std::cout << std::endl << std::endl;
solveOnCPU();
//printMatrix(A);
int fail = 0;
for (int i = 0; i < MATRIX_SIZE; i++)
{
if (abs(A_GPUresult[MATRIX_SIZE][i] - A[i][MATRIX_SIZE]) > 0.01)
{
//std::cout << "FAIL\n" << A_GPUresult[MATRIX_SIZE][i] << " : " << A[i][MATRIX_SIZE] << std::endl;
fail++;
}
}
//std::cout << "\nTotal Fail: " << fail << std::endl;
if (fail != 0)
std::cout << "@";
else
std::cout << ".";
totalFail += fail;
}
std::cout << "\nAll together: " << totalFail;
//std::cout << std::endl << std::endl
// << "GPU: ";
//for (int i = 0; i < MATRIX_SIZE; i++)
//{
// std::cout << A_GPUresult[MATRIX_SIZE][i] << ", ";
//}
//std::cout << std::endl
// << "CPU: ";
//for (int i = 0; i < MATRIX_SIZE; i++)
//{
// std::cout << A[i][MATRIX_SIZE] << ", ";
//}
//std::cout << std::endl << std::endl;
//printMatrix();
return 0;
}
__constant__ int k;
__global__
void gpuSolveBottom(matrix d_A)
{
int j = (blockIdx.x * blockDim.x + threadIdx.x) + k;
__shared__ double temp;
temp = d_A[k][k];
double selectedRow = d_A[k][j] / temp;
__syncthreads();
for (int i = k + 1; i < MATRIX_SIZE; i++)
{
temp = d_A[i][k]; // Load the entire thing directly?
//d_A[i][j] = k;
d_A[i][j] -= selectedRow * temp;
__syncthreads();
}
d_A[j][k] = selectedRow;
//d_A[k][j] = selectedRow;
}
__global__
void gpuSolveTop(matrix d_A)
{
int i = (blockIdx.x * blockDim.x + threadIdx.x);
for (int j = MATRIX_SIZE - 1; j > 0; j--)
{
if (i < j)
{
d_A[MATRIX_SIZE][i] -= d_A[MATRIX_SIZE][j] * d_A[j][i];
__syncthreads();
}
}
}
__host__
bool solveOnGPU()
{
hipError_t cudaStatus;
matrix* d_A;
//int *d_k;
int sizeOfMatrix = (MATRIX_SIZE + 1) * (MATRIX_SIZE + 1) * sizeof(double);
cudaStatus = hipMalloc((void**)&d_A, sizeOfMatrix);
if (cudaStatus != hipSuccess)
{
std::cerr << "hipMalloc failed on d_A!\n";
goto Error;
}
//cudaStatus = hipMalloc((void**)&d_k, sizeof(int));
//if (cudaStatus != hipSuccess)
//{
// std::cerr << "hipMalloc failed on d_k!\n";
// goto Error;
//}
cudaStatus = hipMemcpy(d_A, A, sizeOfMatrix, hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess)
{
std::cerr << "hipMemcpy failed!\n" << hipGetErrorString(cudaStatus) << std::endl;
goto Error;
}
//cudaStatus = hipMemcpy(d_k, &k, sizeof(int), hipMemcpyHostToDevice);
//if (cudaStatus != hipSuccess)
//{
// std::cerr << "hipMemcpy failed!\n";
// goto Error;
//}
// *******************
// KERNEL CALLS GOES HERE!
// *******************
for (int i = 0; i < MATRIX_SIZE; i++)
{
cudaStatus = hipMemcpyToSymbol(k, &i, sizeof(int));
if (cudaStatus != hipSuccess)
{
std::cerr << "hipMemcpyToSymbol failed at iteration "<< i <<"!\n";
goto Error;
}
hipLaunchKernelGGL(( gpuSolveBottom), dim3(1), dim3(MATRIX_SIZE+1-i), 0, 0, *d_A);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess)
{
std::cerr << "gpuSolveBottom kernel call failed at iteration " << i << "!\n"
<< hipGetErrorString(cudaStatus) << std::endl;
goto Error;
}
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess)
{
std::cerr << "hipDeviceSynchronize returned error code " << cudaStatus << " after launching gpuSolveBottom!\n";
goto Error;
}
}
hipLaunchKernelGGL(( gpuSolveTop), dim3(1), dim3(MATRIX_SIZE), 0, 0, *d_A);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess)
{
std::cerr << "gpuSolveTop kernel call failed!\n"
<< hipGetErrorString(cudaStatus) << std::endl;
goto Error;
}
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess)
{
std::cerr << "hipDeviceSynchronize returned error code " << cudaStatus << " after launching gpuSolveTop!\n";
goto Error;
}
//cudaStatus = hipMemcpy(b, *d_A + MATRIX_SIZE, (MATRIX_SIZE) * sizeof(double), hipMemcpyDeviceToHost);
cudaStatus = hipMemcpy(A_GPUresult, d_A, sizeOfMatrix, hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess)
{
std::cerr << "hipMemcpy (hipMemcpyDeviceToHost) failed!\n";
goto Error;
}
Error:
hipFree(d_A);
return false;
}
|
ded0b9ec13be8620d5ae10b267bd66d53f8b4c5b.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <random>
#include <ctime>
#define MATRIX_SIZE 900
#define MATRIX_MAX_NUMBER 15
#define BLOCK_SIZE 100
typedef double matrix[MATRIX_SIZE+1][MATRIX_SIZE+1];
matrix A;
matrix A_GPUresult;
double b[MATRIX_SIZE];
//double b[MATRIX_SIZE];
//double y[MATRIX_SIZE];
__host__
void generateMatrix();
//__host__
//void generateVectors();
__host__
void printMatrix(matrix mat);
__host__
void solveOnCPU();
__host__
bool solveOnGPU();
__host__
void generateMatrix()
{
for (int i = 0; i < MATRIX_SIZE; i++)
{
for (int j = 0; j < MATRIX_SIZE+1; j++)
{
if(i == j)
A[i][j] = (double)(rand() % MATRIX_MAX_NUMBER) + 5.0;
else
A[i][j] = (double)(rand() % MATRIX_MAX_NUMBER) + 1.0;
}
}
}
//__host__
//void generateVectors()
//{
// for (int i = 0; i < MATRIX_SIZE; i++)
// {
// b[i] = 2.0;
// y[i] = 1.0;
// }
//}
__host__
void printMatrix(matrix mat)
{
for (int i = 0; i < MATRIX_SIZE + 1; i++)
{
std::cout << "[";
for (int j = 0; j < MATRIX_SIZE; j++)
std::cout << " " << mat[i][j] << ",";
std::cout << " " << mat[i][MATRIX_SIZE] << " ]\n";
}
}
__host__
void solveOnCPU()
{
// k = Will need to be separate kernel calls
// The integer k would fit PERFECTLY as a symbol/constant
// j = To be replaced with threads in parallel
// i = Amount of work each thread needs to do per row
// *********************
// FIRST KERNEL : Solve the bottom triangle
// *********************
// k = The row being processed at the moment
// Let's call it the "selected row"
for (int k = 0; k < MATRIX_SIZE; k++)
{
// *********************
// FIRST STEP : Dividing the selected row
// *********************
// temp = The leftmost element that we will be dividing the entire row with
// If an entire MATRIX_SIZE can fit in a block, temp does not have to be
// allocated in Shared Memory. Otherwise, we have to.
/*SHARED*/ double temp = A[k][k];
// selectedRow = pointer to the selected row. This will be stored in Shared Memory
// once implemented as parallell code.
/*SHARED*/ double *selectedRow = A[k];
// j = selecting each element on the row
// j = k : No point processing elements before k; they're 0
for (int j = k; j < MATRIX_SIZE + 1; j++)
{
selectedRow[j] /= temp;
}
// There is a big risk that A[k][k] doesn't become 1, which would be very troublesome
// *********************
// SECOND STEP : Subtract all other rows!
// *********************
// i = Row we want to do subtraction on at the moment
// i = k + 1 : do all rows underneath the selected row
for (int i = k + 1; i < MATRIX_SIZE; i++)
{
// temp = the leftmost element (that isn't a 0)
temp = A[i][k];
// j = selecting each element on the row
for (int j = k; j < MATRIX_SIZE + 1; j++)
{
A[i][j] -= selectedRow[j] * temp;
}
}
}
// Now the bottom half the matrix is solved
//printMatrix();
// This is where the sequential and parallel versions split up implementation-wise.
// In parallel I intend to transpose the matrix in the first part
// to make use of memory bursting.
// In sequential, this would probably not speed up the implementation;
// it would rather slow it down because of unnecessary memory writing.
// *********************
// SECOND KERNEL : Solve the top triangle
// *********************
// j = What column we're on
// j = MATRIX_SIZE - 1 : to start from the column rightmost (not the vector column)
// j > 0 : Don't do the one leftmost, it's just 1/1
for (int j = MATRIX_SIZE - 1; j > 0; j--)
{
// i = What row we're on
// i = 0; i < j : subtract the solved value x_j (stored in A[j][MATRIX_SIZE]) times the
// coefficient A[i][j] from the right-hand side of every row i above row j.
for (int i = 0; i < j; i++)
{
/*std::cout << "i: " << i << ", j: " << j << std::endl;
for (int ii = 0; ii < MATRIX_SIZE; ii++)
{
std::cout << "[ ";
for (int jj = 0; jj < MATRIX_SIZE+1; jj++)
{
if (ii == i && jj == MATRIX_SIZE)
std::cout << "T ";
else if (ii == j && jj == MATRIX_SIZE)
std::cout << "A ";
else if (ii == i && jj == j)
std::cout << "M ";
else
std::cout << "- ";
}
std::cout << "]\n";
}
std::cout << std::endl;*/
A[i][MATRIX_SIZE] -= A[j][MATRIX_SIZE] * A[i][j];
}
}
//std::cout << std::endl;
//for (int i = 0; i < MATRIX_SIZE; i++)
//{
// b[i] = A[i][MATRIX_SIZE];
// //std::cout << b[i] << ", ";
//}
//std::cout << std::endl;
}
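// [Editor's note] Worked example of the scheme above, not part of the original source.
// For the 2x3 augmented system  [ 2 1 | 5 ]
//                               [ 4 4 | 12 ]
// forward elimination divides row 0 by its pivot -> [ 1 0.5 | 2.5 ], subtracts 4*row0 from
// row 1 -> [ 0 2 | 2 ] and normalizes it -> [ 0 1 | 1 ]. Back substitution then gives
// x1 = 1 and x0 = 2.5 - 0.5*1 = 2, which satisfies 2*2 + 1 = 5 and 4*2 + 4*1 = 12.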
int main()
{
srand((unsigned int)time(NULL));
int totalFail = 0;
for (int j = 0; j < 1000; j++)
{
generateMatrix();
//generateVectors();
//printMatrix(A);
//std::cout << std::endl << std::endl;
solveOnGPU();
//printMatrix(A_GPUresult);
//std::cout << std::endl << std::endl;
solveOnCPU();
//printMatrix(A);
int fail = 0;
for (int i = 0; i < MATRIX_SIZE; i++)
{
if (abs(A_GPUresult[MATRIX_SIZE][i] - A[i][MATRIX_SIZE]) > 0.01)
{
//std::cout << "FAIL\n" << A_GPUresult[MATRIX_SIZE][i] << " : " << A[i][MATRIX_SIZE] << std::endl;
fail++;
}
}
//std::cout << "\nTotal Fail: " << fail << std::endl;
if (fail != 0)
std::cout << "@";
else
std::cout << ".";
totalFail += fail;
}
std::cout << "\nAll together: " << totalFail;
//std::cout << std::endl << std::endl
// << "GPU: ";
//for (int i = 0; i < MATRIX_SIZE; i++)
//{
// std::cout << A_GPUresult[MATRIX_SIZE][i] << ", ";
//}
//std::cout << std::endl
// << "CPU: ";
//for (int i = 0; i < MATRIX_SIZE; i++)
//{
// std::cout << A[i][MATRIX_SIZE] << ", ";
//}
//std::cout << std::endl << std::endl;
//printMatrix();
return 0;
}
__constant__ int k;
__global__
void gpuSolveBottom(matrix d_A)
{
int j = (blockIdx.x * blockDim.x + threadIdx.x) + k;
__shared__ double temp;
temp = d_A[k][k];
double selectedRow = d_A[k][j] / temp;
__syncthreads();
for (int i = k + 1; i < MATRIX_SIZE; i++)
{
temp = d_A[i][k]; // Load the entire thing directly?
//d_A[i][j] = k;
d_A[i][j] -= selectedRow * temp;
__syncthreads();
}
d_A[j][k] = selectedRow;
//d_A[k][j] = selectedRow;
}
__global__
void gpuSolveTop(matrix d_A)
{
int i = (blockIdx.x * blockDim.x + threadIdx.x);
for (int j = MATRIX_SIZE - 1; j > 0; j--)
{
if (i < j)
{
d_A[MATRIX_SIZE][i] -= d_A[MATRIX_SIZE][j] * d_A[j][i];
__syncthreads();
}
}
}
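// [Editor's note] Added explanatory comment, not part of the original source.
// gpuSolveBottom stores each normalized pivot row into column k (d_A[j][k] = selectedRow),
// i.e. transposed, so after forward elimination the reduced right-hand side sits in row
// MATRIX_SIZE. gpuSolveTop therefore back-substitutes with
// d_A[MATRIX_SIZE][i] -= d_A[MATRIX_SIZE][j] * d_A[j][i], reading along rows for coalesced
// access, and the GPU solution ends up in A_GPUresult[MATRIX_SIZE][i] -- which is why main()
// compares it against the CPU's A[i][MATRIX_SIZE].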
__host__
bool solveOnGPU()
{
cudaError_t cudaStatus;
matrix* d_A;
//int *d_k;
int sizeOfMatrix = (MATRIX_SIZE + 1) * (MATRIX_SIZE + 1) * sizeof(double);
cudaStatus = cudaMalloc((void**)&d_A, sizeOfMatrix);
if (cudaStatus != cudaSuccess)
{
std::cerr << "cudaMalloc failed on d_A!\n";
goto Error;
}
//cudaStatus = cudaMalloc((void**)&d_k, sizeof(int));
//if (cudaStatus != cudaSuccess)
//{
// std::cerr << "cudaMalloc failed on d_k!\n";
// goto Error;
//}
cudaStatus = cudaMemcpy(d_A, A, sizeOfMatrix, cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess)
{
std::cerr << "cudaMemcpy failed!\n" << cudaGetErrorString(cudaStatus) << std::endl;
goto Error;
}
//cudaStatus = cudaMemcpy(d_k, &k, sizeof(int), cudaMemcpyHostToDevice);
//if (cudaStatus != cudaSuccess)
//{
// std::cerr << "cudaMemcpy failed!\n";
// goto Error;
//}
// *******************
// KERNEL CALLS GOES HERE!
// *******************
for (int i = 0; i < MATRIX_SIZE; i++)
{
cudaStatus = cudaMemcpyToSymbol(k, &i, sizeof(int));
if (cudaStatus != cudaSuccess)
{
std::cerr << "cudaMemcpyToSymbol failed at iteration "<< i <<"!\n";
goto Error;
}
gpuSolveBottom<<<1, MATRIX_SIZE+1-i>>>(*d_A);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess)
{
std::cerr << "gpuSolveBottom kernel call failed at iteration " << i << "!\n"
<< cudaGetErrorString(cudaStatus) << std::endl;
goto Error;
}
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess)
{
std::cerr << "cudaDeviceSynchronize returned error code " << cudaStatus << " after launching gpuSolveBottom!\n";
goto Error;
}
}
gpuSolveTop<<<1, MATRIX_SIZE>>>(*d_A);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess)
{
std::cerr << "gpuSolveTop kernel call failed!\n"
<< cudaGetErrorString(cudaStatus) << std::endl;
goto Error;
}
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess)
{
std::cerr << "cudaDeviceSynchronize returned error code " << cudaStatus << " after launching gpuSolveTop!\n";
goto Error;
}
//cudaStatus = cudaMemcpy(b, *d_A + MATRIX_SIZE, (MATRIX_SIZE) * sizeof(double), cudaMemcpyDeviceToHost);
cudaStatus = cudaMemcpy(A_GPUresult, d_A, sizeOfMatrix, cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess)
{
std::cerr << "cudaMemcpy (cudaMemcpyDeviceToHost) failed!\n";
goto Error;
}
Error:
cudaFree(d_A);
return false;
}
|
01b21b3c1140dc1518e151c2623a502f9489e3c9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "reduceVector.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *v1 = NULL;
hipMalloc(&v1, XSIZE*YSIZE);
float *v2 = NULL;
hipMalloc(&v2, XSIZE*YSIZE);
float *res = NULL;
hipMalloc(&res, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
reduceVector), dim3(gridBlock),dim3(threadBlock), 0, 0, v1,v2,res);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
reduceVector), dim3(gridBlock),dim3(threadBlock), 0, 0, v1,v2,res);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
reduceVector), dim3(gridBlock),dim3(threadBlock), 0, 0, v1,v2,res);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
01b21b3c1140dc1518e151c2623a502f9489e3c9.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "reduceVector.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *v1 = NULL;
cudaMalloc(&v1, XSIZE*YSIZE);
float *v2 = NULL;
cudaMalloc(&v2, XSIZE*YSIZE);
float *res = NULL;
cudaMalloc(&res, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
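// [Editor's note] Added comment, not part of the generated harness: the two loops above pad
// iXSIZE/iYSIZE up to multiples of BLOCKX/BLOCKY, so the grid dimensions below are just
// ceiling divisions, e.g. (XSIZE + BLOCKX - 1) / BLOCKX.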
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
reduceVector<<<gridBlock,threadBlock>>>(v1,v2,res);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
reduceVector<<<gridBlock,threadBlock>>>(v1,v2,res);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
reduceVector<<<gridBlock,threadBlock>>>(v1,v2,res);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
fbadbd062ca83e24a1f2c7dfdf590b340e78020f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cudamat_kernels.cuh"
#include "float.h"
/* ------------------------- Random number generation ------------------------- */
__global__ void kSeedRandom(unsigned int* rndMults, unsigned long long* rndWords, unsigned int seed) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// The initial x is the seed and the initial carry is 1
unsigned long long rndWord = ((unsigned long long)seed << 32) + 1;
const unsigned int rndMult = rndMults[idx];
/*
* Run the chain for a few steps so that all the streams have a chance
* to differentiate. They start out generating similar random numbers
* because all the multipliers are similar.
*/
for(unsigned int i = 0; i < NUM_RND_BURNIN; i++) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
}
rndWords[idx] = rndWord;
}
__global__ void kRandomUniform(unsigned int* rndMults, unsigned long long* rndWords, float* gData, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
gData[i] = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
}
rndWords[idx] = rndWord;
}
__global__ void kRandomGaussian(unsigned int* rndMults, unsigned long long* rndWords, float* gData, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
float rnd1, rnd2, R, T;
for(unsigned int i = idx; i < numElements; i += 2*NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd1 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd2 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
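// [Editor's note] Box-Muller transform: the two uniforms rnd1, rnd2 in (0,1] yield two
// independent standard normals R*cos(T) and R*sin(T), with R = sqrt(-2*ln(rnd1)) and T = 2*pi*rnd2.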
T = 2 * PI * rnd2;
R = sqrtf(-2 * __logf(rnd1));
gData[i] = R * __cosf(T);
if (i + NUM_RND_STREAMS < numElements)
gData[i + NUM_RND_STREAMS] = R * __sinf(T);
}
rndWords[idx] = rndWord;
}
/* ------------------------- Data copying ------------------------- */
/*
Copy row slice from source to target. There is a block for every 32x32 chunk being copied.
*/
__global__ void kGetRowSlice(float* source, float* target, int start, int end, int width, int height) {
const int row = start + blockIdx.x * 32 + threadIdx.x;
const int start_col = blockIdx.y * 32;
const int end_col = (start_col + 32 < width) ? start_col + 32: width;
const int target_height = end - start;
if (row < end) {
for (int cur_col = start_col; cur_col < end_col; cur_col++)
target[cur_col * target_height + row - start] = source[cur_col * height + row];
}
}
__global__ void kSetRowSlice(float* source, float* target, int start, int end, int width, int height) {
const int row = start + blockIdx.x * 32 + threadIdx.x;
const int start_col = blockIdx.y * 32;
const int end_col = (start_col + 32 < width) ? start_col + 32: width;
const int source_height = end - start;
if (row < end) {
for (int cur_col = start_col; cur_col < end_col; cur_col++)
target[cur_col * height + row] = source[cur_col * source_height + row - start];
//source[cur_col * height + row - start] = target[cur_col * target_height + row];
}
}
__global__ void kTranspose(float *odata, float *idata, int width, int height) {
__shared__ float block[COPY_BLOCK_SIZE][COPY_BLOCK_SIZE+1];
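// [Editor's note] The +1 padding on the row length keeps the transposed reads
// block[threadIdx.x][threadIdx.y] below from landing in the same shared-memory bank,
// avoiding bank conflicts.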
// read the matrix tile into shared memory
unsigned int xIndex = blockIdx.x * COPY_BLOCK_SIZE + threadIdx.x;
unsigned int yIndex = blockIdx.y * COPY_BLOCK_SIZE + threadIdx.y;
if((xIndex < width) && (yIndex < height)) {
unsigned int index_in = yIndex * width + xIndex;
block[threadIdx.y][threadIdx.x] = idata[index_in];
}
__syncthreads();
// write the transposed matrix tile to global memory
xIndex = blockIdx.y * COPY_BLOCK_SIZE + threadIdx.x;
yIndex = blockIdx.x * COPY_BLOCK_SIZE + threadIdx.y;
if((xIndex < height) && (yIndex < width)) {
unsigned int index_out = yIndex * height + xIndex;
odata[index_out] = block[threadIdx.x][threadIdx.y];
}
}
/* ------------------------- Mathematical operations ------------------------- */
__global__ void kLessThan(float* mat1, float* mat2, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = mat1[i] < mat2[i];
}
}
__global__ void kLessThanScalar(float* mat, float val, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = mat[i] < val;
}
}
__global__ void kGreaterThan(float* mat1, float* mat2, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = mat1[i] > mat2[i];
}
}
__global__ void kGreaterThanScalar(float* mat, float val, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = mat[i] > val;
}
}
__global__ void kEquals(float* mat1, float* mat2, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = mat1[i] == mat2[i];
}
}
__global__ void kEqualsScalar(float* mat, float val, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = mat[i] == val;
}
}
__global__ void kSparseDot(int m, int n, int k, float *data, int* indptr, int* indices, float *dense_data, float* target, float beta, float alpha) {
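// [Editor's note] Added comment: the sparse operand is in CSR form -- indptr[row]..indptr[row+1]
// bounds the nonzeros of a row, indices[] holds their column numbers and data[] their values.
// Each thread computes one element of the m x n column-major result as
// target = alpha * (sparse * dense) + beta * target.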
const unsigned int row = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int col = blockIdx.y * blockDim.y + threadIdx.y;
if (row < m && col < n) {
const int start = indptr[row];
const int end = indptr[row + 1];
float sum = 0;
for (int i = start; i < end; i++) {
sum += data[i] * dense_data[col * k + indices[i]];
}
const int pos = col * m + row;
target[pos] = alpha * sum + ((beta == 0) ? 0 : beta * target[pos]);
}
}
__global__ void kMinimum(float* mat1, float* mat2, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = fminf(mat1[i], mat2[i]);
}
}
__global__ void kMinimumScalar(float* mat, float val, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = fminf(mat[i], val);
}
}
__global__ void kMaximum(float* mat1, float* mat2, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = fmaxf(mat1[i], mat2[i]);
}
}
__global__ void kMaximumScalar(float* mat, float val, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = fmaxf(mat[i], val);
}
}
__global__ void kMinColumnwise(float* mat, float* target, unsigned int width, unsigned int height) {
__shared__ float min_vals[32];
float cur_min = FLT_MAX;
float val = 0;
for (unsigned int i = threadIdx.x; i < height; i += 32) {
val = mat[blockIdx.x * height + i];
if (val < cur_min)
cur_min = val;
}
min_vals[threadIdx.x] = cur_min;
__syncthreads();
if (threadIdx.x == 0) {
cur_min = FLT_MAX;
for (unsigned int i = 0; i < 32; i++)
if (min_vals[i] < cur_min)
cur_min = min_vals[i];
target[blockIdx.x] = cur_min;
}
}
__global__ void kMinRowwise(float* mat, float* target, unsigned int width, unsigned int height) {
__shared__ float min_vals[32];
float cur_min = FLT_MAX;
float val = 0;
for (unsigned int i = threadIdx.x; i < width; i += 32) {
val = mat[i * height + blockIdx.x];
if (val < cur_min)
cur_min = val;
}
min_vals[threadIdx.x] = cur_min;
__syncthreads();
if (threadIdx.x == 0) {
cur_min = FLT_MAX;
for (unsigned int i = 0; i < 32; i++)
if (min_vals[i] < cur_min)
cur_min = min_vals[i];
target[blockIdx.x] = cur_min;
}
}
__global__ void kMaxColumnwise(float* mat, float* target, unsigned int width, unsigned int height) {
__shared__ float max_vals[32];
float cur_max = -FLT_MAX;
float val = 0;
for (unsigned int i = threadIdx.x; i < height; i += 32) {
val = mat[blockIdx.x * height + i];
if (val > cur_max)
cur_max = val;
}
max_vals[threadIdx.x] = cur_max;
__syncthreads();
if (threadIdx.x == 0) {
cur_max = -FLT_MAX;
for (unsigned int i = 0; i < 32; i++)
if (max_vals[i] > cur_max)
cur_max = max_vals[i];
target[blockIdx.x] = cur_max;
}
}
__global__ void kMaxRowwise(float* mat, float* target, unsigned int width, unsigned int height) {
__shared__ float max_vals[32];
float cur_max = -FLT_MAX;
float val = 0;
for (unsigned int i = threadIdx.x; i < width; i += 32) {
val = mat[i * height + blockIdx.x];
if (val > cur_max)
cur_max = val;
}
max_vals[threadIdx.x] = cur_max;
__syncthreads();
if (threadIdx.x == 0) {
cur_max = -FLT_MAX;
for (unsigned int i = 0; i < 32; i++)
if (max_vals[i] > cur_max)
cur_max = max_vals[i];
target[blockIdx.x] = cur_max;
}
}
__global__ void kArgMinColumnwise(float* mat, float* target, unsigned int width, unsigned int height) {
__shared__ float min_vals[32];
__shared__ unsigned int min_args[32];
float cur_min = FLT_MAX;
unsigned int cur_arg = 0;
float val = 0;
for (unsigned int i = threadIdx.x; i < height; i += 32) {
val = mat[blockIdx.x * height + i];
if (val < cur_min) {
cur_min = val;
cur_arg = i;
}
}
min_vals[threadIdx.x] = cur_min;
min_args[threadIdx.x] = cur_arg;
__syncthreads();
if (threadIdx.x == 0) {
cur_min = FLT_MAX;
cur_arg = 0;
for (unsigned int i = 0; i < 32; i++)
if (min_vals[i] < cur_min) {
cur_min = min_vals[i];
cur_arg = min_args[i];
}
target[blockIdx.x] = cur_arg;
}
}
__global__ void kArgMinRowwise(float* mat, float* target, unsigned int width, unsigned int height) {
__shared__ float min_vals[32];
__shared__ unsigned int min_args[32];
float cur_min = FLT_MAX;
unsigned int cur_arg = 0;
float val = 0;
for (unsigned int i = threadIdx.x; i < width; i += 32) {
val = mat[i * height + blockIdx.x];
if (val < cur_min) {
cur_min = val;
cur_arg = i;
}
}
min_vals[threadIdx.x] = cur_min;
min_args[threadIdx.x] = cur_arg;
__syncthreads();
if (threadIdx.x == 0) {
cur_min = FLT_MAX;
cur_arg = 0;
for (unsigned int i = 0; i < 32; i++)
if (min_vals[i] < cur_min) {
cur_min = min_vals[i];
cur_arg = min_args[i];
}
target[blockIdx.x] = cur_arg;
}
}
__global__ void kArgMaxColumnwise(float* mat, float* target, unsigned int width, unsigned int height) {
__shared__ float max_vals[32];
__shared__ unsigned int max_args[32];
float cur_max = -FLT_MAX;
unsigned int cur_arg = 0;
float val = 0;
for (unsigned int i = threadIdx.x; i < height; i += 32) {
val = mat[blockIdx.x * height + i];
if (val > cur_max) {
cur_max = val;
cur_arg = i;
}
}
max_vals[threadIdx.x] = cur_max;
max_args[threadIdx.x] = cur_arg;
__syncthreads();
if (threadIdx.x == 0) {
cur_max = -FLT_MAX;
cur_arg = 0;
for (unsigned int i = 0; i < 32; i++)
if (max_vals[i] > cur_max) {
cur_max = max_vals[i];
cur_arg = max_args[i];
}
target[blockIdx.x] = cur_arg;
}
}
__global__ void kArgMaxRowwise(float* mat, float* target, unsigned int width, unsigned int height) {
__shared__ float max_vals[32];
__shared__ unsigned int max_args[32];
float cur_max = -FLT_MAX;
unsigned int cur_arg = 0;
float val = 0;
for (unsigned int i = threadIdx.x; i < width; i += 32) {
val = mat[i * height + blockIdx.x];
if (val > cur_max) {
cur_max = val;
cur_arg = i;
}
}
max_vals[threadIdx.x] = cur_max;
max_args[threadIdx.x] = cur_arg;
__syncthreads();
if (threadIdx.x == 0) {
cur_max = -FLT_MAX;
cur_arg = 0;
for (unsigned int i = 0; i < 32; i++)
if (max_vals[i] > cur_max) {
cur_max = max_vals[i];
cur_arg = max_args[i];
}
target[blockIdx.x] = cur_arg;
}
}
__global__ void kSign(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = mat[i] ? copysignf(1., mat[i]) : 0.;
}
}
__global__ void kApplySigmoid(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = 1 / (1 + __expf(-mat[i]));
}
}
__global__ void kApplyTanh(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
float mat_i, exp2x;
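// [Editor's note] Uses the identity tanh(x) = 1 - 2/(exp(2x) + 1), so one __expf per element suffices.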
for (unsigned int i = idx; i < len; i += numThreads) {
mat_i = mat[i];
exp2x = __expf(2 * mat_i);
target[i] = 1 - 2 / (exp2x + 1);
}
}
__global__ void kApplySoftThreshold(float* mat, float alpha, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
float f = mat[i];
target[i] = f > 0 ? max(0., f - alpha) : min(0., f + alpha);
}
}
__global__ void kApplyAbs(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = mat[i] * ((mat[i] > 0) - (mat[i] < 0));
}
}
__global__ void kApplyLog1PlusExp(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
float mat_i;
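// [Editor's note] Numerically stable softplus log(1 + exp(x)): for x > 0 the identity
// log(1 + exp(x)) = x + log(1 + exp(-x)) keeps the exponential from overflowing.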
for (unsigned int i = idx; i < len; i += numThreads) {
mat_i = mat[i];
if (mat_i > 0)
target[i] = (__logf(1 + __expf(-mat_i)) + mat_i);
else
target[i] = __logf(1 + __expf(mat_i));
}
}
__global__ void kLog(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = __logf(mat[i]);
}
}
__global__ void kExp(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = __expf(mat[i]);
}
}
__global__ void kGamma(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = tgammaf(mat[i]);
}
}
__global__ void kLogGamma(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = lgammaf(mat[i]);
}
}
__global__ void kSqrt(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = sqrt(mat[i]);
}
}
__global__ void kPow(float* mat, float pow, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = powf(mat[i], pow);
}
}
__global__ void kPowMatrix(float* mat, float* pow, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = powf(mat[i], pow[i]);
}
}
__global__ void kReciprocal(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads)
target[i] = 1.f / mat[i];
}
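/* Matrices are stored column-major, so in the vector-broadcast kernels below
vec[i % height] applies a column vector (one entry per row) to every column,
while vec[i / height] applies a row vector (one entry per column) to every row. */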
__global__ void kAddColVector(float* mat, float* vec, float* tgtMat, unsigned int width,
unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] + vec[i % height];
}
}
__global__ void kAddRowVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] + vec[i / height];
}
}
__global__ void kAddColMult(float* mat, float* vec, float* tgtMat, float mult,
unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] + mult * vec[i % height];
}
}
__global__ void kMultByColVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] * vec[i % height];
}
}
__global__ void kMultByRowVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] * vec[i / height];
}
}
__global__ void kDivByColVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] / vec[i % height];
}
}
__global__ void kDivByRowVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] / vec[i / height];
}
}
__global__ void kAdd(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] + b[i];
}
}
__global__ void kSubtract(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] - b[i];
}
}
__global__ void kDivide(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] / b[i];
}
}
__global__ void kMult(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] * b[i];
}
}
__global__ void kMultScalar(float* mat, float alpha, float* dest, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
dest[i] = alpha * mat[i];
}
}
__global__ void kAssignScalar(float* dest, float alpha, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
dest[i] = alpha;
}
}
__global__ void kDivideScalar(float* mat, float alpha, float* dest, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
dest[i] = mat[i] / alpha;
}
}
__global__ void kAddScalar(float* a, float alpha, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] + alpha;
}
}
__global__ void kSelectRows(float* source, float* target, float* indices, int nRowIs, int nCols, int nSourceRows){
__shared__ int sourceRowIndices[32];
const int startTargetRowI = blockIdx.x * 32;
const int tid = threadIdx.x;
const int localNRowIs = min(32, nRowIs-startTargetRowI);
// cooperatively load 32 row indices
if (tid < localNRowIs){
sourceRowIndices[tid] = int(indices[startTargetRowI + tid]);
if (sourceRowIndices[tid]<0)
sourceRowIndices[tid] += nSourceRows;
if (sourceRowIndices[tid]<0 || sourceRowIndices[tid]>=nSourceRows)
sourceRowIndices[tid] = -1;
}
__syncthreads();
// copy 32 rows
for (int i=0; i<localNRowIs; i++){
const int targetRowI = startTargetRowI + i, sourceRowI = sourceRowIndices[i];
for (int colI=tid; colI<nCols; colI+=32)
target[targetRowI * nCols + colI] = sourceRowI==-1 ? (1.0/0.0 -1.0/0.0) : source[sourceRowI * nCols + colI];
}
}
__global__ void kSetSelectedRows(float* target, float* source, float* indices, int nRowIs, int nCols, int nTargetRows){
__shared__ int targetRowIndices[32];
const int startSourceRowI = blockIdx.x * 32;
const int tid = threadIdx.x;
const int localNRowIs = min(32, nRowIs-startSourceRowI);
// cooperatively load 32 row indices
if (tid < localNRowIs){
targetRowIndices[tid] = int(indices[startSourceRowI + tid]);
if (targetRowIndices[tid]<0)
targetRowIndices[tid] += nTargetRows;
if (targetRowIndices[tid]<0 || targetRowIndices[tid]>=nTargetRows)
targetRowIndices[tid] = -1;
}
__syncthreads();
// copy 32 rows
for (int i=0; i<localNRowIs; i++){
const int sourceRowI = startSourceRowI + i, targetRowI = targetRowIndices[i];
for (int colI=tid; colI<nCols; colI+=32)
target[targetRowI * nCols + colI] = targetRowI==-1 ? (1.0/0.0 -1.0/0.0) : source[sourceRowI * nCols + colI];
}
}
__global__ void kWhere(float* condition_mat, float* if_mat, float* else_mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = condition_mat[i] ? if_mat[i] : else_mat[i];
}
}
|
fbadbd062ca83e24a1f2c7dfdf590b340e78020f.cu
|
#include "cudamat_kernels.cuh"
#include "float.h"
/* ------------------------- Random number generation ------------------------- */
__global__ void kSeedRandom(unsigned int* rndMults, unsigned long long* rndWords, unsigned int seed) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// The initial x is the seed and the initial carry is 1
unsigned long long rndWord = ((unsigned long long)seed << 32) + 1;
const unsigned int rndMult = rndMults[idx];
/*
* Run the chain for a few steps so that all the streams have a chance
* to differentiate. They start out generating similar random numbers
* because all the multipliers are similar.
*/
for(unsigned int i = 0; i < NUM_RND_BURNIN; i++) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
}
rndWords[idx] = rndWord;
}
__global__ void kRandomUniform(unsigned int* rndMults, unsigned long long* rndWords, float* gData, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
gData[i] = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
}
rndWords[idx] = rndWord;
}
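/* kRandomGaussian below turns pairs of uniform draws from the same
multiply-with-carry streams into normally distributed samples via the
Box-Muller transform: R = sqrtf(-2 ln u1), angle T = 2*PI*u2. */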
__global__ void kRandomGaussian(unsigned int* rndMults, unsigned long long* rndWords, float* gData, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
float rnd1, rnd2, R, T;
for(unsigned int i = idx; i < numElements; i += 2*NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd1 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd2 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
T = 2 * PI * rnd2;
R = sqrtf(-2 * __logf(rnd1));
gData[i] = R * __cosf(T);
if (i + NUM_RND_STREAMS < numElements)
gData[i + NUM_RND_STREAMS] = R * __sinf(T);
}
rndWords[idx] = rndWord;
}
/* ------------------------- Data copying ------------------------- */
/*
Copy row slice from source to target. There is a block for every 32x32 chunk being copied.
*/
__global__ void kGetRowSlice(float* source, float* target, int start, int end, int width, int height) {
const int row = start + blockIdx.x * 32 + threadIdx.x;
const int start_col = blockIdx.y * 32;
const int end_col = (start_col + 32 < width) ? start_col + 32: width;
const int target_height = end - start;
if (row < end) {
for (int cur_col = start_col; cur_col < end_col; cur_col++)
target[cur_col * target_height + row - start] = source[cur_col * height + row];
}
}
__global__ void kSetRowSlice(float* source, float* target, int start, int end, int width, int height) {
const int row = start + blockIdx.x * 32 + threadIdx.x;
const int start_col = blockIdx.y * 32;
const int end_col = (start_col + 32 < width) ? start_col + 32: width;
const int source_height = end - start;
if (row < end) {
for (int cur_col = start_col; cur_col < end_col; cur_col++)
target[cur_col * height + row] = source[cur_col * source_height + row - start];
//source[cur_col * height + row - start] = target[cur_col * target_height + row];
}
}
__global__ void kTranspose(float *odata, float *idata, int width, int height) {
__shared__ float block[COPY_BLOCK_SIZE][COPY_BLOCK_SIZE+1];
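// The +1 column of padding keeps the column-wise reads in the write phase below out of a single shared-memory bank, avoiding bank conflicts.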
// read the matrix tile into shared memory
unsigned int xIndex = blockIdx.x * COPY_BLOCK_SIZE + threadIdx.x;
unsigned int yIndex = blockIdx.y * COPY_BLOCK_SIZE + threadIdx.y;
if((xIndex < width) && (yIndex < height)) {
unsigned int index_in = yIndex * width + xIndex;
block[threadIdx.y][threadIdx.x] = idata[index_in];
}
__syncthreads();
// write the transposed matrix tile to global memory
xIndex = blockIdx.y * COPY_BLOCK_SIZE + threadIdx.x;
yIndex = blockIdx.x * COPY_BLOCK_SIZE + threadIdx.y;
if((xIndex < height) && (yIndex < width)) {
unsigned int index_out = yIndex * height + xIndex;
odata[index_out] = block[threadIdx.x][threadIdx.y];
}
}
/* ------------------------- Mathematical operations ------------------------- */
__global__ void kLessThan(float* mat1, float* mat2, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = mat1[i] < mat2[i];
}
}
__global__ void kLessThanScalar(float* mat, float val, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = mat[i] < val;
}
}
__global__ void kGreaterThan(float* mat1, float* mat2, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = mat1[i] > mat2[i];
}
}
__global__ void kGreaterThanScalar(float* mat, float val, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = mat[i] > val;
}
}
__global__ void kEquals(float* mat1, float* mat2, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = mat1[i] == mat2[i];
}
}
__global__ void kEqualsScalar(float* mat, float val, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = mat[i] == val;
}
}
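/* Sparse-dense product: data/indptr/indices describe the m x k sparse matrix in
CSR form, dense_data is a column-major k x n dense matrix, and the column-major
m x n result is target = alpha * sparse * dense + beta * target. */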
__global__ void kSparseDot(int m, int n, int k, float *data, int* indptr, int* indices, float *dense_data, float* target, float beta, float alpha) {
const unsigned int row = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int col = blockIdx.y * blockDim.y + threadIdx.y;
if (row < m && col < n) {
const int start = indptr[row];
const int end = indptr[row + 1];
float sum = 0;
for (int i = start; i < end; i++) {
sum += data[i] * dense_data[col * k + indices[i]];
}
const int pos = col * m + row;
target[pos] = alpha * sum + ((beta == 0) ? 0 : beta * target[pos]);
}
}
__global__ void kMinimum(float* mat1, float* mat2, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = fminf(mat1[i], mat2[i]);
}
}
__global__ void kMinimumScalar(float* mat, float val, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = fminf(mat[i], val);
}
}
__global__ void kMaximum(float* mat1, float* mat2, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = fmaxf(mat1[i], mat2[i]);
}
}
__global__ void kMaximumScalar(float* mat, float val, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = fmaxf(mat[i], val);
}
}
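/* The column-wise and row-wise reductions below assume one block per output
element with blockDim.x == 32: each thread strides over its column (or row) in
steps of 32, then thread 0 combines the 32 partial results from shared memory. */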
__global__ void kMinColumnwise(float* mat, float* target, unsigned int width, unsigned int height) {
__shared__ float min_vals[32];
float cur_min = FLT_MAX;
float val = 0;
for (unsigned int i = threadIdx.x; i < height; i += 32) {
val = mat[blockIdx.x * height + i];
if (val < cur_min)
cur_min = val;
}
min_vals[threadIdx.x] = cur_min;
__syncthreads();
if (threadIdx.x == 0) {
cur_min = FLT_MAX;
for (unsigned int i = 0; i < 32; i++)
if (min_vals[i] < cur_min)
cur_min = min_vals[i];
target[blockIdx.x] = cur_min;
}
}
__global__ void kMinRowwise(float* mat, float* target, unsigned int width, unsigned int height) {
__shared__ float min_vals[32];
float cur_min = FLT_MAX;
float val = 0;
for (unsigned int i = threadIdx.x; i < width; i += 32) {
val = mat[i * height + blockIdx.x];
if (val < cur_min)
cur_min = val;
}
min_vals[threadIdx.x] = cur_min;
__syncthreads();
if (threadIdx.x == 0) {
cur_min = FLT_MAX;
for (unsigned int i = 0; i < 32; i++)
if (min_vals[i] < cur_min)
cur_min = min_vals[i];
target[blockIdx.x] = cur_min;
}
}
__global__ void kMaxColumnwise(float* mat, float* target, unsigned int width, unsigned int height) {
__shared__ float max_vals[32];
float cur_max = -FLT_MAX;
float val = 0;
for (unsigned int i = threadIdx.x; i < height; i += 32) {
val = mat[blockIdx.x * height + i];
if (val > cur_max)
cur_max = val;
}
max_vals[threadIdx.x] = cur_max;
__syncthreads();
if (threadIdx.x == 0) {
cur_max = -FLT_MAX;
for (unsigned int i = 0; i < 32; i++)
if (max_vals[i] > cur_max)
cur_max = max_vals[i];
target[blockIdx.x] = cur_max;
}
}
__global__ void kMaxRowwise(float* mat, float* target, unsigned int width, unsigned int height) {
__shared__ float max_vals[32];
float cur_max = -FLT_MAX;
float val = 0;
for (unsigned int i = threadIdx.x; i < width; i += 32) {
val = mat[i * height + blockIdx.x];
if (val > cur_max)
cur_max = val;
}
max_vals[threadIdx.x] = cur_max;
__syncthreads();
if (threadIdx.x == 0) {
cur_max = -FLT_MAX;
for (unsigned int i = 0; i < 32; i++)
if (max_vals[i] > cur_max)
cur_max = max_vals[i];
target[blockIdx.x] = cur_max;
}
}
__global__ void kArgMinColumnwise(float* mat, float* target, unsigned int width, unsigned int height) {
__shared__ float min_vals[32];
__shared__ unsigned int min_args[32];
float cur_min = FLT_MAX;
unsigned int cur_arg = 0;
float val = 0;
for (unsigned int i = threadIdx.x; i < height; i += 32) {
val = mat[blockIdx.x * height + i];
if (val < cur_min) {
cur_min = val;
cur_arg = i;
}
}
min_vals[threadIdx.x] = cur_min;
min_args[threadIdx.x] = cur_arg;
__syncthreads();
if (threadIdx.x == 0) {
cur_min = FLT_MAX;
cur_arg = 0;
for (unsigned int i = 0; i < 32; i++)
if (min_vals[i] < cur_min) {
cur_min = min_vals[i];
cur_arg = min_args[i];
}
target[blockIdx.x] = cur_arg;
}
}
__global__ void kArgMinRowwise(float* mat, float* target, unsigned int width, unsigned int height) {
__shared__ float min_vals[32];
__shared__ unsigned int min_args[32];
float cur_min = FLT_MAX;
unsigned int cur_arg = 0;
float val = 0;
for (unsigned int i = threadIdx.x; i < width; i += 32) {
val = mat[i * height + blockIdx.x];
if (val < cur_min) {
cur_min = val;
cur_arg = i;
}
}
min_vals[threadIdx.x] = cur_min;
min_args[threadIdx.x] = cur_arg;
__syncthreads();
if (threadIdx.x == 0) {
cur_min = FLT_MAX;
cur_arg = 0;
for (unsigned int i = 0; i < 32; i++)
if (min_vals[i] < cur_min) {
cur_min = min_vals[i];
cur_arg = min_args[i];
}
target[blockIdx.x] = cur_arg;
}
}
__global__ void kArgMaxColumnwise(float* mat, float* target, unsigned int width, unsigned int height) {
__shared__ float max_vals[32];
__shared__ unsigned int max_args[32];
float cur_max = -FLT_MAX;
unsigned int cur_arg = 0;
float val = 0;
for (unsigned int i = threadIdx.x; i < height; i += 32) {
val = mat[blockIdx.x * height + i];
if (val > cur_max) {
cur_max = val;
cur_arg = i;
}
}
max_vals[threadIdx.x] = cur_max;
max_args[threadIdx.x] = cur_arg;
__syncthreads();
if (threadIdx.x == 0) {
cur_max = -FLT_MAX;
cur_arg = 0;
for (unsigned int i = 0; i < 32; i++)
if (max_vals[i] > cur_max) {
cur_max = max_vals[i];
cur_arg = max_args[i];
}
target[blockIdx.x] = cur_arg;
}
}
__global__ void kArgMaxRowwise(float* mat, float* target, unsigned int width, unsigned int height) {
__shared__ float max_vals[32];
__shared__ unsigned int max_args[32];
float cur_max = -FLT_MAX;
unsigned int cur_arg = 0;
float val = 0;
for (unsigned int i = threadIdx.x; i < width; i += 32) {
val = mat[i * height + blockIdx.x];
if (val > cur_max) {
cur_max = val;
cur_arg = i;
}
}
max_vals[threadIdx.x] = cur_max;
max_args[threadIdx.x] = cur_arg;
__syncthreads();
if (threadIdx.x == 0) {
cur_max = -FLT_MAX;
cur_arg = 0;
for (unsigned int i = 0; i < 32; i++)
if (max_vals[i] > cur_max) {
cur_max = max_vals[i];
cur_arg = max_args[i];
}
target[blockIdx.x] = cur_arg;
}
}
__global__ void kSign(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = mat[i] ? copysignf(1., mat[i]) : 0.;
}
}
__global__ void kApplySigmoid(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = 1 / (1 + __expf(-mat[i]));
}
}
__global__ void kApplyTanh(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
float mat_i, exp2x;
for (unsigned int i = idx; i < len; i += numThreads) {
mat_i = mat[i];
exp2x = __expf(2 * mat_i);
target[i] = 1 - 2 / (exp2x + 1);
}
}
__global__ void kApplySoftThreshold(float* mat, float alpha, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
float f = mat[i];
target[i] = f > 0 ? max(0., f - alpha) : min(0., f + alpha);
}
}
__global__ void kApplyAbs(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = mat[i] * ((mat[i] > 0) - (mat[i] < 0));
}
}
__global__ void kApplyLog1PlusExp(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
float mat_i;
for (unsigned int i = idx; i < len; i += numThreads) {
mat_i = mat[i];
if (mat_i > 0)
target[i] = (__logf(1 + __expf(-mat_i)) + mat_i);
else
target[i] = __logf(1 + __expf(mat_i));
}
}
__global__ void kLog(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = __logf(mat[i]);
}
}
__global__ void kExp(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = __expf(mat[i]);
}
}
__global__ void kGamma(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = tgammaf(mat[i]);
}
}
__global__ void kLogGamma(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = lgammaf(mat[i]);
}
}
__global__ void kSqrt(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = sqrt(mat[i]);
}
}
__global__ void kPow(float* mat, float pow, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = powf(mat[i], pow);
}
}
__global__ void kPowMatrix(float* mat, float* pow, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = powf(mat[i], pow[i]);
}
}
__global__ void kReciprocal(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads)
target[i] = 1.f / mat[i];
}
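/* Matrices are stored column-major, so in the vector-broadcast kernels below
vec[i % height] applies a column vector (one entry per row) to every column,
while vec[i / height] applies a row vector (one entry per column) to every row. */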
__global__ void kAddColVector(float* mat, float* vec, float* tgtMat, unsigned int width,
unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] + vec[i % height];
}
}
__global__ void kAddRowVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] + vec[i / height];
}
}
__global__ void kAddColMult(float* mat, float* vec, float* tgtMat, float mult,
unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] + mult * vec[i % height];
}
}
__global__ void kMultByColVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] * vec[i % height];
}
}
__global__ void kMultByRowVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] * vec[i / height];
}
}
__global__ void kDivByColVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] / vec[i % height];
}
}
__global__ void kDivByRowVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] / vec[i / height];
}
}
__global__ void kAdd(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] + b[i];
}
}
__global__ void kSubtract(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] - b[i];
}
}
__global__ void kDivide(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] / b[i];
}
}
__global__ void kMult(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] * b[i];
}
}
__global__ void kMultScalar(float* mat, float alpha, float* dest, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
dest[i] = alpha * mat[i];
}
}
__global__ void kAssignScalar(float* dest, float alpha, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
dest[i] = alpha;
}
}
__global__ void kDivideScalar(float* mat, float alpha, float* dest, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
dest[i] = mat[i] / alpha;
}
}
__global__ void kAddScalar(float* a, float alpha, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] + alpha;
}
}
__global__ void kSelectRows(float* source, float* target, float* indices, int nRowIs, int nCols, int nSourceRows){
__shared__ int sourceRowIndices[32];
const int startTargetRowI = blockIdx.x * 32;
const int tid = threadIdx.x;
const int localNRowIs = min(32, nRowIs-startTargetRowI);
// cooperatively load 32 row indices
if (tid < localNRowIs){
sourceRowIndices[tid] = int(indices[startTargetRowI + tid]);
if (sourceRowIndices[tid]<0)
sourceRowIndices[tid] += nSourceRows;
if (sourceRowIndices[tid]<0 || sourceRowIndices[tid]>=nSourceRows)
sourceRowIndices[tid] = -1;
}
__syncthreads();
// copy 32 rows
for (int i=0; i<localNRowIs; i++){
const int targetRowI = startTargetRowI + i, sourceRowI = sourceRowIndices[i];
for (int colI=tid; colI<nCols; colI+=32)
target[targetRowI * nCols + colI] = sourceRowI==-1 ? (1.0/0.0 -1.0/0.0) : source[sourceRowI * nCols + colI];
}
}
__global__ void kSetSelectedRows(float* target, float* source, float* indices, int nRowIs, int nCols, int nTargetRows){
__shared__ int targetRowIndices[32];
const int startSourceRowI = blockIdx.x * 32;
const int tid = threadIdx.x;
const int localNRowIs = min(32, nRowIs-startSourceRowI);
// cooperatively load 32 row indices
if (tid < localNRowIs){
targetRowIndices[tid] = int(indices[startSourceRowI + tid]);
if (targetRowIndices[tid]<0)
targetRowIndices[tid] += nTargetRows;
if (targetRowIndices[tid]<0 || targetRowIndices[tid]>=nTargetRows)
targetRowIndices[tid] = -1;
}
__syncthreads();
// copy 32 rows
for (int i=0; i<localNRowIs; i++){
const int sourceRowI = startSourceRowI + i, targetRowI = targetRowIndices[i];
for (int colI=tid; colI<nCols; colI+=32)
target[targetRowI * nCols + colI] = targetRowI==-1 ? (1.0/0.0 -1.0/0.0) : source[sourceRowI * nCols + colI];
}
}
__global__ void kWhere(float* condition_mat, float* if_mat, float* else_mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = condition_mat[i] ? if_mat[i] : else_mat[i];
}
}
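/* Illustrative host-side usage sketch for the grid-stride kernels above
(assumptions: the 128x128 launch shape is an arbitrary choice, and cudamat's
real host wrappers may configure this differently). Any shape works because
each thread strides by blockDim.x * gridDim.x until len is covered. */
static void exampleAddLaunch(float* d_a, float* d_b, float* d_dest, unsigned int len) {
const unsigned int threads = 128; // assumed values, not cudamat's configured defaults
const unsigned int blocks = 128;
kAdd<<<blocks, threads>>>(d_a, d_b, d_dest, len);
cudaDeviceSynchronize(); // block here so any launch error surfaces during debugging
}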
|
2b1b1c1001c8489c7c4f2f68466a01a23036eb99.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by grzegorz on 20.01.2020.
//
#include <cstring>
#include <cstdint>
#include "include/SHA1_cuda.cuh"
#define DIGEST_LENGTH 20
namespace SHA1_cuda {
struct block {
uint32_t a;
uint32_t b;
uint32_t c;
uint32_t d;
uint32_t e;
};
__constant__ block DEFAULT_DIGEST_BUFFER = {
0x67452301,
0xEFCDAB89,
0x98BADCFE,
0x10325476,
0xC3D2E1F0
};
__device__ uint32_t leftRotate(uint32_t x, uint32_t n) {
return (x << n) | (x >> (32 - n));
}
__device__ uint32_t funI(const uint32_t b, const uint32_t c, const uint32_t d) {
return b ^ c ^ d;
}
__device__ uint32_t funH(const uint32_t b, const uint32_t c, const uint32_t d) {
return (b & c) | (b & d) | (c & d);
}
__device__ uint32_t funG(const uint32_t b, const uint32_t c, const uint32_t d) {
return b ^ c ^ d;
}
__device__ uint32_t funF(const uint32_t b, const uint32_t c, const uint32_t d) {
return (b & c) | ((~b) & d);
}
__device__ uint32_t swap_bits(uint32_t x) {
uint8_t *ptr = reinterpret_cast<uint8_t *>(&x);
return (ptr[3] << 0) | (ptr[2] << 8) | (ptr[1] << 16) | (ptr[0] << 24);
}
__device__ void fillWorkingBuffer(const char *word, uint32_t *workingBuffer, unsigned int workingBufferLength,
unsigned int wordLength) {
unsigned int i = 0, j;
uint32_t *word_ptr = (uint32_t *) word;
for (i = 0; i < wordLength / 4; i++)
workingBuffer[i] = swap_bits(word_ptr[i]);
uint32_t split_word = 0;
for (j = 0; j < wordLength % 4; j++)
((uint8_t *) &split_word)[3 - j] = word[wordLength / 4 * 4 + j];
((uint8_t *) &split_word)[3 - j] = 0b10000000;
workingBuffer[i] = split_word;
i++;
while (i < workingBufferLength - 2) {
workingBuffer[i++] = 0;
}
uint64_t tmp = wordLength * 8;
std::memcpy(workingBuffer + i++, (uint32_t *) &tmp + 1, sizeof(uint32_t));
std::memcpy(workingBuffer + i++, (uint32_t *) &tmp, sizeof(uint32_t));
}
__global__ void calculateHashSum(unsigned char *digest, const char *word, unsigned long int workingBufferLength,
unsigned long int wordLength, unsigned long int n) {
unsigned long int threadId = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int wordBufferLength = wordLength + 4 - wordLength % 4;
if (threadId < n) {
uint32_t workingBuffer[256/4];
fillWorkingBuffer(word + wordBufferLength * threadId, workingBuffer, workingBufferLength, wordLength);
uint32_t w[80];
unsigned int numberOfChunks = workingBufferLength / 16;
block mdBuffer = DEFAULT_DIGEST_BUFFER;
block stepBuffer;
uint32_t temp;
for (unsigned int chunkNum = 0; chunkNum < numberOfChunks; chunkNum++) {
memcpy(w, workingBuffer + chunkNum * 16, 16 * sizeof(uint32_t));
for (int i = 16; i <= 79; i++)
w[i] = leftRotate(w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16], 1);
stepBuffer = mdBuffer;
for (int i = 0; i <= 79; i++) {
if (i <= 19)
temp = leftRotate(stepBuffer.a, 5) + funF(stepBuffer.b, stepBuffer.c, stepBuffer.d) +
stepBuffer.e + w[i] + 0x5A827999;
else if (i <= 39)
temp = leftRotate(stepBuffer.a, 5) + funG(stepBuffer.b, stepBuffer.c, stepBuffer.d) +
stepBuffer.e + w[i] + 0x6ED9EBA1;
else if (i <= 59)
temp = leftRotate(stepBuffer.a, 5) + funH(stepBuffer.b, stepBuffer.c, stepBuffer.d) +
stepBuffer.e + w[i] + 0x8F1BBCDC;
else
temp = leftRotate(stepBuffer.a, 5) + funI(stepBuffer.b, stepBuffer.c, stepBuffer.d) +
stepBuffer.e + w[i] + 0xCA62C1D6;
stepBuffer.e = stepBuffer.d;
stepBuffer.d = stepBuffer.c;
stepBuffer.c = leftRotate(stepBuffer.b, 30);
stepBuffer.b = stepBuffer.a;
stepBuffer.a = temp;
}
mdBuffer.a += stepBuffer.a;
mdBuffer.b += stepBuffer.b;
mdBuffer.c += stepBuffer.c;
mdBuffer.d += stepBuffer.d;
mdBuffer.e += stepBuffer.e;
}
mdBuffer.a = swap_bits(mdBuffer.a);
mdBuffer.b = swap_bits(mdBuffer.b);
mdBuffer.c = swap_bits(mdBuffer.c);
mdBuffer.d = swap_bits(mdBuffer.d);
mdBuffer.e = swap_bits(mdBuffer.e);
// mdBuffer.a=mdBuffer.b=mdBuffer.c=mdBuffer.d=mdBuffer.e = 0;
memcpy(digest + threadId * DIGEST_LENGTH, &mdBuffer, DIGEST_LENGTH);
}
}
}
|
2b1b1c1001c8489c7c4f2f68466a01a23036eb99.cu
|
//
// Created by grzegorz on 20.01.2020.
//
#include <cstring>
#include <cstdint>
#include "include/SHA1_cuda.cuh"
#define DIGEST_LENGTH 20
namespace SHA1_cuda {
struct block {
uint32_t a;
uint32_t b;
uint32_t c;
uint32_t d;
uint32_t e;
};
__constant__ block DEFAULT_DIGEST_BUFFER = {
0x67452301,
0xEFCDAB89,
0x98BADCFE,
0x10325476,
0xC3D2E1F0
};
__device__ uint32_t leftRotate(uint32_t x, uint32_t n) {
return (x << n) | (x >> (32 - n));
}
__device__ uint32_t funI(const uint32_t b, const uint32_t c, const uint32_t d) {
return b ^ c ^ d;
}
__device__ uint32_t funH(const uint32_t b, const uint32_t c, const uint32_t d) {
return (b & c) | (b & d) | (c & d);
}
__device__ uint32_t funG(const uint32_t b, const uint32_t c, const uint32_t d) {
return b ^ c ^ d;
}
__device__ uint32_t funF(const uint32_t b, const uint32_t c, const uint32_t d) {
return (b & c) | ((~b) & d);
}
__device__ uint32_t swap_bits(uint32_t x) {
uint8_t *ptr = reinterpret_cast<uint8_t *>(&x);
return (ptr[3] << 0) | (ptr[2] << 8) | (ptr[1] << 16) | (ptr[0] << 24);
}
__device__ void fillWorkingBuffer(const char *word, uint32_t *workingBuffer, unsigned int workingBufferLength,
unsigned int wordLength) {
unsigned int i = 0, j;
uint32_t *word_ptr = (uint32_t *) word;
for (i = 0; i < wordLength / 4; i++)
workingBuffer[i] = swap_bits(word_ptr[i]);
uint32_t split_word = 0;
for (j = 0; j < wordLength % 4; j++)
((uint8_t *) &split_word)[3 - j] = word[wordLength / 4 * 4 + j];
((uint8_t *) &split_word)[3 - j] = 0b10000000;
workingBuffer[i] = split_word;
i++;
while (i < workingBufferLength - 2) {
workingBuffer[i++] = 0;
}
uint64_t tmp = wordLength * 8;
std::memcpy(workingBuffer + i++, (uint32_t *) &tmp + 1, sizeof(uint32_t));
std::memcpy(workingBuffer + i++, (uint32_t *) &tmp, sizeof(uint32_t));
}
__global__ void calculateHashSum(unsigned char *digest, const char *word, unsigned long int workingBufferLength,
unsigned long int wordLength, unsigned long int n) {
unsigned long int threadId = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int wordBufferLength = wordLength + 4 - wordLength % 4;
if (threadId < n) {
uint32_t workingBuffer[256/4];
fillWorkingBuffer(word + wordBufferLength * threadId, workingBuffer, workingBufferLength, wordLength);
uint32_t w[80];
unsigned int numberOfChunks = workingBufferLength / 16;
block mdBuffer = DEFAULT_DIGEST_BUFFER;
block stepBuffer;
uint32_t temp;
for (unsigned int chunkNum = 0; chunkNum < numberOfChunks; chunkNum++) {
memcpy(w, workingBuffer + chunkNum * 16, 16 * sizeof(uint32_t));
for (int i = 16; i <= 79; i++)
w[i] = leftRotate(w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16], 1);
stepBuffer = mdBuffer;
for (int i = 0; i <= 79; i++) {
if (i <= 19)
temp = leftRotate(stepBuffer.a, 5) + funF(stepBuffer.b, stepBuffer.c, stepBuffer.d) +
stepBuffer.e + w[i] + 0x5A827999;
else if (i <= 39)
temp = leftRotate(stepBuffer.a, 5) + funG(stepBuffer.b, stepBuffer.c, stepBuffer.d) +
stepBuffer.e + w[i] + 0x6ED9EBA1;
else if (i <= 59)
temp = leftRotate(stepBuffer.a, 5) + funH(stepBuffer.b, stepBuffer.c, stepBuffer.d) +
stepBuffer.e + w[i] + 0x8F1BBCDC;
else
temp = leftRotate(stepBuffer.a, 5) + funI(stepBuffer.b, stepBuffer.c, stepBuffer.d) +
stepBuffer.e + w[i] + 0xCA62C1D6;
stepBuffer.e = stepBuffer.d;
stepBuffer.d = stepBuffer.c;
stepBuffer.c = leftRotate(stepBuffer.b, 30);
stepBuffer.b = stepBuffer.a;
stepBuffer.a = temp;
}
mdBuffer.a += stepBuffer.a;
mdBuffer.b += stepBuffer.b;
mdBuffer.c += stepBuffer.c;
mdBuffer.d += stepBuffer.d;
mdBuffer.e += stepBuffer.e;
}
mdBuffer.a = swap_bits(mdBuffer.a);
mdBuffer.b = swap_bits(mdBuffer.b);
mdBuffer.c = swap_bits(mdBuffer.c);
mdBuffer.d = swap_bits(mdBuffer.d);
mdBuffer.e = swap_bits(mdBuffer.e);
// mdBuffer.a=mdBuffer.b=mdBuffer.c=mdBuffer.d=mdBuffer.e = 0;
memcpy(digest + threadId * DIGEST_LENGTH, &mdBuffer, DIGEST_LENGTH);
}
}
}
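/* Illustrative host-side usage sketch (assumptions: words are already packed
by the caller with the per-word stride the kernel derives, wordLength < 56 so a
single 64-byte SHA-1 chunk suffices, and the 256-thread launch shape is an
arbitrary choice). */
static void exampleSha1Launch(const char* packedWords, unsigned long wordLength, unsigned long n, unsigned char* digestsOut) {
const unsigned long workingBufferLength = 16; // one 512-bit chunk per word
const unsigned long wordStride = wordLength + 4 - wordLength % 4; // same stride the kernel computes internally
char* d_words = NULL;
unsigned char* d_digests = NULL;
cudaMalloc((void**)&d_words, wordStride * n);
cudaMalloc((void**)&d_digests, DIGEST_LENGTH * n);
cudaMemcpy(d_words, packedWords, wordStride * n, cudaMemcpyHostToDevice);
const unsigned int threadsPerBlock = 256; // arbitrary choice
const unsigned int blocks = (unsigned int)((n + threadsPerBlock - 1) / threadsPerBlock);
SHA1_cuda::calculateHashSum<<<blocks, threadsPerBlock>>>(d_digests, d_words, workingBufferLength, wordLength, n);
cudaMemcpy(digestsOut, d_digests, DIGEST_LENGTH * n, cudaMemcpyDeviceToHost);
cudaFree(d_words);
cudaFree(d_digests);
}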
|
8e03d23c250e7421cf4079c17e2833811005400e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "BFS_kernel_one_block_spill.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
volatile unsigned int *frontier = NULL;
hipMalloc(&frontier, XSIZE*YSIZE);
unsigned int frontier_len = 1;
volatile unsigned int *cost = NULL;
hipMalloc(&cost, XSIZE*YSIZE);
volatile int *visited = NULL;
hipMalloc(&visited, XSIZE*YSIZE);
unsigned int *edgeArray = NULL;
hipMalloc(&edgeArray, XSIZE*YSIZE);
unsigned int *edgeArrayAux = NULL;
hipMalloc(&edgeArrayAux, XSIZE*YSIZE);
unsigned int numVertices = 1;
unsigned int numEdges = 1;
volatile unsigned int *frontier_length = NULL;
hipMalloc(&frontier_length, XSIZE*YSIZE);
const unsigned int max_mem = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((BFS_kernel_one_block_spill), dim3(gridBlock), dim3(threadBlock), 0, 0, frontier, frontier_len, cost, visited, edgeArray, edgeArrayAux, numVertices, numEdges, frontier_length, max_mem);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((BFS_kernel_one_block_spill), dim3(gridBlock), dim3(threadBlock), 0, 0, frontier, frontier_len, cost, visited, edgeArray, edgeArrayAux, numVertices, numEdges, frontier_length, max_mem);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((BFS_kernel_one_block_spill), dim3(gridBlock), dim3(threadBlock), 0, 0, frontier, frontier_len, cost, visited, edgeArray, edgeArrayAux, numVertices, numEdges, frontier_length, max_mem);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
8e03d23c250e7421cf4079c17e2833811005400e.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "BFS_kernel_one_block_spill.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
volatile unsigned int *frontier = NULL;
cudaMalloc(&frontier, XSIZE*YSIZE);
unsigned int frontier_len = 1;
volatile unsigned int *cost = NULL;
cudaMalloc(&cost, XSIZE*YSIZE);
volatile int *visited = NULL;
cudaMalloc(&visited, XSIZE*YSIZE);
unsigned int *edgeArray = NULL;
cudaMalloc(&edgeArray, XSIZE*YSIZE);
unsigned int *edgeArrayAux = NULL;
cudaMalloc(&edgeArrayAux, XSIZE*YSIZE);
unsigned int numVertices = 1;
unsigned int numEdges = 1;
volatile unsigned int *frontier_length = NULL;
cudaMalloc(&frontier_length, XSIZE*YSIZE);
const unsigned int max_mem = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
BFS_kernel_one_block_spill<<<gridBlock,threadBlock>>>(frontier,frontier_len,cost,visited,edgeArray,edgeArrayAux,numVertices,numEdges,frontier_length,max_mem);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
BFS_kernel_one_block_spill<<<gridBlock,threadBlock>>>(frontier,frontier_len,cost,visited,edgeArray,edgeArrayAux,numVertices,numEdges,frontier_length,max_mem);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
BFS_kernel_one_block_spill<<<gridBlock,threadBlock>>>(frontier,frontier_len,cost,visited,edgeArray,edgeArrayAux,numVertices,numEdges,frontier_length,max_mem);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
38139f3f5e77d853d42c698522638b70cf72cf78.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// This CUDA test program calculates standard deviations of randomly generated samples of SAMPLE_SIZE.
// The number of samples is defined by the variable nSamples in the main function.
//
// Created by Wagner Tsuchiya on 11/24/15.
// Copyright © 2015 Wagner Tsuchiya. All rights reserved.
//
#include <iostream>
#include <stdlib.h>
#include <time.h>
using namespace std;
#define SAMPLE_SIZE 1000
#define N_BLOCKS 100
/*
* Function that calculates the standard deviation of a sample.
* The input is an array with sampleArraySize that contains 1-N samples of sampleSize.
* E.g: {s(0, 0), s(0, 1), s(1, 0), s(1, 1), s(3, 0), s(3, 1)}, with sample size 2 and sampleArraySize 6.
*/
__global__ void stddevPointer(double *sample, double *output, int sampleSize, int sampleArraySize) {
// Check the sizeof arrays
int outputIndex = threadIdx.x + blockIdx.x * blockDim.x;
int sampleIndex = outputIndex * sampleSize;
output[outputIndex] = 0;
for (int j = 0; j < sampleSize; j++) {
if(sampleIndex + j >= sampleArraySize) {
output[outputIndex] = 42;
return;
}
output[outputIndex] += sample[sampleIndex + j];
}
output[outputIndex] /= (sampleSize - 1);
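// Note: as written, this stores the sum of the sample divided by (sampleSize - 1), not a standard deviation.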
}
double* generateRandomArray(int size) {
double *array = (double *)malloc(size * sizeof(double));
for(int i = 0; i < size; i++) {
array[i] = (double) rand() / RAND_MAX;
}
return array;
}
double diffclock(clock_t clock1, clock_t clock2)
{
double diffticks = clock1 - clock2;
double diffms = (diffticks) / (CLOCKS_PER_SEC / 1000);
return diffms;
}
int main(int argc, const char * argv[]) {
int nSamples = 100000;
int nBlocks = N_BLOCKS;
int nThreads = nSamples / nBlocks;
int sampleSize = SAMPLE_SIZE;
cout << "Threads: " << nThreads << endl;
cout << "Blocks: " << nBlocks << endl;
int sizeOfSampleArray = sampleSize * nSamples * sizeof(double);
int sizeOfOutput = nSamples * sizeof(double);
double *sample = generateRandomArray(nSamples * sampleSize);
double *deviceSample;
double *deviceOutput;
hipMalloc((void **) &deviceSample, sizeOfSampleArray);
hipMalloc((void **) &deviceOutput, sizeOfOutput);
hipMemcpy(deviceSample, sample, sizeOfSampleArray, hipMemcpyHostToDevice);
clock_t start = clock();
// Launch stddevPointer() kernel on GPU
hipLaunchKernelGGL(( stddevPointer), dim3(nBlocks),dim3(nThreads), 0, 0, deviceSample, deviceOutput, sampleSize, sizeOfSampleArray);
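// Note: the launch above is asynchronous; without a hipDeviceSynchronize() before reading the clock, this measures only launch overhead rather than kernel execution time.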
clock_t end = clock();
double* output = (double*) malloc(sizeOfOutput);
hipMemcpy(output, deviceOutput, sizeOfOutput, hipMemcpyDeviceToHost);
for (int i = 0; i < nSamples; i++) {
cout << "Std.Dev. #" << i + 1 << ": " << output[i] << endl;
}
cout << "Took " << diffclock(end, start) << "ms" << endl;
free(sample);
free(output);
hipFree(deviceSample);
hipFree(deviceOutput);
return 0;
}
|
38139f3f5e77d853d42c698522638b70cf72cf78.cu
|
//
// This CUDA test program calculates standard deviations of randomly generated samples of SAMPLE_SIZE.
// The number of samples is defined by the variable nSamples in the main function.
//
// Created by Wagner Tsuchiya on 11/24/15.
// Copyright © 2015 Wagner Tsuchiya. All rights reserved.
//
#include <iostream>
#include <stdlib.h>
#include <time.h>
using namespace std;
#define SAMPLE_SIZE 1000
#define N_BLOCKS 100
/*
* Function that calculates the standard deviation of a sample.
* The input is an array with sampleArraySize that contains 1-N samples of sampleSize.
* E.g: {s(0, 0), s(0, 1), s(1, 0), s(1, 1), s(3, 0), s(3, 1)}, with sample size 2 and sampleArraySize 6.
*/
__global__ void stddevPointer(double *sample, double *output, int sampleSize, int sampleArraySize) {
// Check the sizeof arrays
int outputIndex = threadIdx.x + blockIdx.x * blockDim.x;
int sampleIndex = outputIndex * sampleSize;
output[outputIndex] = 0;
for (int j = 0; j < sampleSize; j++) {
if(sampleIndex + j >= sampleArraySize) {
output[outputIndex] = 42;
return;
}
output[outputIndex] += sample[sampleIndex + j];
}
output[outputIndex] /= (sampleSize - 1);
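// Note: as written, this stores the sum of the sample divided by (sampleSize - 1), not a standard deviation.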
}
double* generateRandomArray(int size) {
double *array = (double *)malloc(size * sizeof(double));
for(int i = 0; i < size; i++) {
array[i] = (double) rand() / RAND_MAX;
}
return array;
}
double diffclock(clock_t clock1, clock_t clock2)
{
double diffticks = clock1 - clock2;
double diffms = (diffticks) / (CLOCKS_PER_SEC / 1000);
return diffms;
}
int main(int argc, const char * argv[]) {
int nSamples = 100000;
int nBlocks = N_BLOCKS;
int nThreads = nSamples / nBlocks;
int sampleSize = SAMPLE_SIZE;
cout << "Threads: " << nThreads << endl;
cout << "Blocks: " << nBlocks << endl;
int sizeOfSampleArray = sampleSize * nSamples * sizeof(double);
int sizeOfOutput = nSamples * sizeof(double);
double *sample = generateRandomArray(nSamples * sampleSize);
double *deviceSample;
double *deviceOutput;
cudaMalloc((void **) &deviceSample, sizeOfSampleArray);
cudaMalloc((void **) &deviceOutput, sizeOfOutput);
cudaMemcpy(deviceSample, sample, sizeOfSampleArray, cudaMemcpyHostToDevice);
clock_t start = clock();
// Launch stddevPointer() kernel on GPU
stddevPointer<<<nBlocks,nThreads>>>(deviceSample, deviceOutput, sampleSize, sizeOfSampleArray);
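// Note: the launch above is asynchronous; without a cudaDeviceSynchronize() before reading the clock, this measures only launch overhead rather than kernel execution time.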
clock_t end = clock();
double* output = (double*) malloc(sizeOfOutput);
cudaMemcpy(output, deviceOutput, sizeOfOutput, cudaMemcpyDeviceToHost);
for (int i = 0; i < nSamples; i++) {
cout << "Std.Dev. #" << i + 1 << ": " << output[i] << endl;
}
cout << "Took " << diffclock(end, start) << "ms" << endl;
free(sample);
free(output);
cudaFree(deviceSample);
cudaFree(deviceOutput);
return 0;
}
|
37bc0e33a224f6783e7ce979a56e6c1e4496a850.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
* @file rasterize.cu
* @brief CUDA-accelerated rasterization pipeline.
* @authors Skeleton code: Yining Karl Li, Kai Ninomiya, Shuai Shao (Shrek)
* @date 2012-2016
* @copyright University of Pennsylvania & STUDENT
*/
#include <cmath>
#include <cstdio>
#include <hip/hip_runtime.h>
#include <thrust/random.h>
#include <util/checkCUDAError.h>
#include <util/tiny_gltf_loader.h>
#include "rasterizeTools.h"
#include "rasterize.h"
#include <glm/gtc/quaternion.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <thrust/execution_policy.h>
#include "stream_compaction\efficient.h"
// A setting for toggling normal-based debug coloring of objects.
#define DEBUG_RENDER false
// A setting for toggling whether or not barycentric coordinates should properly
// color values across a primitive.
#define CORRECT_COLOR true
// A setting for toggling whether we should render only points.
#define RENDER_POINTS_ONLY false
// A setting for toggling whether we should render only lines.
#define RENDER_LINES_ONLY false
// A setting for toggling whether we should cull backfaces.
#define BACKFACE_CULLING true
namespace {
typedef unsigned short VertexIndex;
typedef glm::vec3 VertexAttributePosition;
typedef glm::vec3 VertexAttributeNormal;
typedef glm::vec2 VertexAttributeTexcoord;
typedef unsigned char TextureData;
typedef unsigned char BufferByte;
enum PrimitiveType{
Point = 1,
Line = 2,
Triangle = 3
};
struct VertexOut {
glm::vec4 pos;
// TODO: add new attributes to your VertexOut
// The attributes listed below might be useful,
// but always feel free to modify on your own
glm::vec3 eyePos; // eye space position used for shading
glm::vec3 eyeNor; // eye space normal used for shading, because normals are not preserved by the perspective transformation
glm::vec3 col;
glm::vec2 texcoord0;
TextureData* dev_diffuseTex = NULL;
int texWidth, texHeight;
};
struct Primitive {
PrimitiveType primitiveType = Triangle; // C++ 11 init
VertexOut v[3];
bool isVisible;
};
struct Fragment {
glm::vec3 color;
// TODO: add new attributes to your Fragment
// The attributes listed below might be useful,
// but always feel free to modify on your own
// Pass through everything from VertexOut
glm::vec3 eyePos; // eye space position used for shading
glm::vec3 eyeNor; // eye space normal used for shading, because normals are not preserved by the perspective transformation
glm::vec2 texcoord0;
TextureData* dev_diffuseTex = NULL;
int texWidth, texHeight;
};
struct PrimitiveDevBufPointers {
int primitiveMode; //from tinygltfloader macro
PrimitiveType primitiveType;
int numPrimitives;
int numIndices;
int numVertices;
// Vertex In, const after loaded
VertexIndex* dev_indices;
VertexAttributePosition* dev_position;
VertexAttributeNormal* dev_normal;
VertexAttributeTexcoord* dev_texcoord0;
// Materials, add more attributes when needed
TextureData* dev_diffuseTex;
int diffuseTexWidth;
int diffuseTexHeight;
// TextureData* dev_specularTex;
// TextureData* dev_normalTex;
// ...
// Vertex Out, vertex used for rasterization, this is changing every frame
VertexOut* dev_verticesOut;
// TODO: add more attributes when needed
};
}
static std::map<std::string, std::vector<PrimitiveDevBufPointers>> mesh2PrimitivesMap;
static int width = 0;
static int height = 0;
static int totalNumPrimitives = 0;
static Primitive *dev_primitives = NULL;
static Fragment *dev_fragmentBuffer = NULL;
static glm::vec3 *dev_framebuffer = NULL;
static int * dev_depth = NULL; // you might need this buffer when doing depth test
static int * dev_lock = NULL;
/**
* Kernel that writes the image to the OpenGL PBO directly.
*/
__global__
void sendImageToPBO(uchar4 *pbo, int w, int h, glm::vec3 *image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * w);
if (x < w && y < h) {
glm::vec3 color;
color.x = glm::clamp(image[index].x, 0.0f, 1.0f) * 255.0;
color.y = glm::clamp(image[index].y, 0.0f, 1.0f) * 255.0;
color.z = glm::clamp(image[index].z, 0.0f, 1.0f) * 255.0;
// Each thread writes one pixel location in the texture (textel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
/**
* Writes fragment colors to the framebuffer
*/
__global__
void render(int w, int h, Fragment *fragmentBuffer, glm::vec3 *framebuffer) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * w);
// Make sure the fragment is within bounds.
if (x < w && y < h) {
// Retrieve the fragment's normal and send light from eyePos.
glm::vec3 surfaceNormal = glm::normalize(fragmentBuffer[index].eyeNor);
glm::vec3 lightDirection = glm::normalize(fragmentBuffer[index].eyePos);
// If this setting is enabled, shade using surface normals.
if (DEBUG_RENDER && !RENDER_POINTS_ONLY && !RENDER_LINES_ONLY) {
framebuffer[index] = fragmentBuffer[index].color
* glm::dot(surfaceNormal, lightDirection);
// If it has a texture...
} else if (!RENDER_POINTS_ONLY && !RENDER_LINES_ONLY
&& fragmentBuffer[index].dev_diffuseTex != NULL) {
// Retrieve texture coordinates and use texture to color it.
int u = (fragmentBuffer[index].texcoord0.x * fragmentBuffer[index].texWidth);
int v = (fragmentBuffer[index].texcoord0.y * fragmentBuffer[index].texHeight);
int texIndex = u + v * fragmentBuffer[index].texWidth;
float t0 = (float)fragmentBuffer[index].dev_diffuseTex[texIndex * 3 + 0];
float t1 = (float)fragmentBuffer[index].dev_diffuseTex[texIndex * 3 + 1];
float t2 = (float)fragmentBuffer[index].dev_diffuseTex[texIndex * 3 + 2];
framebuffer[index] = glm::vec3(t0 / 255.f, t1 / 255.f, t2 / 255.f)
* glm::dot(surfaceNormal, lightDirection);
// Otherwise something might be wrong;
// fall back to the fragment's default color (the surface-normal color).
} else {
framebuffer[index] = fragmentBuffer[index].color;
}
}
}
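// A note on the shading above (descriptive only): lightDirection = normalize(eyePos) is
// the eye-space direction from the camera (which sits at the eye-space origin) toward the
// fragment, so scaling the color by dot(eyeNor, lightDirection) acts as a single Lambert
// term with the light effectively placed at the camera. Whether that dot product comes out
// positive depends on the eye-space normal convention established upstream.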
/**
* Called once at the beginning of the program to allocate memory.
*/
void rasterizeInit(int w, int h) {
width = w;
height = h;
hipFree(dev_fragmentBuffer);
hipMalloc(&dev_fragmentBuffer, width * height * sizeof(Fragment));
hipMemset(dev_fragmentBuffer, 0, width * height * sizeof(Fragment));
hipFree(dev_framebuffer);
hipMalloc(&dev_framebuffer, width * height * sizeof(glm::vec3));
hipMemset(dev_framebuffer, 0, width * height * sizeof(glm::vec3));
hipFree(dev_lock);
hipMalloc(&dev_lock, width * height * sizeof(int));
hipMemset(dev_lock, 0, width * height * sizeof(int));
hipFree(dev_depth);
hipMalloc(&dev_depth, width * height * sizeof(int));
checkCUDAError("rasterizeInit");
}
__global__
void initDepth(int w, int h, int * depth)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < w && y < h)
{
int index = x + (y * w);
depth[index] = INT_MAX;
}
}
/**
 * Kernel with stride support that can sometimes replace hipMemcpy.
 * One thread is responsible for copying one component.
*/
__global__
void _deviceBufferCopy(int N, BufferByte* dev_dst, const BufferByte* dev_src, int n, int byteStride, int byteOffset, int componentTypeByteSize) {
// Attribute (vec3 position)
// component (3 * float)
// byte (4 * byte)
// id of component
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < N) {
int count = i / n;
int offset = i - count * n; // which component of the attribute
for (int j = 0; j < componentTypeByteSize; j++) {
dev_dst[count * componentTypeByteSize * n
+ offset * componentTypeByteSize
+ j]
=
dev_src[byteOffset
+ count * (byteStride == 0 ? componentTypeByteSize * n : byteStride)
+ offset * componentTypeByteSize
+ j];
}
}
}
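// A minimal worked example of the index arithmetic above (illustrative values, not tied
// to any particular model): copying a tightly packed VEC3 FLOAT attribute means n = 3,
// componentTypeByteSize = 4, byteStride = 0 and byteOffset = 0. For thread i = 7:
//   count  = 7 / 3 = 2      (third vertex)
//   offset = 7 - 2 * 3 = 1  (its y component)
//   destination bytes: 2*4*3 + 1*4 + j = 28..31
//   source bytes:      0 + 2*(4*3) + 1*4 + j = 28..31
// With a non-zero byteStride (interleaved source data) only the source side changes:
// count * byteStride replaces count * (componentTypeByteSize * n).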
__global__
void _nodeMatrixTransform(
int numVertices,
VertexAttributePosition* position,
VertexAttributeNormal* normal,
glm::mat4 MV, glm::mat3 MV_normal) {
// vertex id
int vid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (vid < numVertices) {
position[vid] = glm::vec3(MV * glm::vec4(position[vid], 1.0f));
normal[vid] = glm::normalize(MV_normal * normal[vid]);
}
}
glm::mat4 getMatrixFromNodeMatrixVector(const tinygltf::Node & n) {
glm::mat4 curMatrix(1.0);
const std::vector<double> &m = n.matrix;
if (m.size() > 0) {
// matrix, copy it
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++) {
curMatrix[i][j] = (float)m.at(4 * i + j);
}
}
} else {
// no matrix, use rotation, scale, translation
if (n.translation.size() > 0) {
curMatrix[3][0] = n.translation[0];
curMatrix[3][1] = n.translation[1];
curMatrix[3][2] = n.translation[2];
}
if (n.rotation.size() > 0) {
glm::mat4 R;
glm::quat q;
q[0] = n.rotation[0];
q[1] = n.rotation[1];
q[2] = n.rotation[2];
R = glm::mat4_cast(q);
curMatrix = curMatrix * R;
}
if (n.scale.size() > 0) {
curMatrix = curMatrix * glm::scale(glm::vec3(n.scale[0], n.scale[1], n.scale[2]));
}
}
return curMatrix;
}
void traverseNode (
std::map<std::string, glm::mat4> & n2m,
const tinygltf::Scene & scene,
const std::string & nodeString,
const glm::mat4 & parentMatrix
)
{
const tinygltf::Node & n = scene.nodes.at(nodeString);
glm::mat4 M = parentMatrix * getMatrixFromNodeMatrixVector(n);
n2m.insert(std::pair<std::string, glm::mat4>(nodeString, M));
auto it = n.children.begin();
auto itEnd = n.children.end();
for (; it != itEnd; ++it) {
traverseNode(n2m, scene, *it, M);
}
}
void rasterizeSetBuffers(const tinygltf::Scene & scene) {
totalNumPrimitives = 0;
std::map<std::string, BufferByte*> bufferViewDevPointers;
// 1. copy all `bufferViews` to device memory
{
std::map<std::string, tinygltf::BufferView>::const_iterator it(
scene.bufferViews.begin());
std::map<std::string, tinygltf::BufferView>::const_iterator itEnd(
scene.bufferViews.end());
for (; it != itEnd; it++) {
const std::string key = it->first;
const tinygltf::BufferView &bufferView = it->second;
if (bufferView.target == 0) {
continue; // Unsupported bufferView.
}
const tinygltf::Buffer &buffer = scene.buffers.at(bufferView.buffer);
BufferByte* dev_bufferView;
hipMalloc(&dev_bufferView, bufferView.byteLength);
hipMemcpy(dev_bufferView, &buffer.data.front() + bufferView.byteOffset, bufferView.byteLength, hipMemcpyHostToDevice);
checkCUDAError("Set BufferView Device Mem");
bufferViewDevPointers.insert(std::make_pair(key, dev_bufferView));
}
}
// 2. for each mesh:
// for each primitive:
// build device buffers of indices, material, and each attribute
// and store these pointers in a map
{
std::map<std::string, glm::mat4> nodeString2Matrix;
auto rootNodeNamesList = scene.scenes.at(scene.defaultScene);
{
auto it = rootNodeNamesList.begin();
auto itEnd = rootNodeNamesList.end();
for (; it != itEnd; ++it) {
traverseNode(nodeString2Matrix, scene, *it, glm::mat4(1.0f));
}
}
// parse through node to access mesh
auto itNode = nodeString2Matrix.begin();
auto itEndNode = nodeString2Matrix.end();
for (; itNode != itEndNode; ++itNode) {
const tinygltf::Node & N = scene.nodes.at(itNode->first);
const glm::mat4 & matrix = itNode->second;
const glm::mat3 & matrixNormal = glm::transpose(glm::inverse(glm::mat3(matrix)));
auto itMeshName = N.meshes.begin();
auto itEndMeshName = N.meshes.end();
for (; itMeshName != itEndMeshName; ++itMeshName) {
const tinygltf::Mesh & mesh = scene.meshes.at(*itMeshName);
auto res = mesh2PrimitivesMap.insert(std::pair<std::string, std::vector<PrimitiveDevBufPointers>>(mesh.name, std::vector<PrimitiveDevBufPointers>()));
std::vector<PrimitiveDevBufPointers> & primitiveVector = (res.first)->second;
// for each primitive
for (size_t i = 0; i < mesh.primitives.size(); i++) {
const tinygltf::Primitive &primitive = mesh.primitives[i];
if (primitive.indices.empty())
return;
// TODO: add new attributes for your PrimitiveDevBufPointers when you add new attributes
VertexIndex* dev_indices = NULL;
VertexAttributePosition* dev_position = NULL;
VertexAttributeNormal* dev_normal = NULL;
VertexAttributeTexcoord* dev_texcoord0 = NULL;
// ----------Indices-------------
const tinygltf::Accessor &indexAccessor = scene.accessors.at(primitive.indices);
const tinygltf::BufferView &bufferView = scene.bufferViews.at(indexAccessor.bufferView);
BufferByte* dev_bufferView = bufferViewDevPointers.at(indexAccessor.bufferView);
// assume type is SCALAR for indices
int n = 1;
int numIndices = indexAccessor.count;
int componentTypeByteSize = sizeof(VertexIndex);
int byteLength = numIndices * n * componentTypeByteSize;
dim3 numThreadsPerBlock(128);
dim3 numBlocks((numIndices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
hipMalloc(&dev_indices, byteLength);
_deviceBufferCopy << <numBlocks, numThreadsPerBlock >> > (
numIndices,
(BufferByte*)dev_indices,
dev_bufferView,
n,
indexAccessor.byteStride,
indexAccessor.byteOffset,
componentTypeByteSize);
checkCUDAError("Set Index Buffer");
// ---------Primitive Info-------
// Warning: LINE_STRIP is not supported in tinygltfloader
int numPrimitives;
PrimitiveType primitiveType;
switch (primitive.mode) {
case TINYGLTF_MODE_TRIANGLES:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices / 3;
break;
case TINYGLTF_MODE_TRIANGLE_STRIP:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices - 2;
break;
case TINYGLTF_MODE_TRIANGLE_FAN:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices - 2;
break;
case TINYGLTF_MODE_LINE:
primitiveType = PrimitiveType::Line;
numPrimitives = numIndices / 2;
break;
case TINYGLTF_MODE_LINE_LOOP:
primitiveType = PrimitiveType::Line;
numPrimitives = numIndices + 1;
break;
case TINYGLTF_MODE_POINTS:
primitiveType = PrimitiveType::Point;
numPrimitives = numIndices;
break;
default:
// output error
break;
};
// ----------Attributes-------------
auto it(primitive.attributes.begin());
auto itEnd(primitive.attributes.end());
int numVertices = 0;
// for each attribute
for (; it != itEnd; it++) {
const tinygltf::Accessor &accessor = scene.accessors.at(it->second);
const tinygltf::BufferView &bufferView = scene.bufferViews.at(accessor.bufferView);
int n = 1;
if (accessor.type == TINYGLTF_TYPE_SCALAR) {
n = 1;
}
else if (accessor.type == TINYGLTF_TYPE_VEC2) {
n = 2;
}
else if (accessor.type == TINYGLTF_TYPE_VEC3) {
n = 3;
}
else if (accessor.type == TINYGLTF_TYPE_VEC4) {
n = 4;
}
BufferByte * dev_bufferView = bufferViewDevPointers.at(accessor.bufferView);
BufferByte ** dev_attribute = NULL;
numVertices = accessor.count;
int componentTypeByteSize;
// Note: since the type of our attribute array (dev_position) is static (float32)
// We assume the glTF model attribute types are 5126 (FLOAT) here
if (it->first.compare("POSITION") == 0) {
componentTypeByteSize = sizeof(VertexAttributePosition) / n;
dev_attribute = (BufferByte**)&dev_position;
}
else if (it->first.compare("NORMAL") == 0) {
componentTypeByteSize = sizeof(VertexAttributeNormal) / n;
dev_attribute = (BufferByte**)&dev_normal;
}
else if (it->first.compare("TEXCOORD_0") == 0) {
componentTypeByteSize = sizeof(VertexAttributeTexcoord) / n;
dev_attribute = (BufferByte**)&dev_texcoord0;
}
std::cout << accessor.bufferView << " - " << it->second << " - " << it->first << '\n';
dim3 numThreadsPerBlock(128);
dim3 numBlocks((n * numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
int byteLength = numVertices * n * componentTypeByteSize;
hipMalloc(dev_attribute, byteLength);
_deviceBufferCopy << <numBlocks, numThreadsPerBlock >> > (
n * numVertices,
*dev_attribute,
dev_bufferView,
n,
accessor.byteStride,
accessor.byteOffset,
componentTypeByteSize);
std::string msg = "Set Attribute Buffer: " + it->first;
checkCUDAError(msg.c_str());
}
// malloc for VertexOut
VertexOut* dev_vertexOut;
hipMalloc(&dev_vertexOut, numVertices * sizeof(VertexOut));
checkCUDAError("Malloc VertexOut Buffer");
// ----------Materials-------------
// You only need to worry about this part once you start to
// implement textures for your rasterizer
TextureData* dev_diffuseTex = NULL;
int diffuseTexWidth = 0;
int diffuseTexHeight = 0;
if (!primitive.material.empty()) {
const tinygltf::Material &mat = scene.materials.at(primitive.material);
printf("material.name = %s\n", mat.name.c_str());
if (mat.values.find("diffuse") != mat.values.end()) {
std::string diffuseTexName = mat.values.at("diffuse").string_value;
if (scene.textures.find(diffuseTexName) != scene.textures.end()) {
const tinygltf::Texture &tex = scene.textures.at(diffuseTexName);
if (scene.images.find(tex.source) != scene.images.end()) {
const tinygltf::Image &image = scene.images.at(tex.source);
size_t s = image.image.size() * sizeof(TextureData);
hipMalloc(&dev_diffuseTex, s);
hipMemcpy(dev_diffuseTex, &image.image.at(0), s, hipMemcpyHostToDevice);
diffuseTexWidth = image.width;
diffuseTexHeight = image.height;
checkCUDAError("Set Texture Image data");
}
}
}
// TODO: write your code for other materials
// You may have to take a look at tinygltfloader
// You can also use the diffuse-material loading code above as a starting point
}
// ---------Node hierarchy transform--------
hipDeviceSynchronize();
dim3 numBlocksNodeTransform((numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
_nodeMatrixTransform << <numBlocksNodeTransform, numThreadsPerBlock >> > (
numVertices,
dev_position,
dev_normal,
matrix,
matrixNormal);
checkCUDAError("Node hierarchy transformation");
// at the end of the for loop of primitive
// push dev pointers to map
primitiveVector.push_back(PrimitiveDevBufPointers{
primitive.mode,
primitiveType,
numPrimitives,
numIndices,
numVertices,
dev_indices,
dev_position,
dev_normal,
dev_texcoord0,
dev_diffuseTex,
diffuseTexWidth,
diffuseTexHeight,
dev_vertexOut //VertexOut
});
totalNumPrimitives += numPrimitives;
} // for each primitive
} // for each mesh
} // for each node
}
// 3. Malloc for dev_primitives
{
hipMalloc(&dev_primitives, totalNumPrimitives * sizeof(Primitive));
}
// Finally, hipFree raw dev_bufferViews
{
std::map<std::string, BufferByte*>::const_iterator it(bufferViewDevPointers.begin());
std::map<std::string, BufferByte*>::const_iterator itEnd(bufferViewDevPointers.end());
//bufferViewDevPointers
for (; it != itEnd; it++) {
hipFree(it->second);
}
checkCUDAError("Free BufferView Device Mem");
}
}
__global__
void _vertexTransformAndAssembly(
int numVertices,
PrimitiveDevBufPointers primitive,
glm::mat4 MVP, glm::mat4 MV, glm::mat3 MV_normal,
int width, int height) {
// vertex id
int vid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (vid < numVertices) {
// TODO: Apply vertex transformation here
// Multiply the MVP matrix for each vertex position, this will transform everything into clipping space
// Then divide the pos by its w element to transform into NDC space
// Finally transform x and y to viewport space
glm::vec3 position = primitive.dev_position[vid];
glm::vec3 normal = primitive.dev_normal[vid];
glm::vec4 homogenous = glm::vec4(position.x, position.y, position.z, 1);
glm::vec4 projectionSpace = (MV * homogenous);
glm::vec4 clippingSpace = (MVP * homogenous);
clippingSpace = (clippingSpace / clippingSpace.w);
clippingSpace.x = 0.5f * (float)width * (clippingSpace.x + 1);
clippingSpace.y = 0.5f * (float)height * (-clippingSpace.y + 1);
// TODO: Apply vertex assembly here
// Assemble all attribute arrays into the primitive array
// Pack positions, normals, etc. into VertexOut.
primitive.dev_verticesOut[vid].pos = clippingSpace;
primitive.dev_verticesOut[vid].eyeNor = MV_normal * normal;
primitive.dev_verticesOut[vid].col = glm::vec3();
primitive.dev_verticesOut[vid].dev_diffuseTex = primitive.dev_diffuseTex;
primitive.dev_verticesOut[vid].eyePos = glm::vec3(projectionSpace.x,
projectionSpace.y, projectionSpace.z);
primitive.dev_verticesOut[vid].texcoord0 = primitive.dev_texcoord0[vid];
primitive.dev_verticesOut[vid].texHeight = primitive.diffuseTexHeight;
primitive.dev_verticesOut[vid].texWidth = primitive.diffuseTexWidth;
}
}
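// A quick sanity check of the viewport mapping above: after the divide by w, the x and y
// components are in NDC, i.e. [-1, 1] on both axes, and
//   x_screen = 0.5 * width  * (x_ndc + 1)   maps x_ndc = -1 to 0 and x_ndc = +1 to width,
//   y_screen = 0.5 * height * (-y_ndc + 1)  maps y_ndc = +1 to 0 (top row) and y_ndc = -1 to height,
// the sign flip accounting for NDC y pointing up while framebuffer rows grow downward.
// Despite its name, "clippingSpace" therefore ends up holding pixel-space x/y (with NDC z),
// which is what the rasterizer below consumes.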
static int curPrimitiveBeginId = 0;
__global__
void _primitiveAssembly(int numIndices, int curPrimitiveBeginId, Primitive* dev_primitives, PrimitiveDevBufPointers primitive) {
// index id
int iid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (iid < numIndices) {
// TODO: uncomment the following code for a start
// This is primitive assembly for triangles
int pid; // id for cur primitives vector
if (primitive.primitiveMode == TINYGLTF_MODE_TRIANGLES) {
pid = iid / (int)primitive.primitiveType;
dev_primitives[pid + curPrimitiveBeginId].v[iid % (int)primitive.primitiveType]
= primitive.dev_verticesOut[primitive.dev_indices[iid]];
}
// TODO: other primitive types (point, line)
if (primitive.primitiveMode == TINYGLTF_MODE_POINTS) {
pid = iid / (int)primitive.primitiveType;
dev_primitives[pid + curPrimitiveBeginId].v[iid % (int)primitive.primitiveType]
= primitive.dev_verticesOut[primitive.dev_indices[iid]];
}
if (primitive.primitiveMode == TINYGLTF_MODE_LINE) {
pid = iid / (int)primitive.primitiveType;
dev_primitives[pid + curPrimitiveBeginId].v[iid % (int)primitive.primitiveType]
= primitive.dev_verticesOut[primitive.dev_indices[iid]];
}
// From Wikipedia:
// One method of implementing back-face culling is by discarding all triangles
// where the dot product of their surface normal and the camera-to-triangle vector
// is greater than or equal to zero. Assume the camera looks down the z-axis.
dev_primitives[pid + curPrimitiveBeginId].isVisible =
(glm::dot(primitive.dev_verticesOut[primitive.dev_indices[iid]].eyeNor,
glm::vec3(0, 0, 1)) < 0);
}
}
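// Back-face test illustration (a sketch; the exact sign depends on the eye-space normal
// convention produced upstream): with the camera at the eye-space origin looking along the
// z-axis, a normal of (0, 0, -1) gives dot((0,0,-1), (0,0,1)) = -1 < 0 and the primitive is
// kept, while a normal of (0, 0, 1) gives +1 and it is flagged as not visible. Also note
// that for a triangle three threads write the same isVisible flag, each from its own
// vertex normal, so whichever write lands last determines the final value.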
__global__
void kernRasterize(int totalNumPrimitives, Primitive* dev_primitives,
Fragment* dev_fragmentBuffer, int* dev_depth, int* dev_lock,
int width, int height) {
// Get the index for this specific primitive and make sure it is valid.
int pid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pid < totalNumPrimitives) {
// Get the vertex data for the triangle.
VertexOut first = dev_primitives[pid].v[0];
VertexOut second = dev_primitives[pid].v[1];
VertexOut third = dev_primitives[pid].v[2];
// Define the triangle as required by rasterizeTools.h helpers.
glm::vec3 firstPos3 = glm::vec3(first.pos.x, first.pos.y, first.pos.z);
glm::vec3 secondPos3 = glm::vec3(second.pos.x, second.pos.y, second.pos.z);
glm::vec3 thirdPos3 = glm::vec3(third.pos.x, third.pos.y, third.pos.z);
glm::vec3 tri[3] = { firstPos3, secondPos3, thirdPos3 };
// Check if points should be rendered.
if (RENDER_POINTS_ONLY) {
int x1 = firstPos3.x;
int x2 = secondPos3.x;
int x3 = thirdPos3.x;
int y1 = firstPos3.y;
int y2 = secondPos3.y;
int y3 = thirdPos3.y;
int indexFirst = x1 + (width * y1);
int indexSecond = x2 + (width * y2);
int indexThird = x3 + (width * y3);
dev_fragmentBuffer[indexFirst].color = first.eyeNor;
dev_fragmentBuffer[indexSecond].color = second.eyeNor;
dev_fragmentBuffer[indexThird].color = third.eyeNor;
// Check if lines should be rendered.
} else if (RENDER_LINES_ONLY) {
// Find edge length, slope, and direction.
float lengthX = secondPos3.x - firstPos3.x;
float lengthY = secondPos3.y - firstPos3.y;
float slope = glm::abs(lengthY / lengthX);
int xDir = 1;
if (lengthX < 0) {
xDir = -1;
}
int yDir = 1;
if (lengthY < 0) {
yDir = -1;
}
// Draw just this one edge per primitive. Triangle outlines still take
// shape because neighboring primitives contribute their own first edges.
for (int i = 0; i < glm::abs(lengthX); i++) {
int u = (int)firstPos3.x + (i * xDir);
int v = (int)firstPos3.y + (i * yDir * slope);
int indexFirst = u + (width * v);
dev_fragmentBuffer[indexFirst].color = first.eyeNor;
}
// Render triangles.
} else {
// Find the bounding box of this triangle.
AABB bounds = getAABBForTriangle(tri);
// Scan bounding box for triangle to rasterize.
for (int x = bounds.min.x; x <= bounds.max.x; x++) {
for (int y = bounds.min.y; y <= bounds.max.y; y++) {
// Get barycentric coordinates.
glm::vec3 barycentric = calculateBarycentricCoordinate(tri, glm::vec2(x, y));
// Only fill coordinate if it's in the triangle.
bool inTriangle = isBarycentricCoordInBounds(barycentric);
if (inTriangle) {
// Attempt to lock this pixel.
int index = x + (width * y);
int lockValue = -1;
while (lockValue != 0) {
lockValue = atomicCAS(&dev_lock[index], 0, 1);
// When the pixel is locked, actually rasterize the triangle.
if (lockValue == 0) {
// Decide which fragment to write into buffer when multiple fragments overlap in pixel
// coordinates. Need atomic operation "atomicMin(&depth[idx], depth)"
float depth = 1000 * getZAtCoordinate(barycentric, tri);
if (dev_depth[index] >= depth) {
dev_depth[index] = depth;
// Update the fragment to pass on through the rasterizer.
if (CORRECT_COLOR) {
dev_fragmentBuffer[index].color = barycentric.x * first.eyeNor
+ barycentric.y * second.eyeNor + barycentric.z * third.eyeNor;
} else {
dev_fragmentBuffer[index].color = first.eyeNor;
}
dev_fragmentBuffer[index].dev_diffuseTex = first.dev_diffuseTex;
dev_fragmentBuffer[index].eyeNor = barycentric.x * first.eyeNor
+ barycentric.y * second.eyeNor + barycentric.z * third.eyeNor;
dev_fragmentBuffer[index].eyePos = barycentric.x * first.eyePos
+ barycentric.y * second.eyePos + barycentric.z * third.eyePos;
dev_fragmentBuffer[index].texcoord0 = barycentric.x * first.texcoord0
+ barycentric.y * second.texcoord0 + barycentric.z * third.texcoord0;
dev_fragmentBuffer[index].texWidth = first.texWidth;
dev_fragmentBuffer[index].texHeight = first.texHeight;
}
// Unlock the pixel.
dev_lock[index] = 0;
}
}
}
}
}
}
}
}
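// The depth-test critical section above implements a per-pixel mutex with atomicCAS,
// a common GPU pattern when a multi-field update (depth plus the whole Fragment) must
// be atomic. A minimal sketch of the same structure (hypothetical helper, not used by
// this file):
//
//   __device__ void updatePixelLocked(int* lock, int index) {
//       int lockValue = -1;
//       while (lockValue != 0) {
//           lockValue = atomicCAS(&lock[index], 0, 1); // try to acquire: 0 -> 1
//           if (lockValue == 0) {
//               // ... compare and write dev_depth[index] and the fragment here ...
//               lock[index] = 0;                        // release
//           }
//       }
//   }
//
// Keeping the critical section inside the while loop behind "if (lockValue == 0)", as the
// code above does, lets lanes that lost the CAS keep retrying while the winning lane
// finishes and releases, avoiding the hang a bare spin-then-work loop can cause within a
// warp on some architectures. If only the depth needed updating, a single atomicMin on the
// integer depth buffer (as the comment inside the loop suggests) would avoid the lock.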
/**
 * A predicate for stream compaction that helps cull backfaces by partitioning
 * the visible primitives to the front of the buffer. Visibility is determined
 * during primitive assembly; this predicate just retrieves that flag.
*/
struct isVisible {
__host__ __device__
bool operator() (const Primitive primitive) {
return (primitive.isVisible);
}
};
/**
* Perform rasterization.
*/
static int iterations = 0;
void rasterize(uchar4 *pbo, const glm::mat4 & MVP, const glm::mat4 & MV, const glm::mat3 MV_normal) {
int sideLength2d = 8;
dim3 blockSize2d(sideLength2d, sideLength2d);
dim3 blockCount2d((width - 1) / blockSize2d.x + 1,
(height - 1) / blockSize2d.y + 1);
// Execute your rasterization pipeline here
// (See README for rasterization pipeline outline.)
// Vertex Process & primitive assembly
{
curPrimitiveBeginId = 0;
dim3 numThreadsPerBlock(128);
auto it = mesh2PrimitivesMap.begin();
auto itEnd = mesh2PrimitivesMap.end();
for (; it != itEnd; ++it) {
auto p = (it->second).begin(); // each primitive
auto pEnd = (it->second).end();
for (; p != pEnd; ++p) {
dim3 numBlocksForVertices((p->numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
dim3 numBlocksForIndices((p->numIndices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
_vertexTransformAndAssembly << < numBlocksForVertices, numThreadsPerBlock >> >(p->numVertices, *p, MVP, MV, MV_normal, width, height);
checkCUDAError("Vertex Processing");
hipDeviceSynchronize();
_primitiveAssembly << < numBlocksForIndices, numThreadsPerBlock >> >
(p->numIndices,
curPrimitiveBeginId,
dev_primitives,
*p);
checkCUDAError("Primitive Assembly");
curPrimitiveBeginId += p->numPrimitives;
}
}
checkCUDAError("Vertex Processing and Primitive Assembly");
}
hipMemset(dev_fragmentBuffer, 0, width * height * sizeof(Fragment));
initDepth << <blockCount2d, blockSize2d >> >(width, height, dev_depth);
// TODO: rasterize
// Parallelize by primitive (triangle). See totalNumPrimitives.
// If culling backfaces, update the primitives and totalNum.
// Taking the same approach to partition as in HW3.
StreamCompaction::Common::PerformanceTimer timer;
float rasterizationTime = 0;
if (BACKFACE_CULLING && !RENDER_POINTS_ONLY && !RENDER_LINES_ONLY) {
// Record the rasterization stage time.
timer.startGpuTimer();
// Cull the invisible faces.
Primitive* dev_visible_end = thrust::partition(thrust::device, dev_primitives,
dev_primitives + totalNumPrimitives, isVisible());
int totalVisiblePrimitives = dev_visible_end - dev_primitives;
// Rasterize.
dim3 numThreadsPerBlock(128);
dim3 numBlocksForPrimitives((totalVisiblePrimitives + numThreadsPerBlock.x - 1)
/ numThreadsPerBlock.x);
hipLaunchKernelGGL(( kernRasterize), dim3(numBlocksForPrimitives), dim3(numThreadsPerBlock), 0, 0,
totalVisiblePrimitives, dev_primitives, dev_fragmentBuffer, dev_depth,
dev_lock, width, height);
timer.endGpuTimer();
rasterizationTime += timer.getGpuElapsedTimeForPreviousOperation();
} else {
// Record the rasterization stage time.
timer.startGpuTimer();
// Rasterize.
dim3 numThreadsPerBlock(128);
dim3 numBlocksForPrimitives((totalNumPrimitives + numThreadsPerBlock.x - 1)
/ numThreadsPerBlock.x);
hipLaunchKernelGGL(( kernRasterize), dim3(numBlocksForPrimitives), dim3(numThreadsPerBlock), 0, 0,
totalNumPrimitives, dev_primitives, dev_fragmentBuffer, dev_depth,
dev_lock, width, height);
timer.endGpuTimer();
rasterizationTime += timer.getGpuElapsedTimeForPreviousOperation();
}
iterations++;
if (iterations % 600 == 0) {
printf("Rasterization time : %fms\n", (rasterizationTime / 600.f));
}
// Copy depthbuffer colors into framebuffer
render << <blockCount2d, blockSize2d >> >(width, height, dev_fragmentBuffer, dev_framebuffer);
checkCUDAError("fragment shader");
// Copy framebuffer into OpenGL buffer for OpenGL previewing
hipLaunchKernelGGL(( sendImageToPBO), dim3(blockCount2d), dim3(blockSize2d), 0, 0, pbo, width, height, dev_framebuffer);
checkCUDAError("copy render result to pbo");
}
/**
* Called once at the end of the program to free CUDA memory.
*/
void rasterizeFree() {
// deconstruct primitives attribute/indices device buffer
auto it(mesh2PrimitivesMap.begin());
auto itEnd(mesh2PrimitivesMap.end());
for (; it != itEnd; ++it) {
for (auto p = it->second.begin(); p != it->second.end(); ++p) {
hipFree(p->dev_indices);
hipFree(p->dev_position);
hipFree(p->dev_normal);
hipFree(p->dev_texcoord0);
hipFree(p->dev_diffuseTex);
hipFree(p->dev_verticesOut);
//TODO: release other attributes and materials
}
}
////////////
hipFree(dev_primitives);
dev_primitives = NULL;
hipFree(dev_fragmentBuffer);
dev_fragmentBuffer = NULL;
hipFree(dev_framebuffer);
dev_framebuffer = NULL;
hipFree(dev_lock);
dev_lock = NULL;
hipFree(dev_depth);
dev_depth = NULL;
checkCUDAError("rasterize Free");
}
|
37bc0e33a224f6783e7ce979a56e6c1e4496a850.cu
|
/**
* @file rasterize.cu
* @brief CUDA-accelerated rasterization pipeline.
* @authors Skeleton code: Yining Karl Li, Kai Ninomiya, Shuai Shao (Shrek)
* @date 2012-2016
* @copyright University of Pennsylvania & STUDENT
*/
#include <cmath>
#include <cstdio>
#include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/random.h>
#include <util/checkCUDAError.h>
#include <util/tiny_gltf_loader.h>
#include "rasterizeTools.h"
#include "rasterize.h"
#include <glm/gtc/quaternion.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <thrust/execution_policy.h>
#include "stream_compaction\efficient.h"
// A setting for toggling normal-based debug coloring of objects.
#define DEBUG_RENDER false
// A setting for toggling whether or not barycentric coordinates should properly
// color values across a primitive.
#define CORRECT_COLOR true
// A setting for toggling whether we should render only points.
#define RENDER_POINTS_ONLY false
// A setting for toggling whether we should render only lines.
#define RENDER_LINES_ONLY false
// A setting for toggling whether we should cull backfaces.
#define BACKFACE_CULLING true
namespace {
typedef unsigned short VertexIndex;
typedef glm::vec3 VertexAttributePosition;
typedef glm::vec3 VertexAttributeNormal;
typedef glm::vec2 VertexAttributeTexcoord;
typedef unsigned char TextureData;
typedef unsigned char BufferByte;
enum PrimitiveType{
Point = 1,
Line = 2,
Triangle = 3
};
struct VertexOut {
glm::vec4 pos;
// TODO: add new attributes to your VertexOut
// The attributes listed below might be useful,
// but always feel free to modify on your own
glm::vec3 eyePos; // eye space position used for shading
glm::vec3 eyeNor; // eye space normal used for shading, because normals are not preserved correctly by the perspective transformation
glm::vec3 col;
glm::vec2 texcoord0;
TextureData* dev_diffuseTex = NULL;
int texWidth, texHeight;
};
struct Primitive {
PrimitiveType primitiveType = Triangle; // C++ 11 init
VertexOut v[3];
bool isVisible;
};
struct Fragment {
glm::vec3 color;
// TODO: add new attributes to your Fragment
// The attributes listed below might be useful,
// but always feel free to modify on your own
// Pass through everything from VertexOut
glm::vec3 eyePos; // eye space position used for shading
glm::vec3 eyeNor; // eye space normal used for shading, because normals are not preserved correctly by the perspective transformation
glm::vec2 texcoord0;
TextureData* dev_diffuseTex = NULL;
int texWidth, texHeight;
};
struct PrimitiveDevBufPointers {
int primitiveMode; //from tinygltfloader macro
PrimitiveType primitiveType;
int numPrimitives;
int numIndices;
int numVertices;
// Vertex In, const after loaded
VertexIndex* dev_indices;
VertexAttributePosition* dev_position;
VertexAttributeNormal* dev_normal;
VertexAttributeTexcoord* dev_texcoord0;
// Materials, add more attributes when needed
TextureData* dev_diffuseTex;
int diffuseTexWidth;
int diffuseTexHeight;
// TextureData* dev_specularTex;
// TextureData* dev_normalTex;
// ...
// Vertex Out, vertex used for rasterization, this is changing every frame
VertexOut* dev_verticesOut;
// TODO: add more attributes when needed
};
}
static std::map<std::string, std::vector<PrimitiveDevBufPointers>> mesh2PrimitivesMap;
static int width = 0;
static int height = 0;
static int totalNumPrimitives = 0;
static Primitive *dev_primitives = NULL;
static Fragment *dev_fragmentBuffer = NULL;
static glm::vec3 *dev_framebuffer = NULL;
static int * dev_depth = NULL; // you might need this buffer when doing depth test
static int * dev_lock = NULL;
/**
* Kernel that writes the image to the OpenGL PBO directly.
*/
__global__
void sendImageToPBO(uchar4 *pbo, int w, int h, glm::vec3 *image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * w);
if (x < w && y < h) {
glm::vec3 color;
color.x = glm::clamp(image[index].x, 0.0f, 1.0f) * 255.0;
color.y = glm::clamp(image[index].y, 0.0f, 1.0f) * 255.0;
color.z = glm::clamp(image[index].z, 0.0f, 1.0f) * 255.0;
// Each thread writes one pixel location in the texture (texel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
/**
* Writes fragment colors to the framebuffer
*/
__global__
void render(int w, int h, Fragment *fragmentBuffer, glm::vec3 *framebuffer) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * w);
// Make sure the fragment is within bounds.
if (x < w && y < h) {
// Retrieve the fragment's normal and send light from eyePos.
glm::vec3 surfaceNormal = glm::normalize(fragmentBuffer[index].eyeNor);
glm::vec3 lightDirection = glm::normalize(fragmentBuffer[index].eyePos);
// If this setting is enabled, shade using surface normals.
if (DEBUG_RENDER && !RENDER_POINTS_ONLY && !RENDER_LINES_ONLY) {
framebuffer[index] = fragmentBuffer[index].color
* glm::dot(surfaceNormal, lightDirection);
// If it has a texture...
} else if (!RENDER_POINTS_ONLY && !RENDER_LINES_ONLY
&& fragmentBuffer[index].dev_diffuseTex != NULL) {
// Retrieve texture coordinates and use texture to color it.
int u = (fragmentBuffer[index].texcoord0.x * fragmentBuffer[index].texWidth);
int v = (fragmentBuffer[index].texcoord0.y * fragmentBuffer[index].texHeight);
int texIndex = u + v * fragmentBuffer[index].texWidth;
float t0 = (float)fragmentBuffer[index].dev_diffuseTex[texIndex * 3 + 0];
float t1 = (float)fragmentBuffer[index].dev_diffuseTex[texIndex * 3 + 1];
float t2 = (float)fragmentBuffer[index].dev_diffuseTex[texIndex * 3 + 2];
framebuffer[index] = glm::vec3(t0 / 255.f, t1 / 255.f, t2 / 255.f)
* glm::dot(surfaceNormal, lightDirection);
// Otherwise something might be wrong;
// fall back to the fragment's default color (the surface-normal color).
} else {
framebuffer[index] = fragmentBuffer[index].color;
}
}
}
/**
* Called once at the beginning of the program to allocate memory.
*/
void rasterizeInit(int w, int h) {
width = w;
height = h;
cudaFree(dev_fragmentBuffer);
cudaMalloc(&dev_fragmentBuffer, width * height * sizeof(Fragment));
cudaMemset(dev_fragmentBuffer, 0, width * height * sizeof(Fragment));
cudaFree(dev_framebuffer);
cudaMalloc(&dev_framebuffer, width * height * sizeof(glm::vec3));
cudaMemset(dev_framebuffer, 0, width * height * sizeof(glm::vec3));
cudaFree(dev_lock);
cudaMalloc(&dev_lock, width * height * sizeof(int));
cudaMemset(dev_lock, 0, width * height * sizeof(int));
cudaFree(dev_depth);
cudaMalloc(&dev_depth, width * height * sizeof(int));
checkCUDAError("rasterizeInit");
}
__global__
void initDepth(int w, int h, int * depth)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < w && y < h)
{
int index = x + (y * w);
depth[index] = INT_MAX;
}
}
/**
 * Kernel with stride support that can sometimes replace cudaMemcpy.
 * One thread is responsible for copying one component.
*/
__global__
void _deviceBufferCopy(int N, BufferByte* dev_dst, const BufferByte* dev_src, int n, int byteStride, int byteOffset, int componentTypeByteSize) {
// Attribute (vec3 position)
// component (3 * float)
// byte (4 * byte)
// id of component
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < N) {
int count = i / n;
int offset = i - count * n; // which component of the attribute
for (int j = 0; j < componentTypeByteSize; j++) {
dev_dst[count * componentTypeByteSize * n
+ offset * componentTypeByteSize
+ j]
=
dev_src[byteOffset
+ count * (byteStride == 0 ? componentTypeByteSize * n : byteStride)
+ offset * componentTypeByteSize
+ j];
}
}
}
__global__
void _nodeMatrixTransform(
int numVertices,
VertexAttributePosition* position,
VertexAttributeNormal* normal,
glm::mat4 MV, glm::mat3 MV_normal) {
// vertex id
int vid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (vid < numVertices) {
position[vid] = glm::vec3(MV * glm::vec4(position[vid], 1.0f));
normal[vid] = glm::normalize(MV_normal * normal[vid]);
}
}
glm::mat4 getMatrixFromNodeMatrixVector(const tinygltf::Node & n) {
glm::mat4 curMatrix(1.0);
const std::vector<double> &m = n.matrix;
if (m.size() > 0) {
// matrix, copy it
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++) {
curMatrix[i][j] = (float)m.at(4 * i + j);
}
}
} else {
// no matrix, use rotation, scale, translation
if (n.translation.size() > 0) {
curMatrix[3][0] = n.translation[0];
curMatrix[3][1] = n.translation[1];
curMatrix[3][2] = n.translation[2];
}
if (n.rotation.size() > 0) {
glm::mat4 R;
glm::quat q;
q[0] = n.rotation[0];
q[1] = n.rotation[1];
q[2] = n.rotation[2];
R = glm::mat4_cast(q);
curMatrix = curMatrix * R;
}
if (n.scale.size() > 0) {
curMatrix = curMatrix * glm::scale(glm::vec3(n.scale[0], n.scale[1], n.scale[2]));
}
}
return curMatrix;
}
void traverseNode (
std::map<std::string, glm::mat4> & n2m,
const tinygltf::Scene & scene,
const std::string & nodeString,
const glm::mat4 & parentMatrix
)
{
const tinygltf::Node & n = scene.nodes.at(nodeString);
glm::mat4 M = parentMatrix * getMatrixFromNodeMatrixVector(n);
n2m.insert(std::pair<std::string, glm::mat4>(nodeString, M));
auto it = n.children.begin();
auto itEnd = n.children.end();
for (; it != itEnd; ++it) {
traverseNode(n2m, scene, *it, M);
}
}
void rasterizeSetBuffers(const tinygltf::Scene & scene) {
totalNumPrimitives = 0;
std::map<std::string, BufferByte*> bufferViewDevPointers;
// 1. copy all `bufferViews` to device memory
{
std::map<std::string, tinygltf::BufferView>::const_iterator it(
scene.bufferViews.begin());
std::map<std::string, tinygltf::BufferView>::const_iterator itEnd(
scene.bufferViews.end());
for (; it != itEnd; it++) {
const std::string key = it->first;
const tinygltf::BufferView &bufferView = it->second;
if (bufferView.target == 0) {
continue; // Unsupported bufferView.
}
const tinygltf::Buffer &buffer = scene.buffers.at(bufferView.buffer);
BufferByte* dev_bufferView;
cudaMalloc(&dev_bufferView, bufferView.byteLength);
cudaMemcpy(dev_bufferView, &buffer.data.front() + bufferView.byteOffset, bufferView.byteLength, cudaMemcpyHostToDevice);
checkCUDAError("Set BufferView Device Mem");
bufferViewDevPointers.insert(std::make_pair(key, dev_bufferView));
}
}
// 2. for each mesh:
// for each primitive:
// build device buffers of indices, material, and each attribute
// and store these pointers in a map
{
std::map<std::string, glm::mat4> nodeString2Matrix;
auto rootNodeNamesList = scene.scenes.at(scene.defaultScene);
{
auto it = rootNodeNamesList.begin();
auto itEnd = rootNodeNamesList.end();
for (; it != itEnd; ++it) {
traverseNode(nodeString2Matrix, scene, *it, glm::mat4(1.0f));
}
}
// parse through node to access mesh
auto itNode = nodeString2Matrix.begin();
auto itEndNode = nodeString2Matrix.end();
for (; itNode != itEndNode; ++itNode) {
const tinygltf::Node & N = scene.nodes.at(itNode->first);
const glm::mat4 & matrix = itNode->second;
const glm::mat3 & matrixNormal = glm::transpose(glm::inverse(glm::mat3(matrix)));
auto itMeshName = N.meshes.begin();
auto itEndMeshName = N.meshes.end();
for (; itMeshName != itEndMeshName; ++itMeshName) {
const tinygltf::Mesh & mesh = scene.meshes.at(*itMeshName);
auto res = mesh2PrimitivesMap.insert(std::pair<std::string, std::vector<PrimitiveDevBufPointers>>(mesh.name, std::vector<PrimitiveDevBufPointers>()));
std::vector<PrimitiveDevBufPointers> & primitiveVector = (res.first)->second;
// for each primitive
for (size_t i = 0; i < mesh.primitives.size(); i++) {
const tinygltf::Primitive &primitive = mesh.primitives[i];
if (primitive.indices.empty())
return;
// TODO: add new attributes for your PrimitiveDevBufPointers when you add new attributes
VertexIndex* dev_indices = NULL;
VertexAttributePosition* dev_position = NULL;
VertexAttributeNormal* dev_normal = NULL;
VertexAttributeTexcoord* dev_texcoord0 = NULL;
// ----------Indices-------------
const tinygltf::Accessor &indexAccessor = scene.accessors.at(primitive.indices);
const tinygltf::BufferView &bufferView = scene.bufferViews.at(indexAccessor.bufferView);
BufferByte* dev_bufferView = bufferViewDevPointers.at(indexAccessor.bufferView);
// assume type is SCALAR for indices
int n = 1;
int numIndices = indexAccessor.count;
int componentTypeByteSize = sizeof(VertexIndex);
int byteLength = numIndices * n * componentTypeByteSize;
dim3 numThreadsPerBlock(128);
dim3 numBlocks((numIndices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
cudaMalloc(&dev_indices, byteLength);
_deviceBufferCopy << <numBlocks, numThreadsPerBlock >> > (
numIndices,
(BufferByte*)dev_indices,
dev_bufferView,
n,
indexAccessor.byteStride,
indexAccessor.byteOffset,
componentTypeByteSize);
checkCUDAError("Set Index Buffer");
// ---------Primitive Info-------
// Warning: LINE_STRIP is not supported in tinygltfloader
int numPrimitives;
PrimitiveType primitiveType;
switch (primitive.mode) {
case TINYGLTF_MODE_TRIANGLES:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices / 3;
break;
case TINYGLTF_MODE_TRIANGLE_STRIP:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices - 2;
break;
case TINYGLTF_MODE_TRIANGLE_FAN:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices - 2;
break;
case TINYGLTF_MODE_LINE:
primitiveType = PrimitiveType::Line;
numPrimitives = numIndices / 2;
break;
case TINYGLTF_MODE_LINE_LOOP:
primitiveType = PrimitiveType::Line;
numPrimitives = numIndices + 1;
break;
case TINYGLTF_MODE_POINTS:
primitiveType = PrimitiveType::Point;
numPrimitives = numIndices;
break;
default:
// output error
break;
};
// ----------Attributes-------------
auto it(primitive.attributes.begin());
auto itEnd(primitive.attributes.end());
int numVertices = 0;
// for each attribute
for (; it != itEnd; it++) {
const tinygltf::Accessor &accessor = scene.accessors.at(it->second);
const tinygltf::BufferView &bufferView = scene.bufferViews.at(accessor.bufferView);
int n = 1;
if (accessor.type == TINYGLTF_TYPE_SCALAR) {
n = 1;
}
else if (accessor.type == TINYGLTF_TYPE_VEC2) {
n = 2;
}
else if (accessor.type == TINYGLTF_TYPE_VEC3) {
n = 3;
}
else if (accessor.type == TINYGLTF_TYPE_VEC4) {
n = 4;
}
BufferByte * dev_bufferView = bufferViewDevPointers.at(accessor.bufferView);
BufferByte ** dev_attribute = NULL;
numVertices = accessor.count;
int componentTypeByteSize;
// Note: since the type of our attribute array (dev_position) is static (float32)
// We assume the glTF model attribute types are 5126 (FLOAT) here
if (it->first.compare("POSITION") == 0) {
componentTypeByteSize = sizeof(VertexAttributePosition) / n;
dev_attribute = (BufferByte**)&dev_position;
}
else if (it->first.compare("NORMAL") == 0) {
componentTypeByteSize = sizeof(VertexAttributeNormal) / n;
dev_attribute = (BufferByte**)&dev_normal;
}
else if (it->first.compare("TEXCOORD_0") == 0) {
componentTypeByteSize = sizeof(VertexAttributeTexcoord) / n;
dev_attribute = (BufferByte**)&dev_texcoord0;
}
std::cout << accessor.bufferView << " - " << it->second << " - " << it->first << '\n';
dim3 numThreadsPerBlock(128);
dim3 numBlocks((n * numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
int byteLength = numVertices * n * componentTypeByteSize;
cudaMalloc(dev_attribute, byteLength);
_deviceBufferCopy << <numBlocks, numThreadsPerBlock >> > (
n * numVertices,
*dev_attribute,
dev_bufferView,
n,
accessor.byteStride,
accessor.byteOffset,
componentTypeByteSize);
std::string msg = "Set Attribute Buffer: " + it->first;
checkCUDAError(msg.c_str());
}
// malloc for VertexOut
VertexOut* dev_vertexOut;
cudaMalloc(&dev_vertexOut, numVertices * sizeof(VertexOut));
checkCUDAError("Malloc VertexOut Buffer");
// ----------Materials-------------
// You only need to worry about this part once you start to
// implement textures for your rasterizer
TextureData* dev_diffuseTex = NULL;
int diffuseTexWidth = 0;
int diffuseTexHeight = 0;
if (!primitive.material.empty()) {
const tinygltf::Material &mat = scene.materials.at(primitive.material);
printf("material.name = %s\n", mat.name.c_str());
if (mat.values.find("diffuse") != mat.values.end()) {
std::string diffuseTexName = mat.values.at("diffuse").string_value;
if (scene.textures.find(diffuseTexName) != scene.textures.end()) {
const tinygltf::Texture &tex = scene.textures.at(diffuseTexName);
if (scene.images.find(tex.source) != scene.images.end()) {
const tinygltf::Image &image = scene.images.at(tex.source);
size_t s = image.image.size() * sizeof(TextureData);
cudaMalloc(&dev_diffuseTex, s);
cudaMemcpy(dev_diffuseTex, &image.image.at(0), s, cudaMemcpyHostToDevice);
diffuseTexWidth = image.width;
diffuseTexHeight = image.height;
checkCUDAError("Set Texture Image data");
}
}
}
// TODO: write your code for other materials
// You may have to take a look at tinygltfloader
// You can also use the diffuse-material loading code above as a starting point
}
// ---------Node hierarchy transform--------
cudaDeviceSynchronize();
dim3 numBlocksNodeTransform((numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
_nodeMatrixTransform << <numBlocksNodeTransform, numThreadsPerBlock >> > (
numVertices,
dev_position,
dev_normal,
matrix,
matrixNormal);
checkCUDAError("Node hierarchy transformation");
// at the end of the for loop of primitive
// push dev pointers to map
primitiveVector.push_back(PrimitiveDevBufPointers{
primitive.mode,
primitiveType,
numPrimitives,
numIndices,
numVertices,
dev_indices,
dev_position,
dev_normal,
dev_texcoord0,
dev_diffuseTex,
diffuseTexWidth,
diffuseTexHeight,
dev_vertexOut //VertexOut
});
totalNumPrimitives += numPrimitives;
} // for each primitive
} // for each mesh
} // for each node
}
// 3. Malloc for dev_primitives
{
cudaMalloc(&dev_primitives, totalNumPrimitives * sizeof(Primitive));
}
// Finally, cudaFree raw dev_bufferViews
{
std::map<std::string, BufferByte*>::const_iterator it(bufferViewDevPointers.begin());
std::map<std::string, BufferByte*>::const_iterator itEnd(bufferViewDevPointers.end());
//bufferViewDevPointers
for (; it != itEnd; it++) {
cudaFree(it->second);
}
checkCUDAError("Free BufferView Device Mem");
}
}
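// Summary of the buffer flow set up above (descriptive only): every glTF bufferView is
// first copied verbatim to the device; _deviceBufferCopy then de-interleaves each accessor
// (indices, POSITION, NORMAL, TEXCOORD_0) into its own tightly packed device array;
// _nodeMatrixTransform bakes the node-hierarchy transform into the positions and normals;
// and per primitive a VertexOut array plus an optional diffuse texture are allocated. The
// raw bufferView copies are freed at the end because only the per-attribute arrays are
// needed from here on.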
__global__
void _vertexTransformAndAssembly(
int numVertices,
PrimitiveDevBufPointers primitive,
glm::mat4 MVP, glm::mat4 MV, glm::mat3 MV_normal,
int width, int height) {
// vertex id
int vid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (vid < numVertices) {
// TODO: Apply vertex transformation here
// Multiply the MVP matrix for each vertex position, this will transform everything into clipping space
// Then divide the pos by its w element to transform into NDC space
// Finally transform x and y to viewport space
glm::vec3 position = primitive.dev_position[vid];
glm::vec3 normal = primitive.dev_normal[vid];
glm::vec4 homogenous = glm::vec4(position.x, position.y, position.z, 1);
glm::vec4 projectionSpace = (MV * homogenous);
glm::vec4 clippingSpace = (MVP * homogenous);
clippingSpace = (clippingSpace / clippingSpace.w);
clippingSpace.x = 0.5f * (float)width * (clippingSpace.x + 1);
clippingSpace.y = 0.5f * (float)height * (-clippingSpace.y + 1);
// TODO: Apply vertex assembly here
// Assemble all attribute arrays into the primitive array
// Pack positions, normals, etc. into VertexOut.
primitive.dev_verticesOut[vid].pos = clippingSpace;
primitive.dev_verticesOut[vid].eyeNor = MV_normal * normal;
primitive.dev_verticesOut[vid].col = glm::vec3();
primitive.dev_verticesOut[vid].dev_diffuseTex = primitive.dev_diffuseTex;
primitive.dev_verticesOut[vid].eyePos = glm::vec3(projectionSpace.x,
projectionSpace.y, projectionSpace.z);
primitive.dev_verticesOut[vid].texcoord0 = primitive.dev_texcoord0[vid];
primitive.dev_verticesOut[vid].texHeight = primitive.diffuseTexHeight;
primitive.dev_verticesOut[vid].texWidth = primitive.diffuseTexWidth;
}
}
static int curPrimitiveBeginId = 0;
__global__
void _primitiveAssembly(int numIndices, int curPrimitiveBeginId, Primitive* dev_primitives, PrimitiveDevBufPointers primitive) {
// index id
int iid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (iid < numIndices) {
// TODO: uncomment the following code for a start
// This is primitive assembly for triangles
int pid; // id for cur primitives vector
if (primitive.primitiveMode == TINYGLTF_MODE_TRIANGLES) {
pid = iid / (int)primitive.primitiveType;
dev_primitives[pid + curPrimitiveBeginId].v[iid % (int)primitive.primitiveType]
= primitive.dev_verticesOut[primitive.dev_indices[iid]];
}
// TODO: other primitive types (point, line)
if (primitive.primitiveMode == TINYGLTF_MODE_POINTS) {
pid = iid / (int)primitive.primitiveType;
dev_primitives[pid + curPrimitiveBeginId].v[iid % (int)primitive.primitiveType]
= primitive.dev_verticesOut[primitive.dev_indices[iid]];
}
if (primitive.primitiveMode == TINYGLTF_MODE_LINE) {
pid = iid / (int)primitive.primitiveType;
dev_primitives[pid + curPrimitiveBeginId].v[iid % (int)primitive.primitiveType]
= primitive.dev_verticesOut[primitive.dev_indices[iid]];
}
// From Wikipedia:
// One method of implementing back-face culling is by discarding all triangles
// where the dot product of their surface normal and the camera-to-triangle vector
// is greater than or equal to zero. Assume the camera looks down the z-axis.
dev_primitives[pid + curPrimitiveBeginId].isVisible =
(glm::dot(primitive.dev_verticesOut[primitive.dev_indices[iid]].eyeNor,
glm::vec3(0, 0, 1)) < 0);
}
}
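// Index-to-primitive mapping used above, with an illustrative example: primitiveType is 3
// for triangles, so index iid = 7 assembles into
//   pid = 7 / 3 = 2    (the third triangle of this group)
//   v[7 % 3] = v[1]    (that triangle's second vertex)
// and the vertex written is dev_verticesOut[dev_indices[7]]. Points and lines reuse the
// same formula with primitiveType 1 and 2, while curPrimitiveBeginId offsets pid so that
// primitives from different meshes land in disjoint ranges of dev_primitives.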
__global__
void kernRasterize(int totalNumPrimitives, Primitive* dev_primitives,
Fragment* dev_fragmentBuffer, int* dev_depth, int* dev_lock,
int width, int height) {
// Get the index for this specific primitive and make sure it is valid.
int pid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pid < totalNumPrimitives) {
// Get the vertex data for the triangle.
VertexOut first = dev_primitives[pid].v[0];
VertexOut second = dev_primitives[pid].v[1];
VertexOut third = dev_primitives[pid].v[2];
// Define the triangle as required by rasterizeTools.h helpers.
glm::vec3 firstPos3 = glm::vec3(first.pos.x, first.pos.y, first.pos.z);
glm::vec3 secondPos3 = glm::vec3(second.pos.x, second.pos.y, second.pos.z);
glm::vec3 thirdPos3 = glm::vec3(third.pos.x, third.pos.y, third.pos.z);
glm::vec3 tri[3] = { firstPos3, secondPos3, thirdPos3 };
// Check if points should be rendered.
if (RENDER_POINTS_ONLY) {
int x1 = firstPos3.x;
int x2 = secondPos3.x;
int x3 = thirdPos3.x;
int y1 = firstPos3.y;
int y2 = secondPos3.y;
int y3 = thirdPos3.y;
int indexFirst = x1 + (width * y1);
int indexSecond = x2 + (width * y2);
int indexThird = x3 + (width * y3);
dev_fragmentBuffer[indexFirst].color = first.eyeNor;
dev_fragmentBuffer[indexSecond].color = second.eyeNor;
dev_fragmentBuffer[indexThird].color = third.eyeNor;
// Check if lines should be rendered.
} else if (RENDER_LINES_ONLY) {
// Find edge length, slope, and direction.
float lengthX = secondPos3.x - firstPos3.x;
float lengthY = secondPos3.y - firstPos3.y;
float slope = glm::abs(lengthY / lengthX);
int xDir = 1;
if (lengthX < 0) {
xDir = -1;
}
int yDir = 1;
if (lengthY < 0) {
yDir = -1;
}
// Draw just this one edge per primitive. Triangle outlines still take
// shape because neighboring primitives contribute their own first edges.
for (int i = 0; i < glm::abs(lengthX); i++) {
int u = (int)firstPos3.x + (i * xDir);
int v = (int)firstPos3.y + (i * yDir * slope);
int indexFirst = u + (width * v);
dev_fragmentBuffer[indexFirst].color = first.eyeNor;
}
// Render triangles.
} else {
// Find the bounding box of this triangle.
AABB bounds = getAABBForTriangle(tri);
// Scan bounding box for triangle to rasterize.
for (int x = bounds.min.x; x <= bounds.max.x; x++) {
for (int y = bounds.min.y; y <= bounds.max.y; y++) {
// Get barycentric coordinates.
glm::vec3 barycentric = calculateBarycentricCoordinate(tri, glm::vec2(x, y));
// Only fill coordinate if it's in the triangle.
bool inTriangle = isBarycentricCoordInBounds(barycentric);
if (inTriangle) {
// Attempt to lock this pixel.
int index = x + (width * y);
int lockValue = -1;
while (lockValue != 0) {
lockValue = atomicCAS(&dev_lock[index], 0, 1);
// When the pixel is locked, actually rasterize the triangle.
if (lockValue == 0) {
// Decide which fragment to write into buffer when multiple fragments overlap in pixel
// coordinates. Need atomic operation "atomicMin(&depth[idx], depth)"
float depth = 1000 * getZAtCoordinate(barycentric, tri);
if (dev_depth[index] >= depth) {
dev_depth[index] = depth;
// Update the fragment to pass on through the rasterizer.
if (CORRECT_COLOR) {
dev_fragmentBuffer[index].color = barycentric.x * first.eyeNor
+ barycentric.y * second.eyeNor + barycentric.z * third.eyeNor;
} else {
dev_fragmentBuffer[index].color = first.eyeNor;
}
dev_fragmentBuffer[index].dev_diffuseTex = first.dev_diffuseTex;
dev_fragmentBuffer[index].eyeNor = barycentric.x * first.eyeNor
+ barycentric.y * second.eyeNor + barycentric.z * third.eyeNor;
dev_fragmentBuffer[index].eyePos = barycentric.x * first.eyePos
+ barycentric.y * second.eyePos + barycentric.z * third.eyePos;
dev_fragmentBuffer[index].texcoord0 = barycentric.x * first.texcoord0
+ barycentric.y * second.texcoord0 + barycentric.z * third.texcoord0;
dev_fragmentBuffer[index].texWidth = first.texWidth;
dev_fragmentBuffer[index].texHeight = first.texHeight;
}
// Unlock the pixel.
dev_lock[index] = 0;
}
}
}
}
}
}
}
}
/**
 * A predicate for stream compaction that helps cull backfaces by partitioning
 * the visible primitives to the front of the buffer. Visibility is determined
 * during primitive assembly; this predicate just retrieves that flag.
*/
struct isVisible {
__host__ __device__
bool operator() (const Primitive primitive) {
return (primitive.isVisible);
}
};
/**
* Perform rasterization.
*/
static int iterations = 0;
void rasterize(uchar4 *pbo, const glm::mat4 & MVP, const glm::mat4 & MV, const glm::mat3 MV_normal) {
int sideLength2d = 8;
dim3 blockSize2d(sideLength2d, sideLength2d);
dim3 blockCount2d((width - 1) / blockSize2d.x + 1,
(height - 1) / blockSize2d.y + 1);
// Execute your rasterization pipeline here
// (See README for rasterization pipeline outline.)
// Vertex Process & primitive assembly
{
curPrimitiveBeginId = 0;
dim3 numThreadsPerBlock(128);
auto it = mesh2PrimitivesMap.begin();
auto itEnd = mesh2PrimitivesMap.end();
for (; it != itEnd; ++it) {
auto p = (it->second).begin(); // each primitive
auto pEnd = (it->second).end();
for (; p != pEnd; ++p) {
dim3 numBlocksForVertices((p->numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
dim3 numBlocksForIndices((p->numIndices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
_vertexTransformAndAssembly << < numBlocksForVertices, numThreadsPerBlock >> >(p->numVertices, *p, MVP, MV, MV_normal, width, height);
checkCUDAError("Vertex Processing");
cudaDeviceSynchronize();
_primitiveAssembly << < numBlocksForIndices, numThreadsPerBlock >> >
(p->numIndices,
curPrimitiveBeginId,
dev_primitives,
*p);
checkCUDAError("Primitive Assembly");
curPrimitiveBeginId += p->numPrimitives;
}
}
checkCUDAError("Vertex Processing and Primitive Assembly");
}
cudaMemset(dev_fragmentBuffer, 0, width * height * sizeof(Fragment));
initDepth << <blockCount2d, blockSize2d >> >(width, height, dev_depth);
// TODO: rasterize
// Parallelize by primitive (triangle). See totalNumPrimitives.
// If culling backfaces, update the primitives and totalNum.
// Taking the same approach to partition as in HW3.
StreamCompaction::Common::PerformanceTimer timer;
float rasterizationTime = 0;
if (BACKFACE_CULLING && !RENDER_POINTS_ONLY && !RENDER_LINES_ONLY) {
// Record the rasterization stage time.
timer.startGpuTimer();
// Cull the invisible faces.
Primitive* dev_visible_end = thrust::partition(thrust::device, dev_primitives,
dev_primitives + totalNumPrimitives, isVisible());
int totalVisiblePrimitives = dev_visible_end - dev_primitives;
// Rasterize.
dim3 numThreadsPerBlock(128);
dim3 numBlocksForPrimitives((totalVisiblePrimitives + numThreadsPerBlock.x - 1)
/ numThreadsPerBlock.x);
kernRasterize<<<numBlocksForPrimitives, numThreadsPerBlock>>>(
totalVisiblePrimitives, dev_primitives, dev_fragmentBuffer, dev_depth,
dev_lock, width, height);
timer.endGpuTimer();
rasterizationTime += timer.getGpuElapsedTimeForPreviousOperation();
} else {
// Record the rasterization stage time.
timer.startGpuTimer();
// Rasterize.
dim3 numThreadsPerBlock(128);
dim3 numBlocksForPrimitives((totalNumPrimitives + numThreadsPerBlock.x - 1)
/ numThreadsPerBlock.x);
kernRasterize<<<numBlocksForPrimitives, numThreadsPerBlock>>>(
totalNumPrimitives, dev_primitives, dev_fragmentBuffer, dev_depth,
dev_lock, width, height);
timer.endGpuTimer();
rasterizationTime += timer.getGpuElapsedTimeForPreviousOperation();
}
iterations++;
if (iterations % 600 == 0) {
printf("Rasterization time : %fms\n", (rasterizationTime / 600.f));
}
// Copy depthbuffer colors into framebuffer
render << <blockCount2d, blockSize2d >> >(width, height, dev_fragmentBuffer, dev_framebuffer);
checkCUDAError("fragment shader");
// Copy framebuffer into OpenGL buffer for OpenGL previewing
sendImageToPBO<<<blockCount2d, blockSize2d>>>(pbo, width, height, dev_framebuffer);
checkCUDAError("copy render result to pbo");
}
/**
* Called once at the end of the program to free CUDA memory.
*/
void rasterizeFree() {
// deconstruct primitives attribute/indices device buffer
auto it(mesh2PrimitivesMap.begin());
auto itEnd(mesh2PrimitivesMap.end());
for (; it != itEnd; ++it) {
for (auto p = it->second.begin(); p != it->second.end(); ++p) {
cudaFree(p->dev_indices);
cudaFree(p->dev_position);
cudaFree(p->dev_normal);
cudaFree(p->dev_texcoord0);
cudaFree(p->dev_diffuseTex);
cudaFree(p->dev_verticesOut);
//TODO: release other attributes and materials
}
}
////////////
cudaFree(dev_primitives);
dev_primitives = NULL;
cudaFree(dev_fragmentBuffer);
dev_fragmentBuffer = NULL;
cudaFree(dev_framebuffer);
dev_framebuffer = NULL;
cudaFree(dev_lock);
dev_lock = NULL;
cudaFree(dev_depth);
dev_depth = NULL;
checkCUDAError("rasterize Free");
}
|
809fa78c4b37b8f5b1061af8e98565b1141cd4dc.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <set>
#include <vector>
#include "cuda_utils.h"
#include "random/rng.h"
#include "test_utils.h"
namespace MLCommon {
namespace Random {
// Terminology:
// SWoR - Sample Without Replacement
template <typename T>
struct SWoRInputs {
int len, sampledLen;
int largeWeightIndex;
T largeWeight;
GeneratorType gtype;
unsigned long long int seed;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const SWoRInputs<T>& dims) {
return os;
}
template <typename T>
class SWoRTest : public ::testing::TestWithParam<SWoRInputs<T>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<SWoRInputs<T>>::GetParam();
CUDA_CHECK(hipStreamCreate(&stream));
allocator.reset(new defaultDeviceAllocator);
Rng r(params.seed, params.gtype);
allocate(in, params.len);
allocate(wts, params.len);
allocate(out, params.sampledLen);
allocate(outIdx, params.sampledLen);
h_outIdx.resize(params.sampledLen);
r.uniform(in, params.len, T(-1.0), T(1.0), stream);
r.uniform(wts, params.len, T(1.0), T(2.0), stream);
if (params.largeWeightIndex >= 0) {
updateDevice(wts + params.largeWeightIndex, ¶ms.largeWeight, 1,
stream);
}
r.sampleWithoutReplacement(out, outIdx, in, wts, params.sampledLen,
params.len, allocator, stream);
updateHost(&(h_outIdx[0]), outIdx, params.sampledLen, stream);
}
void TearDown() override {
CUDA_CHECK(hipStreamSynchronize(stream));
CUDA_CHECK(hipStreamDestroy(stream));
CUDA_CHECK(hipFree(in));
CUDA_CHECK(hipFree(wts));
CUDA_CHECK(hipFree(out));
CUDA_CHECK(hipFree(outIdx));
}
protected:
SWoRInputs<T> params;
T *in, *out, *wts;
int* outIdx;
std::vector<int> h_outIdx;
hipStream_t stream;
std::shared_ptr<deviceAllocator> allocator;
};
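// The fixture above generates both the candidate values (in) and their selection weights
// (wts) on the device, optionally boosts a single weight (largeWeightIndex), runs
// sampleWithoutReplacement, and copies the sampled indices back to the host so the
// assertions in the Result test below can inspect them.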
typedef SWoRTest<float> SWoRTestF;
const std::vector<SWoRInputs<float>> inputsf = {
{1024, 512, -1, 0.f, GenPhilox, 1234ULL},
{1024, 1024, -1, 0.f, GenPhilox, 1234ULL},
{1024, 512 + 1, -1, 0.f, GenPhilox, 1234ULL},
{1024, 1024 - 1, -1, 0.f, GenPhilox, 1234ULL},
{1024, 512 + 2, -1, 0.f, GenPhilox, 1234ULL},
{1024, 1024 - 2, -1, 0.f, GenPhilox, 1234ULL},
{1024 + 1, 512, -1, 0.f, GenPhilox, 1234ULL},
{1024 + 1, 1024, -1, 0.f, GenPhilox, 1234ULL},
{1024 + 1, 512 + 1, -1, 0.f, GenPhilox, 1234ULL},
{1024 + 1, 1024 + 1, -1, 0.f, GenPhilox, 1234ULL},
{1024 + 1, 512 + 2, -1, 0.f, GenPhilox, 1234ULL},
{1024 + 1, 1024 - 2, -1, 0.f, GenPhilox, 1234ULL},
{1024 + 2, 512, -1, 0.f, GenPhilox, 1234ULL},
{1024 + 2, 1024, -1, 0.f, GenPhilox, 1234ULL},
{1024 + 2, 512 + 1, -1, 0.f, GenPhilox, 1234ULL},
{1024 + 2, 1024 + 1, -1, 0.f, GenPhilox, 1234ULL},
{1024 + 2, 512 + 2, -1, 0.f, GenPhilox, 1234ULL},
{1024 + 2, 1024 + 2, -1, 0.f, GenPhilox, 1234ULL},
{1024, 512, 10, 100000.f, GenPhilox, 1234ULL},
{1024, 512, -1, 0.f, GenTaps, 1234ULL},
{1024, 1024, -1, 0.f, GenTaps, 1234ULL},
{1024, 512 + 1, -1, 0.f, GenTaps, 1234ULL},
{1024, 1024 - 1, -1, 0.f, GenTaps, 1234ULL},
{1024, 512 + 2, -1, 0.f, GenTaps, 1234ULL},
{1024, 1024 - 2, -1, 0.f, GenTaps, 1234ULL},
{1024 + 1, 512, -1, 0.f, GenTaps, 1234ULL},
{1024 + 1, 1024, -1, 0.f, GenTaps, 1234ULL},
{1024 + 1, 512 + 1, -1, 0.f, GenTaps, 1234ULL},
{1024 + 1, 1024 + 1, -1, 0.f, GenTaps, 1234ULL},
{1024 + 1, 512 + 2, -1, 0.f, GenTaps, 1234ULL},
{1024 + 1, 1024 - 2, -1, 0.f, GenTaps, 1234ULL},
{1024 + 2, 512, -1, 0.f, GenTaps, 1234ULL},
{1024 + 2, 1024, -1, 0.f, GenTaps, 1234ULL},
{1024 + 2, 512 + 1, -1, 0.f, GenTaps, 1234ULL},
{1024 + 2, 1024 + 1, -1, 0.f, GenTaps, 1234ULL},
{1024 + 2, 512 + 2, -1, 0.f, GenTaps, 1234ULL},
{1024 + 2, 1024 + 2, -1, 0.f, GenTaps, 1234ULL},
{1024, 512, 10, 100000.f, GenTaps, 1234ULL},
{1024, 512, -1, 0.f, GenKiss99, 1234ULL},
{1024, 1024, -1, 0.f, GenKiss99, 1234ULL},
{1024, 512 + 1, -1, 0.f, GenKiss99, 1234ULL},
{1024, 1024 - 1, -1, 0.f, GenKiss99, 1234ULL},
{1024, 512 + 2, -1, 0.f, GenKiss99, 1234ULL},
{1024, 1024 - 2, -1, 0.f, GenKiss99, 1234ULL},
{1024 + 1, 512, -1, 0.f, GenKiss99, 1234ULL},
{1024 + 1, 1024, -1, 0.f, GenKiss99, 1234ULL},
{1024 + 1, 512 + 1, -1, 0.f, GenKiss99, 1234ULL},
{1024 + 1, 1024 + 1, -1, 0.f, GenKiss99, 1234ULL},
{1024 + 1, 512 + 2, -1, 0.f, GenKiss99, 1234ULL},
{1024 + 1, 1024 - 2, -1, 0.f, GenKiss99, 1234ULL},
{1024 + 2, 512, -1, 0.f, GenKiss99, 1234ULL},
{1024 + 2, 1024, -1, 0.f, GenKiss99, 1234ULL},
{1024 + 2, 512 + 1, -1, 0.f, GenKiss99, 1234ULL},
{1024 + 2, 1024 + 1, -1, 0.f, GenKiss99, 1234ULL},
{1024 + 2, 512 + 2, -1, 0.f, GenKiss99, 1234ULL},
{1024 + 2, 1024 + 2, -1, 0.f, GenKiss99, 1234ULL},
{1024, 512, 10, 100000.f, GenKiss99, 1234ULL},
};
TEST_P(SWoRTestF, Result) {
std::set<int> occurence;
for (int i = 0; i < params.sampledLen; ++i) {
auto val = h_outIdx[i];
// indices must be in the given range
ASSERT_TRUE(0 <= val && val < params.len)
<< "out-of-range index @i=" << i << " val=" << val
<< " sampledLen=" << params.sampledLen;
// indices should not repeat
ASSERT_TRUE(occurence.find(val) == occurence.end())
<< "repeated index @i=" << i << " idx=" << val;
occurence.insert(val);
}
// if there's a skewed distribution, the top index should correspond to the
// particular item with a large weight
if (params.largeWeightIndex >= 0) {
ASSERT_EQ(h_outIdx[0], params.largeWeightIndex);
}
}
INSTANTIATE_TEST_CASE_P(SWoRTests, SWoRTestF, ::testing::ValuesIn(inputsf));
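// Added note (illustrative, not part of the original test): the largeWeightIndex
// cases above make the weight distribution extremely skewed -- with len = 1024,
// the ~1023 weights drawn from U(1, 2) sum to roughly 1.5 * 1023 ~= 1535, so a
// single weight of 100000 carries about 98.5% of the total mass. The ASSERT_EQ
// on h_outIdx[0] in the Result test therefore relies on the sampler returning
// that dominant item first.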
typedef SWoRTest<double> SWoRTestD;
const std::vector<SWoRInputs<double>> inputsd = {
{1024, 512, -1, 0.0, GenPhilox, 1234ULL},
{1024, 1024, -1, 0.0, GenPhilox, 1234ULL},
{1024, 512 + 1, -1, 0.0, GenPhilox, 1234ULL},
{1024, 1024 - 1, -1, 0.0, GenPhilox, 1234ULL},
{1024, 512 + 2, -1, 0.0, GenPhilox, 1234ULL},
{1024, 1024 - 2, -1, 0.0, GenPhilox, 1234ULL},
{1024 + 1, 512, -1, 0.0, GenPhilox, 1234ULL},
{1024 + 1, 1024, -1, 0.0, GenPhilox, 1234ULL},
{1024 + 1, 512 + 1, -1, 0.0, GenPhilox, 1234ULL},
{1024 + 1, 1024 + 1, -1, 0.0, GenPhilox, 1234ULL},
{1024 + 1, 512 + 2, -1, 0.0, GenPhilox, 1234ULL},
{1024 + 1, 1024 - 2, -1, 0.0, GenPhilox, 1234ULL},
{1024 + 2, 512, -1, 0.0, GenPhilox, 1234ULL},
{1024 + 2, 1024, -1, 0.0, GenPhilox, 1234ULL},
{1024 + 2, 512 + 1, -1, 0.0, GenPhilox, 1234ULL},
{1024 + 2, 1024 + 1, -1, 0.0, GenPhilox, 1234ULL},
{1024 + 2, 512 + 2, -1, 0.0, GenPhilox, 1234ULL},
{1024 + 2, 1024 + 2, -1, 0.0, GenPhilox, 1234ULL},
{1024, 512, 10, 100000.0, GenPhilox, 1234ULL},
{1024, 512, -1, 0.0, GenTaps, 1234ULL},
{1024, 1024, -1, 0.0, GenTaps, 1234ULL},
{1024, 512 + 1, -1, 0.0, GenTaps, 1234ULL},
{1024, 1024 - 1, -1, 0.0, GenTaps, 1234ULL},
{1024, 512 + 2, -1, 0.0, GenTaps, 1234ULL},
{1024, 1024 - 2, -1, 0.0, GenTaps, 1234ULL},
{1024 + 1, 512, -1, 0.0, GenTaps, 1234ULL},
{1024 + 1, 1024, -1, 0.0, GenTaps, 1234ULL},
{1024 + 1, 512 + 1, -1, 0.0, GenTaps, 1234ULL},
{1024 + 1, 1024 + 1, -1, 0.0, GenTaps, 1234ULL},
{1024 + 1, 512 + 2, -1, 0.0, GenTaps, 1234ULL},
{1024 + 1, 1024 - 2, -1, 0.0, GenTaps, 1234ULL},
{1024 + 2, 512, -1, 0.0, GenTaps, 1234ULL},
{1024 + 2, 1024, -1, 0.0, GenTaps, 1234ULL},
{1024 + 2, 512 + 1, -1, 0.0, GenTaps, 1234ULL},
{1024 + 2, 1024 + 1, -1, 0.0, GenTaps, 1234ULL},
{1024 + 2, 512 + 2, -1, 0.0, GenTaps, 1234ULL},
{1024 + 2, 1024 + 2, -1, 0.0, GenTaps, 1234ULL},
{1024, 512, 10, 100000.0, GenTaps, 1234ULL},
{1024, 512, -1, 0.0, GenKiss99, 1234ULL},
{1024, 1024, -1, 0.0, GenKiss99, 1234ULL},
{1024, 512 + 1, -1, 0.0, GenKiss99, 1234ULL},
{1024, 1024 - 1, -1, 0.0, GenKiss99, 1234ULL},
{1024, 512 + 2, -1, 0.0, GenKiss99, 1234ULL},
{1024, 1024 - 2, -1, 0.0, GenKiss99, 1234ULL},
{1024 + 1, 512, -1, 0.0, GenKiss99, 1234ULL},
{1024 + 1, 1024, -1, 0.0, GenKiss99, 1234ULL},
{1024 + 1, 512 + 1, -1, 0.0, GenKiss99, 1234ULL},
{1024 + 1, 1024 + 1, -1, 0.0, GenKiss99, 1234ULL},
{1024 + 1, 512 + 2, -1, 0.0, GenKiss99, 1234ULL},
{1024 + 1, 1024 - 2, -1, 0.0, GenKiss99, 1234ULL},
{1024 + 2, 512, -1, 0.0, GenKiss99, 1234ULL},
{1024 + 2, 1024, -1, 0.0, GenKiss99, 1234ULL},
{1024 + 2, 512 + 1, -1, 0.0, GenKiss99, 1234ULL},
{1024 + 2, 1024 + 1, -1, 0.0, GenKiss99, 1234ULL},
{1024 + 2, 512 + 2, -1, 0.0, GenKiss99, 1234ULL},
{1024 + 2, 1024 + 2, -1, 0.0, GenKiss99, 1234ULL},
{1024, 512, 10, 100000.0, GenKiss99, 1234ULL},
};
TEST_P(SWoRTestD, Result) {
std::set<int> occurence;
for (int i = 0; i < params.sampledLen; ++i) {
auto val = h_outIdx[i];
// indices must be in the given range
ASSERT_TRUE(0 <= val && val < params.len)
<< "out-of-range index @i=" << i << " val=" << val
<< " sampledLen=" << params.sampledLen;
// indices should not repeat
ASSERT_TRUE(occurence.find(val) == occurence.end())
<< "repeated index @i=" << i << " idx=" << val;
occurence.insert(val);
}
// if there's a skewed distribution, the top index should correspond to the
// particular item with a large weight
if (params.largeWeightIndex >= 0) {
ASSERT_EQ(h_outIdx[0], params.largeWeightIndex);
}
}
INSTANTIATE_TEST_CASE_P(SWoRTests, SWoRTestD, ::testing::ValuesIn(inputsd));
} // end namespace Random
} // end namespace MLCommon
|
809fa78c4b37b8f5b1061af8e98565b1141cd4dc.cu
|
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <set>
#include <vector>
#include "cuda_utils.h"
#include "random/rng.h"
#include "test_utils.h"
namespace MLCommon {
namespace Random {
// Terminology:
// SWoR - Sample Without Replacement
template <typename T>
struct SWoRInputs {
int len, sampledLen;
int largeWeightIndex;
T largeWeight;
GeneratorType gtype;
unsigned long long int seed;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const SWoRInputs<T>& dims) {
return os;
}
template <typename T>
class SWoRTest : public ::testing::TestWithParam<SWoRInputs<T>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<SWoRInputs<T>>::GetParam();
CUDA_CHECK(cudaStreamCreate(&stream));
allocator.reset(new defaultDeviceAllocator);
Rng r(params.seed, params.gtype);
allocate(in, params.len);
allocate(wts, params.len);
allocate(out, params.sampledLen);
allocate(outIdx, params.sampledLen);
h_outIdx.resize(params.sampledLen);
r.uniform(in, params.len, T(-1.0), T(1.0), stream);
r.uniform(wts, params.len, T(1.0), T(2.0), stream);
if (params.largeWeightIndex >= 0) {
updateDevice(wts + params.largeWeightIndex, &params.largeWeight, 1,
stream);
}
r.sampleWithoutReplacement(out, outIdx, in, wts, params.sampledLen,
params.len, allocator, stream);
updateHost(&(h_outIdx[0]), outIdx, params.sampledLen, stream);
}
void TearDown() override {
CUDA_CHECK(cudaStreamSynchronize(stream));
CUDA_CHECK(cudaStreamDestroy(stream));
CUDA_CHECK(cudaFree(in));
CUDA_CHECK(cudaFree(wts));
CUDA_CHECK(cudaFree(out));
CUDA_CHECK(cudaFree(outIdx));
}
protected:
SWoRInputs<T> params;
T *in, *out, *wts;
int* outIdx;
std::vector<int> h_outIdx;
cudaStream_t stream;
std::shared_ptr<deviceAllocator> allocator;
};
typedef SWoRTest<float> SWoRTestF;
const std::vector<SWoRInputs<float>> inputsf = {
{1024, 512, -1, 0.f, GenPhilox, 1234ULL},
{1024, 1024, -1, 0.f, GenPhilox, 1234ULL},
{1024, 512 + 1, -1, 0.f, GenPhilox, 1234ULL},
{1024, 1024 - 1, -1, 0.f, GenPhilox, 1234ULL},
{1024, 512 + 2, -1, 0.f, GenPhilox, 1234ULL},
{1024, 1024 - 2, -1, 0.f, GenPhilox, 1234ULL},
{1024 + 1, 512, -1, 0.f, GenPhilox, 1234ULL},
{1024 + 1, 1024, -1, 0.f, GenPhilox, 1234ULL},
{1024 + 1, 512 + 1, -1, 0.f, GenPhilox, 1234ULL},
{1024 + 1, 1024 + 1, -1, 0.f, GenPhilox, 1234ULL},
{1024 + 1, 512 + 2, -1, 0.f, GenPhilox, 1234ULL},
{1024 + 1, 1024 - 2, -1, 0.f, GenPhilox, 1234ULL},
{1024 + 2, 512, -1, 0.f, GenPhilox, 1234ULL},
{1024 + 2, 1024, -1, 0.f, GenPhilox, 1234ULL},
{1024 + 2, 512 + 1, -1, 0.f, GenPhilox, 1234ULL},
{1024 + 2, 1024 + 1, -1, 0.f, GenPhilox, 1234ULL},
{1024 + 2, 512 + 2, -1, 0.f, GenPhilox, 1234ULL},
{1024 + 2, 1024 + 2, -1, 0.f, GenPhilox, 1234ULL},
{1024, 512, 10, 100000.f, GenPhilox, 1234ULL},
{1024, 512, -1, 0.f, GenTaps, 1234ULL},
{1024, 1024, -1, 0.f, GenTaps, 1234ULL},
{1024, 512 + 1, -1, 0.f, GenTaps, 1234ULL},
{1024, 1024 - 1, -1, 0.f, GenTaps, 1234ULL},
{1024, 512 + 2, -1, 0.f, GenTaps, 1234ULL},
{1024, 1024 - 2, -1, 0.f, GenTaps, 1234ULL},
{1024 + 1, 512, -1, 0.f, GenTaps, 1234ULL},
{1024 + 1, 1024, -1, 0.f, GenTaps, 1234ULL},
{1024 + 1, 512 + 1, -1, 0.f, GenTaps, 1234ULL},
{1024 + 1, 1024 + 1, -1, 0.f, GenTaps, 1234ULL},
{1024 + 1, 512 + 2, -1, 0.f, GenTaps, 1234ULL},
{1024 + 1, 1024 - 2, -1, 0.f, GenTaps, 1234ULL},
{1024 + 2, 512, -1, 0.f, GenTaps, 1234ULL},
{1024 + 2, 1024, -1, 0.f, GenTaps, 1234ULL},
{1024 + 2, 512 + 1, -1, 0.f, GenTaps, 1234ULL},
{1024 + 2, 1024 + 1, -1, 0.f, GenTaps, 1234ULL},
{1024 + 2, 512 + 2, -1, 0.f, GenTaps, 1234ULL},
{1024 + 2, 1024 + 2, -1, 0.f, GenTaps, 1234ULL},
{1024, 512, 10, 100000.f, GenTaps, 1234ULL},
{1024, 512, -1, 0.f, GenKiss99, 1234ULL},
{1024, 1024, -1, 0.f, GenKiss99, 1234ULL},
{1024, 512 + 1, -1, 0.f, GenKiss99, 1234ULL},
{1024, 1024 - 1, -1, 0.f, GenKiss99, 1234ULL},
{1024, 512 + 2, -1, 0.f, GenKiss99, 1234ULL},
{1024, 1024 - 2, -1, 0.f, GenKiss99, 1234ULL},
{1024 + 1, 512, -1, 0.f, GenKiss99, 1234ULL},
{1024 + 1, 1024, -1, 0.f, GenKiss99, 1234ULL},
{1024 + 1, 512 + 1, -1, 0.f, GenKiss99, 1234ULL},
{1024 + 1, 1024 + 1, -1, 0.f, GenKiss99, 1234ULL},
{1024 + 1, 512 + 2, -1, 0.f, GenKiss99, 1234ULL},
{1024 + 1, 1024 - 2, -1, 0.f, GenKiss99, 1234ULL},
{1024 + 2, 512, -1, 0.f, GenKiss99, 1234ULL},
{1024 + 2, 1024, -1, 0.f, GenKiss99, 1234ULL},
{1024 + 2, 512 + 1, -1, 0.f, GenKiss99, 1234ULL},
{1024 + 2, 1024 + 1, -1, 0.f, GenKiss99, 1234ULL},
{1024 + 2, 512 + 2, -1, 0.f, GenKiss99, 1234ULL},
{1024 + 2, 1024 + 2, -1, 0.f, GenKiss99, 1234ULL},
{1024, 512, 10, 100000.f, GenKiss99, 1234ULL},
};
TEST_P(SWoRTestF, Result) {
std::set<int> occurence;
for (int i = 0; i < params.sampledLen; ++i) {
auto val = h_outIdx[i];
// indices must be in the given range
ASSERT_TRUE(0 <= val && val < params.len)
<< "out-of-range index @i=" << i << " val=" << val
<< " sampledLen=" << params.sampledLen;
// indices should not repeat
ASSERT_TRUE(occurence.find(val) == occurence.end())
<< "repeated index @i=" << i << " idx=" << val;
occurence.insert(val);
}
// if there's a skewed distribution, the top index should correspond to the
// particular item with a large weight
if (params.largeWeightIndex >= 0) {
ASSERT_EQ(h_outIdx[0], params.largeWeightIndex);
}
}
INSTANTIATE_TEST_CASE_P(SWoRTests, SWoRTestF, ::testing::ValuesIn(inputsf));
typedef SWoRTest<double> SWoRTestD;
const std::vector<SWoRInputs<double>> inputsd = {
{1024, 512, -1, 0.0, GenPhilox, 1234ULL},
{1024, 1024, -1, 0.0, GenPhilox, 1234ULL},
{1024, 512 + 1, -1, 0.0, GenPhilox, 1234ULL},
{1024, 1024 - 1, -1, 0.0, GenPhilox, 1234ULL},
{1024, 512 + 2, -1, 0.0, GenPhilox, 1234ULL},
{1024, 1024 - 2, -1, 0.0, GenPhilox, 1234ULL},
{1024 + 1, 512, -1, 0.0, GenPhilox, 1234ULL},
{1024 + 1, 1024, -1, 0.0, GenPhilox, 1234ULL},
{1024 + 1, 512 + 1, -1, 0.0, GenPhilox, 1234ULL},
{1024 + 1, 1024 + 1, -1, 0.0, GenPhilox, 1234ULL},
{1024 + 1, 512 + 2, -1, 0.0, GenPhilox, 1234ULL},
{1024 + 1, 1024 - 2, -1, 0.0, GenPhilox, 1234ULL},
{1024 + 2, 512, -1, 0.0, GenPhilox, 1234ULL},
{1024 + 2, 1024, -1, 0.0, GenPhilox, 1234ULL},
{1024 + 2, 512 + 1, -1, 0.0, GenPhilox, 1234ULL},
{1024 + 2, 1024 + 1, -1, 0.0, GenPhilox, 1234ULL},
{1024 + 2, 512 + 2, -1, 0.0, GenPhilox, 1234ULL},
{1024 + 2, 1024 + 2, -1, 0.0, GenPhilox, 1234ULL},
{1024, 512, 10, 100000.0, GenPhilox, 1234ULL},
{1024, 512, -1, 0.0, GenTaps, 1234ULL},
{1024, 1024, -1, 0.0, GenTaps, 1234ULL},
{1024, 512 + 1, -1, 0.0, GenTaps, 1234ULL},
{1024, 1024 - 1, -1, 0.0, GenTaps, 1234ULL},
{1024, 512 + 2, -1, 0.0, GenTaps, 1234ULL},
{1024, 1024 - 2, -1, 0.0, GenTaps, 1234ULL},
{1024 + 1, 512, -1, 0.0, GenTaps, 1234ULL},
{1024 + 1, 1024, -1, 0.0, GenTaps, 1234ULL},
{1024 + 1, 512 + 1, -1, 0.0, GenTaps, 1234ULL},
{1024 + 1, 1024 + 1, -1, 0.0, GenTaps, 1234ULL},
{1024 + 1, 512 + 2, -1, 0.0, GenTaps, 1234ULL},
{1024 + 1, 1024 - 2, -1, 0.0, GenTaps, 1234ULL},
{1024 + 2, 512, -1, 0.0, GenTaps, 1234ULL},
{1024 + 2, 1024, -1, 0.0, GenTaps, 1234ULL},
{1024 + 2, 512 + 1, -1, 0.0, GenTaps, 1234ULL},
{1024 + 2, 1024 + 1, -1, 0.0, GenTaps, 1234ULL},
{1024 + 2, 512 + 2, -1, 0.0, GenTaps, 1234ULL},
{1024 + 2, 1024 + 2, -1, 0.0, GenTaps, 1234ULL},
{1024, 512, 10, 100000.0, GenTaps, 1234ULL},
{1024, 512, -1, 0.0, GenKiss99, 1234ULL},
{1024, 1024, -1, 0.0, GenKiss99, 1234ULL},
{1024, 512 + 1, -1, 0.0, GenKiss99, 1234ULL},
{1024, 1024 - 1, -1, 0.0, GenKiss99, 1234ULL},
{1024, 512 + 2, -1, 0.0, GenKiss99, 1234ULL},
{1024, 1024 - 2, -1, 0.0, GenKiss99, 1234ULL},
{1024 + 1, 512, -1, 0.0, GenKiss99, 1234ULL},
{1024 + 1, 1024, -1, 0.0, GenKiss99, 1234ULL},
{1024 + 1, 512 + 1, -1, 0.0, GenKiss99, 1234ULL},
{1024 + 1, 1024 + 1, -1, 0.0, GenKiss99, 1234ULL},
{1024 + 1, 512 + 2, -1, 0.0, GenKiss99, 1234ULL},
{1024 + 1, 1024 - 2, -1, 0.0, GenKiss99, 1234ULL},
{1024 + 2, 512, -1, 0.0, GenKiss99, 1234ULL},
{1024 + 2, 1024, -1, 0.0, GenKiss99, 1234ULL},
{1024 + 2, 512 + 1, -1, 0.0, GenKiss99, 1234ULL},
{1024 + 2, 1024 + 1, -1, 0.0, GenKiss99, 1234ULL},
{1024 + 2, 512 + 2, -1, 0.0, GenKiss99, 1234ULL},
{1024 + 2, 1024 + 2, -1, 0.0, GenKiss99, 1234ULL},
{1024, 512, 10, 100000.0, GenKiss99, 1234ULL},
};
TEST_P(SWoRTestD, Result) {
std::set<int> occurence;
for (int i = 0; i < params.sampledLen; ++i) {
auto val = h_outIdx[i];
// indices must be in the given range
ASSERT_TRUE(0 <= val && val < params.len)
<< "out-of-range index @i=" << i << " val=" << val
<< " sampledLen=" << params.sampledLen;
// indices should not repeat
ASSERT_TRUE(occurence.find(val) == occurence.end())
<< "repeated index @i=" << i << " idx=" << val;
occurence.insert(val);
}
// if there's a skewed distribution, the top index should correspond to the
// particular item with a large weight
if (params.largeWeightIndex >= 0) {
ASSERT_EQ(h_outIdx[0], params.largeWeightIndex);
}
}
INSTANTIATE_TEST_CASE_P(SWoRTests, SWoRTestD, ::testing::ValuesIn(inputsd));
} // end namespace Random
} // end namespace MLCommon
|
d461b923e94db4972b7d638514bc1796e07578d8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//*LB*
// Copyright (c) 2009, Alexander Krizhevsky
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of the University of Toronto
// nor the names of its contributors may be used to endorse or promote
// products derived from this software without specific prior written
// permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//*LE*
/*
* nvmatrix.cu
*
* Created on: 20-Jan-2009
* Author: Alex Krizhevsky ([email protected])
*/
#include <assert.h>
#include <rocblas.h>
/*#include <cutil_inline.h>*/
#include "cuv/tools/cuv_general.hpp"
#include <stdlib.h>
#include <stdio.h>
#include <fstream>
#include <iostream>
#include <algorithm>
#include "nvmatrix.cuh"
using namespace std;
hipDeviceProp_t NVMatrix::deviceProps;
unsigned int NVMatrix::hostRndMults[NUM_RND_STREAMS];
bool NVMatrix::rndInitialized = false;
/*
* Device random number generator pointers.
*/
unsigned int *NVMatrix::devRndMults;
unsigned long long *NVMatrix::devRndWords;
void NVMatrix::initDeviceProps() {
int deviceCount;
cuvSafeCall(hipGetDeviceCount(&deviceCount));
if (deviceCount == 0) {
printf("There is no device supporting CUDA\n");
exit(EXIT_FAILURE);
}
cuvSafeCall(hipGetDeviceProperties(&deviceProps, 0));
}
void NVMatrix::_init(unsigned int numRows, unsigned int numCols) {
_numRows = numRows;
_numCols = numCols;
_numElements = numRows * numCols;
_ownsData = true;
/*
* By default, new matrices are in column-major order because that's how CUBLAS likes it.
*/
_isTrans = true;
_devData = NULL;
if (_numElements > 0) {
hipblasAlloc(_numElements, sizeof(float), (void**) &_devData);
checkCublasError("!!!! device memory allocation error\n");
}
}
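// Added note (illustrative): hipBLAS, like CUBLAS, assumes Fortran-style
// column-major storage, so defaulting _isTrans to true lets hipblasSgemm read
// _devData in place with a leading dimension equal to the number of rows
// (see getNumRows() used as ldc in rightMult below) instead of requiring a
// transpose before every GEMM call.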
NVMatrix::NVMatrix() {
_init(0, 0);
}
NVMatrix::NVMatrix(bool isTrans) {
_init(0, 0);
setTrans(isTrans);
}
NVMatrix::NVMatrix(int numRows, int numCols, bool isTrans) {
_init(numRows, numCols);
setTrans(isTrans);
}
NVMatrix::NVMatrix(const Matrix& like, bool copy) {
_init(like.getNumRows(), like.getNumCols());
_isTrans = like.isTrans();
if (copy) {
copyFromHost(like);
}
}
NVMatrix::NVMatrix(const NVMatrix& like, bool copy) {
_init(like.getNumRows(), like.getNumCols());
_isTrans = like.isTrans();
if(copy) {
copyFromDevice(like);
}
}
/*
* Initializes NVMatrix with same dimensions as given matrix but
* does not copy any data.
*/
NVMatrix::NVMatrix(const NVMatrix& like) {
_init(like.getNumRows(), like.getNumCols());
_isTrans = like.isTrans();
}
/*
* Initializes NVMatrix with same dimensions as given matrix but
* does not copy any data.
*/
NVMatrix::NVMatrix(const Matrix& like) {
_init(like.getNumRows(), like.getNumCols());
_isTrans = like.isTrans();
}
NVMatrix::NVMatrix(float* devData, int numRows, int numCols, bool isTrans) {
_numRows = numRows;
_numCols = numCols;
_numElements = numRows * numCols;
_ownsData = false;
_devData = devData;
_isTrans = isTrans;
}
NVMatrix::~NVMatrix() {
if(_ownsData && _numElements > 0) {
cublasStatus status = hipblasFree(_devData);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! memory free error\n");
exit(EXIT_FAILURE);
}
}
}
void NVMatrix::copyFromHost(const Matrix& hostMatrix, bool resizeDeviceMatrix) {
if(resizeDeviceMatrix) {
resize(hostMatrix);
}
copyFromHost(hostMatrix);
}
void NVMatrix::copyFromHost(const Matrix& hostMatrix) {
assert(isSameDims(hostMatrix));
cublasStatus status = hipblasSetVector(_numElements, sizeof(float), hostMatrix.getData(), 1, _devData, 1);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! device access error (write)\n");
exit( EXIT_FAILURE);
}
_isTrans = hostMatrix.isTrans();
}
void NVMatrix::copyFromDevice(const NVMatrix& devMatrix) {
assert(isSameDims(devMatrix));
hipblasScopy(_numElements,devMatrix._devData, 1, _devData,1);
checkCublasError("hipblasScopy failed");
_isTrans = devMatrix.isTrans();
}
void NVMatrix::copyFromDevice(const NVMatrix& devMatrix, bool resizeTarget) {
if (resizeTarget) {
resize(devMatrix);
}
copyFromDevice(devMatrix);
}
void NVMatrix::copyToHost(Matrix& hostMatrix) const {
assert(isSameDims(hostMatrix));
cublasStatus status = hipblasGetVector(_numElements, sizeof(float), _devData, 1, hostMatrix.getData(), 1);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! device access error (read)\n");
exit( EXIT_FAILURE);
}
hostMatrix.setTrans(_isTrans);
}
void NVMatrix::rightMult(const NVMatrix &b, float scaleAB, NVMatrix &target) const {
// assert(&target != &b);
assert(_numCols == b.getNumRows());
if(&target != this) {
target.resize(_numRows, b.getNumCols());
}
assert(target.getNumRows() == _numRows);
assert(target.getNumCols() == b.getNumCols());
if(_numRows % 64 != 0 || _numCols % 64 != 0 || b.getNumCols() % 64 != 0) {
WARN("Matrix dimensions not divisible by 64 -- hipblasSgemm performance may suffer.");
}
hipblasSgemm(getTransChar(), b.getTransChar(), _numRows, b.getNumCols(), _numCols,
scaleAB, _devData, getLeadingDim(), b.getDevData(), b.getLeadingDim(),
0, target.getDevData(), getNumRows());
checkCublasError("hipblasSgemm failed");
target._isTrans = true; //because target is now in col-major order
}
void NVMatrix::rightMult(const NVMatrix &b, float scaleAB) {
rightMult(b, scaleAB, *this);
}
void NVMatrix::rightMult(const NVMatrix &b, NVMatrix& target) const {
rightMult(b, 1, target);
}
/*
* This will only work if this matrix is in column-major order! In other words,
* if isTrans() returns true.
*/
void NVMatrix::addProduct(const NVMatrix& a, const NVMatrix &b, float scaleThis, float scaleAB) {
assert(a.getNumCols() == b.getNumRows());
assert(this->getNumRows() == a.getNumRows());
assert(this->getNumCols() == b.getNumCols());
assert(_isTrans);
if(a.getNumRows() % 64 != 0 || a.getNumCols() % 64 != 0 || b.getNumCols() % 64 != 0) {
WARN("Matrix dimensions not divisible by 64 -- hipblasSgemm performance may suffer.");
}
hipblasSgemm(a.getTransChar(), b.getTransChar(), a.getNumRows(), b.getNumCols(), a.getNumCols(),
scaleAB, a.getDevData(), a.getLeadingDim(), b.getDevData(), b.getLeadingDim(),
scaleThis, _devData, getLeadingDim());
checkCublasError("hipblasSgemm failed");
}
void NVMatrix::addProduct(const NVMatrix& a, const NVMatrix &b) {
addProduct(a, b, 1, 1);
}
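// Usage sketch (added; illustrative only, not part of the original source):
// addProduct accumulates into *this, so the destination must be preallocated
// and column-major. Assuming a and b already hold data:
//
//   NVMatrix c(a.getNumRows(), b.getNumCols(), /*isTrans=*/true);
//   c.addProduct(a, b, 0.0f, 1.0f);   // c = a*b (beta == 0, prior contents ignored)
//   c.addProduct(a, b, 1.0f, 0.5f);   // c += 0.5 * (a*b)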
void NVMatrix::apply(NVMatrix::FUNCTIONS f, NVMatrix& target, int numBlocks, int numThreadsPerBlock) {
target.resize(*this);
target._isTrans = _isTrans;
dim3 grid( numBlocks, 1, 1);
dim3 threads( numThreadsPerBlock, 1, 1);
if(f == NVMatrix::EXP) {
hipLaunchKernelGGL(( kExp), dim3(grid), dim3(threads), 0, 0, _devData, target._devData, _numElements);
} else if (f == NVMatrix::LOGISTIC1) {
hipLaunchKernelGGL(( kLogistic1), dim3(grid), dim3(threads), 0, 0, _devData, target._devData, _numElements);
} else if (f == NVMatrix::LOGISTIC2) {
hipLaunchKernelGGL(( kLogistic2), dim3(grid), dim3(threads), 0, 0, _devData, target._devData, _numElements);
} else if (f == NVMatrix::SQUARE) {
hipLaunchKernelGGL(( kSquare), dim3(grid), dim3(threads), 0, 0, _devData, target._devData, _numElements);
} else if (f == NVMatrix::SQRT) {
hipLaunchKernelGGL(( kSqrt), dim3(grid), dim3(threads), 0, 0, _devData, target._devData, _numElements);
} else if (f == NVMatrix::ZERO) {
hipLaunchKernelGGL(( kZero), dim3(grid), dim3(threads), 0, 0, _devData, target._devData, _numElements);
} else if(f == NVMatrix::RECIPROCAL) {
hipLaunchKernelGGL(( kReciprocal), dim3(grid), dim3(threads), 0, 0, _devData, target._devData, _numElements);
} else if(f == NVMatrix::LOG) {
hipLaunchKernelGGL(( kLog), dim3(grid), dim3(threads), 0, 0, _devData, target._devData, _numElements);
} else if(f == NVMatrix::SIGN) {
hipLaunchKernelGGL(( kSign), dim3(grid), dim3(threads), 0, 0, _devData, target._devData, _numElements);
}
cuvSafeCall(hipDeviceSynchronize());
}
void NVMatrix::apply(NVMatrix::FUNCTIONS f, int numBlocks, int numThreadsPerBlock) {
apply(f, *this, numBlocks, numThreadsPerBlock);
}
/*
* The random number generator uses the multiply with carry algorithm. I got the
* multipliers from a site I can't find anymore.
*/
void NVMatrix::initRandom(unsigned int seed) {
assert(!rndInitialized);
ifstream inFile;
inFile.open(RND_MULTIPLIERS_FILE);
if(!inFile) {
std::cerr << "Unable to open file " << RND_MULTIPLIERS_FILE << std::endl;
exit(EXIT_FAILURE);
}
unsigned int mult;
for (int numRead = 0; numRead < NUM_RND_STREAMS; numRead++) {
if (!(inFile >> mult)) {
std::cerr << "Not enough numbers in file " << RND_MULTIPLIERS_FILE << std::endl;
exit(EXIT_FAILURE);
}
hostRndMults[numRead] = mult;
}
inFile.close();
cuvSafeCall(hipMalloc((void **)&devRndMults, NUM_RND_STREAMS * sizeof(unsigned int)));
cuvSafeCall(hipMalloc((void **)&devRndWords, NUM_RND_STREAMS * sizeof(unsigned long long)));
cuvSafeCall(hipMemcpy(devRndMults, hostRndMults, NUM_RND_STREAMS * sizeof(unsigned int), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( kSeedRandom), dim3(NUM_RND_BLOCKS), dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, devRndMults, devRndWords, seed);
cuvSafeCall(hipDeviceSynchronize());
rndInitialized = true;
}
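/*
 * Added sketch (illustrative, not part of the original file): each stream's
 * state above is one 64-bit word (value plus carry) and one 32-bit multiplier.
 * A host-side lag-1 multiply-with-carry step, assumed to mirror what the
 * device kernels referenced here do, looks like the following (the function
 * name is made up for this sketch and is not used elsewhere):
 */
static inline unsigned int hostMwcNextSketch(unsigned long long& word, unsigned int mult) {
    // next word = mult * low32(word) + high32(word); the low 32 bits are the draw
    word = (unsigned long long) mult * (unsigned int) word + (word >> 32);
    return (unsigned int) word;
}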
void NVMatrix::destroyRandom() {
assert(rndInitialized);
cuvSafeCall(hipFree(devRndMults));
cuvSafeCall(hipFree(devRndWords));
rndInitialized = false;
}
void NVMatrix::binarizeProbs() {
assert(rndInitialized);
hipLaunchKernelGGL(( kBinarizeProbs), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, devRndMults, devRndWords, _devData,_numElements);
cuvSafeCall(hipDeviceSynchronize());
}
void NVMatrix::randomizeUniform() {
assert(rndInitialized);
hipLaunchKernelGGL(( kRandomUniform), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, devRndMults, devRndWords, _devData,_numElements);
cuvSafeCall(hipDeviceSynchronize());
}
void NVMatrix::randomizeGaussian() {
randomizeGaussian(1);
}
void NVMatrix::randomizeGaussian(float stdev) {
assert(rndInitialized);
hipLaunchKernelGGL(( kRandomGaussian), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, devRndMults, devRndWords, _devData, stdev, _numElements);
cuvSafeCall(hipDeviceSynchronize());
}
void NVMatrix::randomizeGaussian(NVMatrix& stdevs) {
assert(rndInitialized);
assert(stdevs.getNumElements() == _numElements);
assert(stdevs.isTrans() == isTrans());
hipLaunchKernelGGL(( kRandomGaussian), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, devRndMults, devRndWords, _devData, stdevs.getDevData(), _numElements);
cuvSafeCall(hipDeviceSynchronize());
}
void NVMatrix::addGaussianNoise() {
addGaussianNoise(1);
}
void NVMatrix::addGaussianNoise(float stdev) {
assert(rndInitialized);
// assert(_numElements % 2 == 0);
hipLaunchKernelGGL(( kAddGaussianNoise), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, devRndMults, devRndWords, _devData,stdev,_numElements);
cuvSafeCall(hipDeviceSynchronize());
}
void NVMatrix::addGaussianNoise(NVMatrix& stdevs) {
assert(rndInitialized);
// assert(_numElements % 2 == 0);
assert(stdevs.getNumElements() == _numElements);
assert(stdevs.isTrans() == isTrans());
hipLaunchKernelGGL(( kAddGaussianNoise), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, devRndMults, devRndWords, _devData,stdevs.getDevData(),_numElements);
cuvSafeCall(hipDeviceSynchronize());
}
void NVMatrix::biggerThanScalar(float scalar) {
biggerThanScalar(scalar, *this);
}
void NVMatrix::biggerThanScalar(float scalar, NVMatrix& target) {
target.resize(*this);
hipLaunchKernelGGL(( kBiggerThanScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, _devData,scalar,target._devData,_numElements);
cuvSafeCall(hipDeviceSynchronize());
}
void NVMatrix::inRangeInc(float lower, float upper) {
inRangeInc(lower, upper, *this);
}
void NVMatrix::inRangeInc(float lower, float upper, NVMatrix& target) {
target.resize(*this);
hipLaunchKernelGGL(( kInRangeInc), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, _devData,lower, upper,target._devData,_numElements);
cuvSafeCall(hipDeviceSynchronize());
}
void NVMatrix::inRangeExc(float lower, float upper) {
inRangeExc(lower, upper, *this);
}
void NVMatrix::inRangeExc(float lower, float upper, NVMatrix& target) {
target.resize(*this);
hipLaunchKernelGGL(( kInRangeExc), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, _devData,lower, upper,target._devData,_numElements);
cuvSafeCall(hipDeviceSynchronize());
}
void NVMatrix::smallerThanScalar(float scalar, NVMatrix& target) {
target.resize(*this);
hipLaunchKernelGGL(( kSmallerThanScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, _devData,scalar,target._devData,_numElements);
cuvSafeCall(hipDeviceSynchronize());
}
void NVMatrix::biggerThan(NVMatrix& m, NVMatrix& target, int numBlocks, int numThreadsPerBlock) {
assert(isSameDims(m));
target.resize(*this);
for (unsigned int elementsDone = 0; elementsDone < _numElements; elementsDone += numBlocks*numThreadsPerBlock) {
hipLaunchKernelGGL(( kBiggerThan), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, _devData + elementsDone,
m._devData + elementsDone, target._devData + elementsDone,
_numElements - elementsDone);
}
cuvSafeCall(hipDeviceSynchronize());
}
void NVMatrix::biggerThan(NVMatrix& m, int numBlocks, int numThreadsPerBlock) {
biggerThan(m, *this, numBlocks, numThreadsPerBlock);
}
void NVMatrix::biggerThanVector(NVMatrix& vec, NVMatrix& target) {
if(&target == &vec && &target != this) { // because we manipulate target to be like this
vec.biggerThanVector(*this);
return;
}
assert(vec.getNumRows() == 1 || vec.getNumCols() == 1);
assert(vec.getNumRows() == _numRows || vec.getNumCols() == _numCols);
target.resize(*this);
const unsigned int width = _isTrans ? _numRows : _numCols;
const unsigned int height = _isTrans ? _numCols : _numRows;
if(vec.getNumRows() == _numRows && !isTrans() || vec.getNumCols() == _numCols && isTrans()) {
hipLaunchKernelGGL(( kBiggerThanColVector), dim3(NUM_ADD_VECTOR_BLOCKS),dim3(NUM_ADD_VECTOR_THREADS_PER_BLOCK), 0, 0, _devData, vec._devData, target._devData, width, height);
} else {
hipLaunchKernelGGL(( kBiggerThanRowVector), dim3(NUM_ADD_VECTOR_BLOCKS),dim3(NUM_ADD_VECTOR_THREADS_PER_BLOCK), 0, 0, _devData, vec._devData, target._devData, width, height);
}
cuvSafeCall(hipDeviceSynchronize());
}
void NVMatrix::biggerThanVector(NVMatrix& vec) {
biggerThanVector(vec, *this);
}
void NVMatrix::_checkBounds(int startRow, int endRow, int startCol, int endCol) const {
assert(startRow >= 0 && startRow <= _numRows);
assert(endRow >= 0 && endRow <= _numRows);
assert(startCol >= 0 && startCol <= _numCols);
assert(endCol >= 0 && endCol <= _numCols);
}
NVMatrix& NVMatrix::slice(int startRow, int endRow, int startCol, int endCol) const {
endRow = endRow < 0 ? this->_numRows : endRow;
endCol = endCol < 0 ? this->_numCols : endCol;
_checkBounds(startRow, endRow, startCol, endCol);
if (!isTrans() && ((startCol == 0 && endCol == this->_numCols) || startRow == endRow - 1)) {
return *new NVMatrix(this->_devData + startRow * this->_numCols + startCol, endRow - startRow, endCol - startCol, false);
} else if(isTrans() && ((startRow == 0 && endRow == this->_numRows) || startCol == endCol - 1)) {
return *new NVMatrix(this->_devData + startCol * this->_numRows + startRow, endRow - startRow, endCol - startCol, true);
}
WARN("Slice: result will not be a view.");
NVMatrix& newSlice = *new NVMatrix(endRow - startRow, endCol - startCol);
this->copy(newSlice, startRow, endRow, startCol, endCol, 0, 0);
return newSlice;
}
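// Added note (illustrative): a pointer-aliasing view is only possible when the
// requested cells are contiguous on the device. For a row-major (isTrans() ==
// false) 4x4 matrix, rows 1..3 with every column occupy one contiguous run of
// 12 floats starting at offset 4, so the branch above returns a view; a block
// of interior columns spanning several rows is strided, so the WARN-and-copy
// path runs instead.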
/* this will NEVER return a view, unlike Matrix_slice */
void NVMatrix::slice(int startRow, int endRow, int startCol, int endCol, NVMatrix& target) const {
endRow = endRow < 0 ? this->_numRows : endRow;
endCol = endCol < 0 ? this->_numCols : endCol;
_checkBounds(startRow, endRow, startCol, endCol);
target.resize(endRow - startRow, endCol - startCol);
target._isTrans = _isTrans;
this->copy(target, startRow, endRow, startCol, endCol, 0, 0);
}
NVMatrix& NVMatrix::sliceRows(int startRow, int endRow) const {
return slice(startRow, endRow, 0, -1);
}
void NVMatrix::sliceRows(int startRow, int endRow, NVMatrix& target) const {
slice(startRow, endRow, 0, -1, target);
}
NVMatrix& NVMatrix::sliceCols(int startCol, int endCol) const {
return slice(0, -1, startCol, endCol);
}
void NVMatrix::sliceCols(int startCol, int endCol, NVMatrix& target) const {
slice(0, -1, startCol, endCol, target);
}
/*
* Guaranteed to not change the data if the number of elements doesn't change.
* So you can use this to "reshape" a matrix.
*/
bool NVMatrix::resize(int numRows, int numCols) {
bool reallocated = false;
if (numRows != _numRows || numCols != _numCols) {
assert(_ownsData);
if (_numElements != numRows * numCols) {
cublasStatus status = hipblasFree(_devData);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! memory free error\n");
exit(EXIT_FAILURE);
}
status = hipblasAlloc(numCols * numRows, sizeof(float), (void**) &_devData);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! device memory allocation error\n");
exit(EXIT_FAILURE);
}
reallocated = true;
}
_numRows = numRows;
_numCols = numCols;
_numElements = numRows * numCols;
}
return reallocated;
}
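// Usage note (added; illustrative): because the buffer is kept whenever
// numRows * numCols is unchanged, resize() doubles as a cheap reshape, e.g.
//
//   NVMatrix m(6, 4);
//   m.resize(8, 3);   // same 24 floats, new logical shape, returns false
//
// reshape()/reshaped() below make the same guarantee explicit by asserting
// that the element count does not change.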
bool NVMatrix::resize(const NVMatrix& like) {
bool r = resize(like.getNumRows(), like.getNumCols());
_isTrans = like._isTrans;
return r;
}
bool NVMatrix::resize(const Matrix& like) {
bool r = resize(like.getNumRows(), like.getNumCols());
_isTrans = like.isTrans();
return r;
}
void NVMatrix::reshape(int numRows, int numCols) {
assert(_numElements == numRows*numCols);
_numRows = numRows;
_numCols = numCols;
}
NVMatrix& NVMatrix::reshaped(int numRows, int numCols) {
assert(_numElements == numRows*numCols);
return *new NVMatrix(_devData, numRows, numCols, _isTrans);
}
void NVMatrix::copy(NVMatrix &dest, int srcStartRow, int srcEndRow,
int srcStartCol, int srcEndCol,
int destStartRow, int destStartCol) const {
srcEndRow = srcEndRow < 0 ? this->_numRows : srcEndRow;
srcEndCol = srcEndCol < 0 ? this->_numCols : srcEndCol;
assert(destStartRow >= 0 && destStartCol >= 0); //some range-checking
assert(srcEndRow <= _numRows && srcEndCol <= _numCols);
assert(destStartRow + srcEndRow - srcStartRow <= dest.getNumRows());
assert(destStartCol + srcEndCol - srcStartCol <= dest.getNumCols());
const int srcJumpWidth = !_isTrans ? getNumCols() : getNumRows();
const int destJumpWidth = !dest._isTrans ? dest.getNumCols() : dest.getNumRows();
float* srcStartPtr = getCellPtr(srcStartRow, srcStartCol);
float* destStartPtr = dest.getCellPtr(destStartRow, destStartCol);
if (isTrans() != dest.isTrans()) {
// copyWidth here refers to dest
const int copyWidth = !dest._isTrans ? srcEndCol - srcStartCol : srcEndRow - srcStartRow;
const int copyHeight = !dest._isTrans ? srcEndRow - srcStartRow : srcEndCol - srcStartCol;
//call copy kernel for transposed matrices
// const int width = dest.isTrans() ? _numRows : _numCols;
// const int height = dest.isTrans() ? _numCols : _numRows;
const bool checkBounds = !(copyWidth % ADD_BLOCK_SIZE == 0 && copyHeight % ADD_BLOCK_SIZE == 0);
const int numBlocksX = DIVUP(copyWidth, ADD_BLOCK_SIZE);
assert(numBlocksX < NUM_BLOCKS_MAX);
const int numBlocksY = ::max(1, ::min(DIVUP(copyHeight, ADD_BLOCK_SIZE), NUM_BLOCKS_MAX));
dim3 gridSize(numBlocksX, numBlocksY, 1);
dim3 blockSize(ADD_BLOCK_SIZE, ADD_BLOCK_SIZE, 1);
int numRowsCopied = 0;
// printf("calling kCopyTransFast\n");
while (numRowsCopied < copyHeight) {
if (checkBounds) {
hipLaunchKernelGGL(( kCopyTransFast<true>), dim3(gridSize), dim3(blockSize), 0, 0, &destStartPtr[numRowsCopied * destJumpWidth],
&srcStartPtr[numRowsCopied], copyWidth, copyHeight - numRowsCopied, destJumpWidth, srcJumpWidth);
} else {
hipLaunchKernelGGL(( kCopyTransFast<false>), dim3(gridSize), dim3(blockSize), 0, 0, &destStartPtr[numRowsCopied * destJumpWidth],
&srcStartPtr[numRowsCopied], copyWidth, copyHeight - numRowsCopied, destJumpWidth, srcJumpWidth);
}
cuvSafeCall(hipDeviceSynchronize());
numRowsCopied += gridSize.y * ADD_BLOCK_SIZE;
gridSize.y = ::max(1, min(DIVUP(copyHeight-numRowsCopied, ADD_BLOCK_SIZE), NUM_BLOCKS_MAX));
}
} else {
// copyWidth here refers to src
const int copyWidth = !_isTrans ? srcEndCol - srcStartCol : srcEndRow - srcStartRow;
const int copyHeight = !_isTrans ? srcEndRow - srcStartRow : srcEndCol - srcStartCol;
const int numToCopy = copyWidth * copyHeight;
hipLaunchKernelGGL(( kCopy), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, srcStartPtr, destStartPtr, copyWidth, srcJumpWidth, destJumpWidth, numToCopy);
}
}
NVMatrix& NVMatrix::getTranspose() {
NVMatrix* trans = new NVMatrix(_devData, _numCols, _numRows, !_isTrans);
return *trans;
}
/*
* Flips the ordering of the matrix from row-major to column-major and vice versa.
* This creates temporary storage -- not a cheap operation.
*
* This is not equivalent to a "hard transpose". The resultant matrix still has
* the same dimensions, its layout in memory just changes.
*/
void NVMatrix::flipTrans() {
NVMatrix* meTrans = new NVMatrix(*this);
// assert(_numCols % ADD_BLOCK_SIZE == 0 && _numRows % ADD_BLOCK_SIZE == 0);
const int width = isTrans() ? _numRows : _numCols;
const int height = isTrans() ? _numCols : _numRows;
const int numBlocksX = DIVUP(width, ADD_BLOCK_SIZE);
const int numBlocksY = DIVUP(height, ADD_BLOCK_SIZE);
assert(numBlocksX < NUM_BLOCKS_MAX && numBlocksY < NUM_BLOCKS_MAX);
dim3 gridSize(numBlocksX, numBlocksY, 1);
dim3 blockSize(ADD_BLOCK_SIZE, ADD_BLOCK_SIZE, 1);
hipLaunchKernelGGL(( kTranspose), dim3(gridSize), dim3(blockSize), 0, 0, _devData, meTrans->_devData, width, height);
cuvSafeCall(hipDeviceSynchronize());
copyFromDevice(*meTrans);
this->_isTrans = !this->_isTrans;
delete meTrans;
}
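// Added example (illustrative): for a 2x3 matrix A = [[1,2,3],[4,5,6]],
// row-major storage is 1 2 3 4 5 6 while column-major storage is 1 4 2 5 3 6.
// flipTrans() rewrites device memory from one ordering to the other and then
// toggles _isTrans, so A still reports 2 rows and 3 columns afterwards; only
// the layout (and the cost of the temporary copy) changes.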
void NVMatrix::squaredDiff(NVMatrix& b) {
squaredDiff(b, *this);
}
void NVMatrix::squaredDiff(NVMatrix& b, NVMatrix& target) {
assert(this->isSameDims(b));
assert(&target != &b);
target.resize(*this);
const int width = isTrans() ? _numRows : _numCols;
const int height = isTrans() ? _numCols : _numRows;
if (_isTrans != b._isTrans) {
const bool checkBounds = !(width % ADD_BLOCK_SIZE == 0 && height % ADD_BLOCK_SIZE == 0);
const int numBlocksX = DIVUP(width, ADD_BLOCK_SIZE);
assert(numBlocksX < NUM_BLOCKS_MAX);
const int numBlocksY = ::max(1, ::min(DIVUP(height, ADD_BLOCK_SIZE), NUM_BLOCKS_MAX));
dim3 gridSize(numBlocksX, numBlocksY, 1);
dim3 blockSize(ADD_BLOCK_SIZE, ADD_BLOCK_SIZE, 1);
int numRowsAdded = 0;
float* aData = _devData, *bData = b._devData, *destData = target._devData;
// printf("calling trans sq diff\n");
while (numRowsAdded < height) {
if(checkBounds) {
hipLaunchKernelGGL(( kSquaredDiffTransFast<true>), dim3(gridSize), dim3(blockSize), 0, 0, aData, bData, destData, width, height - numRowsAdded, height);
} else {
hipLaunchKernelGGL(( kSquaredDiffTransFast<false>), dim3(gridSize), dim3(blockSize), 0, 0, aData, bData, destData, width, height - numRowsAdded, height);
}
cuvSafeCall(hipDeviceSynchronize());
numRowsAdded += gridSize.y * ADD_BLOCK_SIZE;
gridSize.y = ::max(1, ::min(DIVUP(height-numRowsAdded, ADD_BLOCK_SIZE), NUM_BLOCKS_MAX));
aData += numRowsAdded * width;
bData += b._isTrans != _isTrans ? numRowsAdded : numRowsAdded * width;
destData += numRowsAdded * width;
}
} else {
// printf("calling plain sq diff\n");
hipLaunchKernelGGL(( kSquaredDiff), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, _devData, b._devData, target._devData,_numElements);
cuvSafeCall(hipDeviceSynchronize());
}
}
void NVMatrix::addSum(NVMatrix& b, NVMatrix& c, float scaleThis, float scaleB, float scaleC) {
assert(this->isSameDims(b));
assert(this->isSameDims(c));
const int width = isTrans() ? _numRows : _numCols;
const int height = isTrans() ? _numCols : _numRows;
if((_isTrans != b._isTrans || _isTrans != c._isTrans) && min(_numRows, _numCols) > 1) {
bool checkBounds = !(width % ADD_BLOCK_SIZE == 0 && height % ADD_BLOCK_SIZE == 0);
const int numBlocksX = DIVUP(width, ADD_BLOCK_SIZE);
assert(numBlocksX < NUM_BLOCKS_MAX);
const int numBlocksY = ::max(1, min(DIVUP(height, ADD_BLOCK_SIZE), NUM_BLOCKS_MAX));
dim3 gridSize(numBlocksX, numBlocksY, 1);
dim3 blockSize(ADD_BLOCK_SIZE, ADD_BLOCK_SIZE, 1);
int numRowsAdded = 0;
float* aData = _devData, *bData = b._devData, *cData = c._devData;
const bool transB = b._isTrans != _isTrans, transC = c._isTrans != _isTrans;
while (numRowsAdded < height) {
if(transB) {
if(transC) {
if(checkBounds) {
hipLaunchKernelGGL(( kAddTrans3Fast<true, true, true>), dim3(gridSize), dim3(blockSize), 0, 0, aData, bData, cData,width, height - numRowsAdded, height,
scaleThis, scaleB, scaleC);
} else {
hipLaunchKernelGGL(( kAddTrans3Fast<false, true, true>), dim3(gridSize), dim3(blockSize), 0, 0, aData, bData, cData,width, height - numRowsAdded, height,
scaleThis, scaleB, scaleC);
}
} else {
if(checkBounds) {
hipLaunchKernelGGL(( kAddTrans3Fast<true, true, false>), dim3(gridSize), dim3(blockSize), 0, 0, aData, bData, cData,width, height - numRowsAdded, height,
scaleThis, scaleB, scaleC);
} else {
hipLaunchKernelGGL(( kAddTrans3Fast<false, true, false>), dim3(gridSize), dim3(blockSize), 0, 0, aData, bData, cData,width, height - numRowsAdded, height,
scaleThis, scaleB, scaleC);
}
}
} else {
if(transC) {
if(checkBounds) {
hipLaunchKernelGGL(( kAddTrans3Fast<true, false, true>), dim3(gridSize), dim3(blockSize), 0, 0, aData, bData, cData,width, height - numRowsAdded, height,
scaleThis, scaleB, scaleC);
} else {
hipLaunchKernelGGL(( kAddTrans3Fast<false, false, true>), dim3(gridSize), dim3(blockSize), 0, 0, aData, bData, cData,width, height - numRowsAdded, height,
scaleThis, scaleB, scaleC);
}
} else {
if(checkBounds) {
hipLaunchKernelGGL(( kAddTrans3Fast<true, false, false>), dim3(gridSize), dim3(blockSize), 0, 0, aData, bData, cData,width, height - numRowsAdded, height,
scaleThis, scaleB, scaleC);
} else {
hipLaunchKernelGGL(( kAddTrans3Fast<false, false, false>), dim3(gridSize), dim3(blockSize), 0, 0, aData, bData, cData,width, height - numRowsAdded, height,
scaleThis, scaleB, scaleC);
}
}
}
cuvSafeCall(hipDeviceSynchronize());
numRowsAdded += gridSize.y * ADD_BLOCK_SIZE;
gridSize.y = ::max(1, min(DIVUP((height-numRowsAdded) , ADD_BLOCK_SIZE), NUM_BLOCKS_MAX));
aData += numRowsAdded * width;
bData += b._isTrans != _isTrans ? numRowsAdded : numRowsAdded * width;
cData += c._isTrans != _isTrans ? numRowsAdded : numRowsAdded * width;
}
} else {
hipLaunchKernelGGL(( kAdd3), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, _devData, b._devData, c._devData,
_numElements, scaleThis, scaleB, scaleC);
cuvSafeCall(hipDeviceSynchronize());
}
}
void NVMatrix::add(NVMatrix& b, float scaleA, float scaleB, NVMatrix& target) {
if(&target == &b && &target != this) { // because we manipulate target to be like a
b.add(*this, scaleB, scaleA);
return;
}
assert(this->isSameDims(b));
target.resize(*this);
if (isTrans() != b.isTrans() && min(_numRows, _numCols) > 1) {
//call addition kernel for transposed matrices
const int width = isTrans() ? _numRows : _numCols;
const int height = isTrans() ? _numCols : _numRows;
const bool checkBounds = !(width % ADD_BLOCK_SIZE == 0 && height % ADD_BLOCK_SIZE == 0);
const int numBlocksX = DIVUP(width, ADD_BLOCK_SIZE);
assert(numBlocksX < NUM_BLOCKS_MAX);
const int numBlocksY = ::max(1, ::min(DIVUP(height, ADD_BLOCK_SIZE), NUM_BLOCKS_MAX));
dim3 gridSize(numBlocksX, numBlocksY, 1);
dim3 blockSize(ADD_BLOCK_SIZE, ADD_BLOCK_SIZE, 1);
int numRowsAdded = 0;
while (numRowsAdded < height) {
if (checkBounds) {
hipLaunchKernelGGL(( kAddTransFast<true>), dim3(gridSize), dim3(blockSize), 0, 0, &_devData[numRowsAdded * width],
&b._devData[numRowsAdded], &target._devData[numRowsAdded * width],
width, height - numRowsAdded, height, scaleA, scaleB);
} else {
hipLaunchKernelGGL(( kAddTransFast<false>), dim3(gridSize), dim3(blockSize), 0, 0, &_devData[numRowsAdded * width],
&b._devData[numRowsAdded], &target._devData[numRowsAdded * width],
width, height - numRowsAdded, height, scaleA, scaleB);
}
cuvSafeCall(hipDeviceSynchronize());
numRowsAdded += gridSize.y * ADD_BLOCK_SIZE;
gridSize.y = ::max(1, min(DIVUP(height-numRowsAdded, ADD_BLOCK_SIZE), NUM_BLOCKS_MAX));
}
} else {
if(scaleA == 1.0f) {
hipblasSaxpy(_numElements, scaleB, b._devData, 1, target._devData, 1);
checkCublasError("hipblasSaxpy failed");
} else {
hipLaunchKernelGGL(( kAdd), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, _devData, b._devData, target._devData,
_numElements, scaleA, scaleB);
}
}
}
void NVMatrix::add(NVMatrix& b, float scaleB, NVMatrix& target) {
add(b, 1, scaleB, target);
}
void NVMatrix::add(NVMatrix& b, NVMatrix& target) {
add(b, 1, target);
}
void NVMatrix::add(NVMatrix& b, float scaleB) {
add(b, scaleB, *this);
}
void NVMatrix::add(NVMatrix& b, float scaleA, float scaleB) {
add(b, scaleA, scaleB, *this);
}
void NVMatrix::add(NVMatrix& b) {
add(b, 1, *this);
}
void NVMatrix::subtract(NVMatrix& b, NVMatrix& target) {
add(b, -1, target);
}
void NVMatrix::subtract(NVMatrix& b) {
add(b, -1);
}
void NVMatrix::eltWiseMult(NVMatrix& b, NVMatrix& target) {
if(&target == &b && &target != this) { // because we manipulate target to be like a
b.eltWiseMult(*this);
return;
}
assert(this->isSameDims(b));
target.resize(*this);
if (isTrans() != b.isTrans() && min(_numRows, _numCols) > 1) {
//call mult kernel for transposed matrices
const int width = isTrans() ? _numRows : _numCols;
const int height = isTrans() ? _numCols : _numRows;
const bool checkBounds = !(width % ADD_BLOCK_SIZE == 0 && height % ADD_BLOCK_SIZE == 0);
// if (width % ADD_BLOCK_SIZE == 0 && height % ADD_BLOCK_SIZE == 0) {
const int numBlocksX = DIVUP(width, ADD_BLOCK_SIZE);
assert(numBlocksX < NUM_BLOCKS_MAX);
const int numBlocksY = min(DIVUP(height, ADD_BLOCK_SIZE), NUM_BLOCKS_MAX);
dim3 gridSize(numBlocksX, numBlocksY, 1);
dim3 blockSize(ADD_BLOCK_SIZE, ADD_BLOCK_SIZE, 1);
int numRowsProcessed = 0;
while (numRowsProcessed < height) {
if (checkBounds) {
hipLaunchKernelGGL(( kMultTransFast<true>), dim3(gridSize), dim3(blockSize), 0, 0, &_devData[numRowsProcessed * width],
&b._devData[numRowsProcessed], &target._devData[numRowsProcessed * width],
width, height - numRowsProcessed, height);
} else {
hipLaunchKernelGGL(( kMultTransFast<false>), dim3(gridSize), dim3(blockSize), 0, 0, &_devData[numRowsProcessed * width],
&b._devData[numRowsProcessed], &target._devData[numRowsProcessed * width],
width, height - numRowsProcessed, height);
}
cuvSafeCall(hipDeviceSynchronize());
numRowsProcessed += gridSize.y * ADD_BLOCK_SIZE;
gridSize.y = min(DIVUP(height-numRowsProcessed, ADD_BLOCK_SIZE), NUM_BLOCKS_MAX);
}
// }
} else {
hipLaunchKernelGGL(( kMult), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, _devData, b._devData, target._devData,_numElements);
cuvSafeCall(hipDeviceSynchronize());
}
}
void NVMatrix::eltWiseMult(NVMatrix& b) {
eltWiseMult(b, *this);
}
void NVMatrix::eltWiseDivide(NVMatrix& b, NVMatrix& target) {
assert(&b != this); // doable but not necessary for me
assert(this->isSameDims(b));
target.resize(*this);
if (isTrans() != b.isTrans() && min(_numRows, _numCols) > 1) {
//call mult kernel for transposed matrices
const int width = isTrans() ? _numRows : _numCols;
const int height = isTrans() ? _numCols : _numRows;
const bool checkBounds = !(width % ADD_BLOCK_SIZE == 0 && height % ADD_BLOCK_SIZE == 0);
// if (width % ADD_BLOCK_SIZE == 0 && height % ADD_BLOCK_SIZE == 0) {
const int numBlocksX = DIVUP(width, ADD_BLOCK_SIZE);
assert(numBlocksX < NUM_BLOCKS_MAX);
const int numBlocksY = min(DIVUP(height, ADD_BLOCK_SIZE), NUM_BLOCKS_MAX);
dim3 gridSize(numBlocksX, numBlocksY, 1);
dim3 blockSize(ADD_BLOCK_SIZE, ADD_BLOCK_SIZE, 1);
int numRowsProcessed = 0;
while (numRowsProcessed < height) {
if (checkBounds) {
hipLaunchKernelGGL(( kDivideTransFast<true>), dim3(gridSize), dim3(blockSize), 0, 0, &_devData[numRowsProcessed * width],
&b._devData[numRowsProcessed], &target._devData[numRowsProcessed * width],
width, height - numRowsProcessed, height);
} else {
hipLaunchKernelGGL(( kDivideTransFast<false>), dim3(gridSize), dim3(blockSize), 0, 0, &_devData[numRowsProcessed * width],
&b._devData[numRowsProcessed], &target._devData[numRowsProcessed * width],
width, height - numRowsProcessed, height);
}
cuvSafeCall(hipDeviceSynchronize());
numRowsProcessed += gridSize.y * ADD_BLOCK_SIZE;
gridSize.y = min(DIVUP(height-numRowsProcessed, ADD_BLOCK_SIZE), NUM_BLOCKS_MAX);
}
// }
} else {
hipLaunchKernelGGL(( kDivide), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, _devData, b._devData, target._devData,_numElements);
cuvSafeCall(hipDeviceSynchronize());
}
}
void NVMatrix::eltWiseDivide(NVMatrix& b) {
eltWiseDivide(b, *this);
}
void NVMatrix::tile(int timesY, int timesX, NVMatrix& target) {
assert(timesX > 0 && timesY > 0);
target.resize(_numRows*timesY, _numCols*timesX);
target._isTrans = _isTrans;
if(!isTrans()) {
hipLaunchKernelGGL(( kTile), dim3(NUM_APPLY_BLOCKS),dim3(NUM_APPLY_THREADS_PER_BLOCK), 0, 0, _devData, target._devData, _numCols, _numRows, target._numCols, target._numRows);
} else {
hipLaunchKernelGGL(( kTile), dim3(NUM_APPLY_BLOCKS),dim3(NUM_APPLY_THREADS_PER_BLOCK), 0, 0, _devData, target._devData, _numRows, _numCols, target._numRows, target._numCols);
}
cuvSafeCall(hipDeviceSynchronize());
}
void NVMatrix::addVector(NVMatrix& vec, float scaleVec, NVMatrix& target) {
if(&target == &vec && &target != this) { // because we manipulate target to be like this
vec.add(*this, scaleVec, 1);
return;
}
assert(vec.getNumRows() == 1 || vec.getNumCols() == 1);
assert(vec.getNumRows() == _numRows || vec.getNumCols() == _numCols);
// assert(&target != &vec);
target.resize(*this);
// const unsigned int numThreads = numBlocks*numThreadsPerBlock;
const unsigned int width = _isTrans ? _numRows : _numCols;
const unsigned int height = _isTrans ? _numCols : _numRows;
if(vec.getNumRows() == _numRows && !isTrans() || vec.getNumCols() == _numCols && isTrans()) {
hipLaunchKernelGGL(( kAddColVector), dim3(NUM_ADD_VECTOR_BLOCKS),dim3(NUM_ADD_VECTOR_THREADS_PER_BLOCK), 0, 0, _devData, vec._devData, target._devData, width, height, scaleVec);
} else {
hipLaunchKernelGGL(( kAddRowVector), dim3(NUM_ADD_VECTOR_BLOCKS),dim3(NUM_ADD_VECTOR_THREADS_PER_BLOCK), 0, 0, _devData, vec._devData, target._devData, width, height, scaleVec);
}
cuvSafeCall(hipDeviceSynchronize());
}
void NVMatrix::addVector(NVMatrix& vec) {
addVector(vec, 1, *this);
}
void NVMatrix::addVector(NVMatrix& vec, float scaleVec) {
addVector(vec, scaleVec, *this);
}
void NVMatrix::addVector(NVMatrix& vec, NVMatrix& target) {
addVector(vec, 1, target);
}
void NVMatrix::equalsVector(NVMatrix& vec, NVMatrix& target) {
if(&target == &vec && &target != this) { // because we manipulate target to be like this
vec.equalsVector(*this);
return;
}
assert(vec.getNumRows() == 1 || vec.getNumCols() == 1);
assert(vec.getNumRows() == _numRows || vec.getNumCols() == _numCols);
target.resize(*this);
const unsigned int width = _isTrans ? _numRows : _numCols;
const unsigned int height = _isTrans ? _numCols : _numRows;
if(vec.getNumRows() == _numRows && !isTrans() || vec.getNumCols() == _numCols && isTrans()) {
hipLaunchKernelGGL(( kEqualsColVector), dim3(NUM_ADD_VECTOR_BLOCKS),dim3(NUM_ADD_VECTOR_THREADS_PER_BLOCK), 0, 0, _devData, vec._devData, target._devData, width, height);
} else {
hipLaunchKernelGGL(( kEqualsRowVector), dim3(NUM_ADD_VECTOR_BLOCKS),dim3(NUM_ADD_VECTOR_THREADS_PER_BLOCK), 0, 0, _devData, vec._devData, target._devData, width, height);
}
cuvSafeCall(hipDeviceSynchronize());
}
void NVMatrix::equalsVector(NVMatrix& vec) {
equalsVector(vec, *this);
}
void NVMatrix::subtractFromScalar(float scalar, NVMatrix& target) {
target.resize(*this);
hipLaunchKernelGGL(( kSubtractFromScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, _devData, scalar, target._devData,_numElements);
cuvSafeCall(hipDeviceSynchronize());
}
void NVMatrix::subtractFromScalar(float scalar) {
subtractFromScalar(scalar, *this);
}
void NVMatrix::addScalar(float scalar, NVMatrix& target) {
target.resize(*this);
hipLaunchKernelGGL(( kAddScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, _devData, scalar, target._devData,_numElements);
cuvSafeCall(hipDeviceSynchronize());
}
void NVMatrix::addScalar(float scalar) {
addScalar(scalar, *this);
}
void NVMatrix::eltWiseMultByVector(NVMatrix& vec, NVMatrix& target) {
assert(&target != &vec); // for now
assert(vec.getNumRows() == 1 || vec.getNumCols() == 1);
assert(vec.getNumRows() == _numRows || vec.getNumCols() == _numCols);
// assert(&target != &vec);
target.resize(*this);
target._isTrans = _isTrans;
// const unsigned int numThreads = numBlocks*numThreadsPerBlock;
const unsigned int width = _isTrans ? _numRows : _numCols;
const unsigned int height = _isTrans ? _numCols : _numRows;
if(vec.getNumRows() == _numRows && !isTrans() || vec.getNumCols() == _numCols && isTrans()) {
hipLaunchKernelGGL(( kMultByColVector), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, _devData, vec._devData, target._devData, width, height);
} else {
hipLaunchKernelGGL(( kMultByRowVector), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, _devData, vec._devData, target._devData, width, height);
}
cuvSafeCall(hipDeviceSynchronize());
}
void NVMatrix::eltWiseMultByVector(NVMatrix& vec) {
eltWiseMultByVector(vec, *this);
}
void NVMatrix::eltWiseDivideByVector(NVMatrix& vec) {
eltWiseDivideByVector(vec, *this);
}
void NVMatrix::eltWiseDivideByVector(NVMatrix& vec, NVMatrix& target) {
NVMatrix* vecRecip = new NVMatrix(vec);
vec.apply(NVMatrix::RECIPROCAL, *vecRecip);
eltWiseMultByVector(*vecRecip, target);
hipDeviceSynchronize();
delete vecRecip;
}
void NVMatrix::eltWiseDivideByVector2(NVMatrix& vec) {
eltWiseDivideByVector2(vec, *this);
}
void NVMatrix::eltWiseDivideByVector2(NVMatrix& vec, NVMatrix& target) {
assert(&target != &vec); // for now
assert(vec.getNumRows() == 1 || vec.getNumCols() == 1);
assert(vec.getNumRows() == _numRows || vec.getNumCols() == _numCols);
// assert(&target != &vec);
target.resize(*this);
// const unsigned int numThreads = numBlocks*numThreadsPerBlock;
const unsigned int width = _isTrans ? _numRows : _numCols;
const unsigned int height = _isTrans ? _numCols : _numRows;
if(vec.getNumRows() == _numRows && !isTrans() || vec.getNumCols() == _numCols && isTrans()) {
hipLaunchKernelGGL(( kDivideByColVector), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, _devData, vec._devData, target._devData, width, height);
} else {
hipLaunchKernelGGL(( kDivideByRowVector), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, _devData, vec._devData, target._devData, width, height);
}
cuvSafeCall(hipDeviceSynchronize());
}
void NVMatrix::scale(float scale) {
hipblasSscal(_numElements, scale, _devData, 1);
checkCublasError("hipblasSscal failed.");
}
void NVMatrix::scale(float scale, NVMatrix& target) {
target.resize(*this);
target.copyFromDevice(*this);
target.scale(scale);
}
/*
* num threads per block is ignored when summing rows (axis=1) because
* it has to be a power of 2.
*/
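// Added note (illustrative): the row-aggregation kernels used below (e.g.
// kMaxRows) perform a shared-memory tree reduction, which needs a power-of-two
// thread count, so for axis == 1 the thread count is derived from the row
// width (32/64/128/256/512) rather than taken from numThreadsPerBlock.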
void NVMatrix::aggregate(int axis, NVMatrix& target, int numThreadsPerBlock, NVMatrix::AGGREGATIONS agg) {
assert(&target != this);
unsigned int width = _isTrans ? _numRows : _numCols;
const int height = _isTrans ? _numCols : _numRows;
target.setTrans(_isTrans);
assert(width > 0);
assert(height > 0);
if(axis == 0 && !_isTrans || axis == 1 && _isTrans) { //col sum
target.resize(!_isTrans ? 1 : _numRows, !_isTrans ? _numCols : 1);
const unsigned int numBlocks = (width + numThreadsPerBlock - 1) / numThreadsPerBlock;
assert(numBlocks * numThreadsPerBlock >= width);
assert(numBlocks < NUM_BLOCKS_MAX);
// target.resize(1, width);
if(agg == NVMatrix::MAX) {
hipLaunchKernelGGL(( kDumbMaxCols), dim3(numBlocks),dim3(numThreadsPerBlock), 0, 0, _devData, target._devData, width, height);
} else if(agg == NVMatrix::SUM) {
hipLaunchKernelGGL(( kDumbSumCols), dim3(numBlocks),dim3(numThreadsPerBlock), 0, 0, _devData, target._devData, width, height);
}
cuvSafeCall(hipDeviceSynchronize());
} else { // row sum
target.resize(_isTrans ? 1 : _numRows, _isTrans ? _numCols : 1);
if (width > 1) {
NVMatrix *prevSum = this;
while (prevSum->getLeadingDim() > 1) {
int numBlocksX, numBlocksY, numThreadsX, numThreadsY;
bool doLinearAgg = height >= 16384;
// doQuickAgg = !doQuickAgg;
if(doLinearAgg) { // call the special short aggregation functions
numBlocksX = 1;
numBlocksY = DIVUP(height, AGG_SHORT_ROWS_THREADS_Y*AGG_SHORT_ROWS_LOOPS_Y);
numThreadsX = AGG_SHORT_ROWS_THREADS_X;
numThreadsY = AGG_SHORT_ROWS_THREADS_Y;
while(numBlocksY > NUM_BLOCKS_MAX) {
numBlocksY = DIVUP(numBlocksY,2);
numBlocksX *= 2;
}
} else {
numThreadsX = width <= 64 ? 32 : (width <= 128 ? 64 : (width <= 256 ? 128 : (width <= 512 ? 256 : 512)));
numThreadsY = 1;
numBlocksX = DIVUP(width, 2*numThreadsX);
numBlocksY = ::min(height, NUM_BLOCKS_MAX);
}
dim3 grid(numBlocksX, numBlocksY), threads(numThreadsX, numThreadsY);
assert(numBlocksX <= NUM_BLOCKS_MAX);
assert(numBlocksY <= NUM_BLOCKS_MAX);
// printf("%d %d %d %d %d \n", numThreadsX, numThreadsY, numBlocksX, numBlocksY, numBlocksZ);
NVMatrix *nvSumAccum = target.getFollowingDim() == height && target.getLeadingDim() == numBlocksX ? &target : new NVMatrix(height, numBlocksX, false);
// printf("target size: %dx%d\n", target.getNumRows(), target.getNumCols());
// printf("liear agg: %d, width: %d, height: %d\n", doLinearAgg, width, height);
// printf("accum is target: %d\n", nvSumAccum == &target);
if(agg == NVMatrix::MAX) {
if(doLinearAgg) {
if(width <= 16) {
if(width <= 4) {
hipLaunchKernelGGL(( kAggShortRows<AGG_MAX, 1, 4>), dim3(grid), dim3(threads), 0, 0, prevSum->_devData, nvSumAccum->_devData,width, height);
} else if(width <= 8) {
hipLaunchKernelGGL(( kAggShortRows<AGG_MAX, 1, 8>), dim3(grid), dim3(threads), 0, 0, prevSum->_devData, nvSumAccum->_devData,width, height);
} else if(width <= 12) {
hipLaunchKernelGGL(( kAggShortRows<AGG_MAX, 1, 12>), dim3(grid), dim3(threads), 0, 0, prevSum->_devData, nvSumAccum->_devData,width, height);
} else {
hipLaunchKernelGGL(( kAggShortRows<AGG_MAX, 1, 16>), dim3(grid), dim3(threads), 0, 0, prevSum->_devData, nvSumAccum->_devData,width, height);
}
} else if(width <= 32) {
hipLaunchKernelGGL(( kAggShortRows<AGG_MAX, 2, AGG_SHORT_ROWS_THREADS_X>), dim3(grid), dim3(threads), 0, 0, prevSum->_devData, nvSumAccum->_devData,width, height);
} else if(width <= 48){
hipLaunchKernelGGL(( kAggShortRows<AGG_MAX, 3, AGG_SHORT_ROWS_THREADS_X>), dim3(grid), dim3(threads), 0, 0, prevSum->_devData, nvSumAccum->_devData,width, height);
} else if(width <= 64){
hipLaunchKernelGGL(( kAggShortRows<AGG_MAX, 4, AGG_SHORT_ROWS_THREADS_X>), dim3(grid), dim3(threads), 0, 0, prevSum->_devData, nvSumAccum->_devData,width, height);
} else {
hipLaunchKernelGGL(( kAggShortRows2<AGG_MAX>), dim3(grid), dim3(threads), 0, 0, prevSum->_devData, nvSumAccum->_devData,width, height);
}
} else if(width <= 64) {
hipLaunchKernelGGL(( kMaxRows<32>), dim3(grid), dim3(threads), 0, 0, prevSum->_devData, nvSumAccum->_devData,
width, height, nvSumAccum->getLeadingDim());
} else if(width <= 128) {
hipLaunchKernelGGL(( kMaxRows<64>), dim3(grid), dim3(threads), 0, 0, prevSum->_devData, nvSumAccum->_devData,
width, height, nvSumAccum->getLeadingDim());
} else if(width <= 256) {
hipLaunchKernelGGL(( kMaxRows<128>), dim3(grid), dim3(threads), 0, 0, prevSum->_devData, nvSumAccum->_devData,
width, height, nvSumAccum->getLeadingDim());
} else if(width <= 512) {
hipLaunchKernelGGL(( kMaxRows<256>), dim3(grid), dim3(threads), 0, 0, prevSum->_devData, nvSumAccum->_devData,
width, height, nvSumAccum->getLeadingDim());
} else {
hipLaunchKernelGGL(( kMaxRows<512>), dim3(grid), dim3(threads), 0, 0, prevSum->_devData, nvSumAccum->_devData,
width, height, nvSumAccum->getLeadingDim());
}
} else if(agg == NVMatrix::SUM) {
if(doLinearAgg) {
if(width <= 16) {
if(width <= 4) {
hipLaunchKernelGGL(( kAggShortRows<AGG_SUM, 1, 4>), dim3(grid), dim3(threads), 0, 0, prevSum->_devData, nvSumAccum->_devData,width, height);
} else if(width <= 8) {
hipLaunchKernelGGL(( kAggShortRows<AGG_SUM, 1, 8>), dim3(grid), dim3(threads), 0, 0, prevSum->_devData, nvSumAccum->_devData,width, height);
} else if(width <= 12) {
hipLaunchKernelGGL(( kAggShortRows<AGG_SUM, 1, 12>), dim3(grid), dim3(threads), 0, 0, prevSum->_devData, nvSumAccum->_devData,width, height);
} else {
hipLaunchKernelGGL(( kAggShortRows<AGG_SUM, 1, 16>), dim3(grid), dim3(threads), 0, 0, prevSum->_devData, nvSumAccum->_devData,width, height);
}
} else if(width <= 32) {
hipLaunchKernelGGL(( kAggShortRows<AGG_SUM, 2, AGG_SHORT_ROWS_THREADS_X>), dim3(grid), dim3(threads), 0, 0, prevSum->_devData, nvSumAccum->_devData,width, height);
} else if(width <= 48) {
hipLaunchKernelGGL(( kAggShortRows<AGG_SUM, 3, AGG_SHORT_ROWS_THREADS_X>), dim3(grid), dim3(threads), 0, 0, prevSum->_devData, nvSumAccum->_devData,width, height);
} else if(width <= 64){
hipLaunchKernelGGL(( kAggShortRows<AGG_SUM, 4, AGG_SHORT_ROWS_THREADS_X>), dim3(grid), dim3(threads), 0, 0, prevSum->_devData, nvSumAccum->_devData,width, height);
} else {
hipLaunchKernelGGL(( kAggShortRows2<AGG_SUM>), dim3(grid), dim3(threads), 0, 0, prevSum->_devData, nvSumAccum->_devData,width, height);
}
} else if (width <= 64) {
hipLaunchKernelGGL(( kSumRows<32>), dim3(grid), dim3(threads), 0, 0, prevSum->_devData, nvSumAccum->_devData,
width, height, nvSumAccum->getLeadingDim());
} else if (width <= 128) {
hipLaunchKernelGGL(( kSumRows<64>), dim3(grid), dim3(threads), 0, 0, prevSum->_devData, nvSumAccum->_devData,
width, height, nvSumAccum->getLeadingDim());
} else if (width <= 256) {
hipLaunchKernelGGL(( kSumRows<128>), dim3(grid), dim3(threads), 0, 0, prevSum->_devData, nvSumAccum->_devData,
width, height, nvSumAccum->getLeadingDim());
} else if (width <= 512) {
hipLaunchKernelGGL(( kSumRows<256>), dim3(grid), dim3(threads), 0, 0, prevSum->_devData, nvSumAccum->_devData,
width, height, nvSumAccum->getLeadingDim());
} else {
hipLaunchKernelGGL(( kSumRows<512>), dim3(grid), dim3(threads), 0, 0, prevSum->_devData, nvSumAccum->_devData,
width, height, nvSumAccum->getLeadingDim());
}
}
cuvSafeCall(hipDeviceSynchronize());
width = numBlocksX;
if (prevSum != this) {
delete prevSum;
}
prevSum = nvSumAccum;
}
// if (_isTrans) {
// prevSum->_numCols = prevSum->_numRows;
// prevSum->_numRows = 1;
// }
// target.copyFromDevice(*prevSum);
// delete prevSum;
} else {
target.resize(*this);
target.copyFromDevice(*this);
}
}
}
void NVMatrix::max(int axis, NVMatrix& target) {
if(axis == 0 && !_isTrans || axis == 1 && _isTrans) {
aggregate(axis, target, NUM_SUM_COLS_THREADS_PER_BLOCK, NVMatrix::MAX);
} else {
aggregate(axis, target, NUM_SUM_ROWS_THREADS_PER_BLOCK, NVMatrix::MAX);
}
}
NVMatrix& NVMatrix::max(int axis) {
NVMatrix *sumVec = new NVMatrix();
max(axis, *sumVec);
return *sumVec;
}
void NVMatrix::sum(int axis, NVMatrix& target) {
assert(axis == 0 || axis == 1);
if(axis == 0 && !_isTrans || axis == 1 && _isTrans) {
aggregate(axis, target, NUM_SUM_COLS_THREADS_PER_BLOCK, NVMatrix::SUM);
} else {
aggregate(axis, target, NUM_SUM_ROWS_THREADS_PER_BLOCK, NVMatrix::SUM);
}
}
NVMatrix& NVMatrix::sum(int axis) {
NVMatrix *sumVec = new NVMatrix();
sum(axis, *sumVec);
return *sumVec;
}
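/*
 * Shape sketch (illustrative, not part of the original source): for a row-major
 * numRows x numCols matrix, axis 0 reduces each column and axis 1 reduces each row.
 * The thread-count constant only matters for the column case; the row kernels pick
 * their own power-of-2 block size (see aggregate() above).
 *
 *   NVMatrix m(128, 32, false);   // row-major
 *   NVMatrix colSums, rowSums;
 *   m.sum(0, colSums);            // 1 x 32
 *   m.sum(1, rowSums);            // 128 x 1
 */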
float NVMatrix::sum() {
WARN("Summing over all matrix elements first performs a sum over all columns. If your matrix has few columns, this is inefficient.");
NVMatrix devSum = NVMatrix();
sum(_isTrans && _numRows > _numCols || !_isTrans && _numRows < _numCols ? 1 : 0, devSum);
Matrix hostSum = Matrix(devSum._numRows, devSum._numCols);
hipDeviceSynchronize();
devSum.copyToHost(hostSum);
return hostSum.sum();
}
void NVMatrix::print(int startRow, int rows, int startCol, int cols) const {
hipDeviceSynchronize();
Matrix* hm = new Matrix(_numRows, _numCols);
copyToHost(*hm);
hm->print(startRow, rows, startCol, cols);
delete hm;
}
void NVMatrix::print(int rows, int cols) const {
print(0, rows, 0, cols);
}
void NVMatrix::printShape(const char* name) const {
printf("%s: %dx%d\n", name, _numRows, _numCols);
}
//========================================================
// NVMatrix but initialized with zeros instead of whatever
// happens to be in memory.
//========================================================
NVZeroMatrix::NVZeroMatrix(int numRows, int numCols, bool isTrans) : NVMatrix(numRows, numCols, isTrans) {
apply(NVMatrix::ZERO);
}
NVZeroMatrix::NVZeroMatrix(Matrix& like) : NVMatrix(like.getNumRows(), like.getNumCols()) {
apply(NVMatrix::ZERO);
}
NVZeroMatrix::NVZeroMatrix(NVMatrix& like) : NVMatrix(like.getNumRows(), like.getNumCols()) {
apply(NVMatrix::ZERO);
}
|
d461b923e94db4972b7d638514bc1796e07578d8.cu
|
//*LB*
// Copyright (c) 2009, Alexander Krizhevsky
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of the University of Toronto
// nor the names of its contributors may be used to endorse or promote
// products derived from this software without specific prior written
// permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//*LE*
/*
* nvmatrix.cu
*
* Created on: 20-Jan-2009
* Author: Alex Krizhevsky ([email protected])
*/
#include <assert.h>
#include <cublas.h>
/*#include <cutil_inline.h>*/
#include "cuv/tools/cuv_general.hpp"
#include <stdlib.h>
#include <stdio.h>
#include <fstream>
#include <iostream>
#include <algorithm>
#include "nvmatrix.cuh"
using namespace std;
cudaDeviceProp NVMatrix::deviceProps;
unsigned int NVMatrix::hostRndMults[NUM_RND_STREAMS];
bool NVMatrix::rndInitialized = false;
/*
* Device random number generator pointers.
*/
unsigned int *NVMatrix::devRndMults;
unsigned long long *NVMatrix::devRndWords;
void NVMatrix::initDeviceProps() {
int deviceCount;
cuvSafeCall(cudaGetDeviceCount(&deviceCount));
if (deviceCount == 0) {
printf("There is no device supporting CUDA\n");
exit(EXIT_FAILURE);
}
cuvSafeCall(cudaGetDeviceProperties(&deviceProps, 0));
}
void NVMatrix::_init(unsigned int numRows, unsigned int numCols) {
_numRows = numRows;
_numCols = numCols;
_numElements = numRows * numCols;
_ownsData = true;
/*
* By default, new matrices are in column-major order because that's how CUBLAS likes it.
*/
_isTrans = true;
_devData = NULL;
if (_numElements > 0) {
cublasAlloc(_numElements, sizeof(float), (void**) &_devData);
checkCublasError("!!!! device memory allocation error\n");
}
}
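/*
 * Layout sketch (illustrative, not part of the original source): _isTrans only changes
 * how the flat buffer allocated above is interpreted, mirroring the indexing used by
 * slice() and getCellPtr() elsewhere in this class.
 *
 *   NVMatrix a(3, 2);        // column-major by default: element (r, c) sits at _devData[c * 3 + r]
 *   NVMatrix b(3, 2, false); // row-major:               element (r, c) sits at _devData[r * 2 + c]
 *
 * CUBLAS always treats buffers as column-major, which is why the row-major case is
 * presented to it as a transpose (see rightMult()/addProduct() below).
 */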
NVMatrix::NVMatrix() {
_init(0, 0);
}
NVMatrix::NVMatrix(bool isTrans) {
_init(0, 0);
setTrans(isTrans);
}
NVMatrix::NVMatrix(int numRows, int numCols, bool isTrans) {
_init(numRows, numCols);
setTrans(isTrans);
}
NVMatrix::NVMatrix(const Matrix& like, bool copy) {
_init(like.getNumRows(), like.getNumCols());
_isTrans = like.isTrans();
if (copy) {
copyFromHost(like);
}
}
NVMatrix::NVMatrix(const NVMatrix& like, bool copy) {
_init(like.getNumRows(), like.getNumCols());
_isTrans = like.isTrans();
if(copy) {
copyFromDevice(like);
}
}
/*
* Initializes NVMatrix with same dimensions as given matrix but
* does not copy any data.
*/
NVMatrix::NVMatrix(const NVMatrix& like) {
_init(like.getNumRows(), like.getNumCols());
_isTrans = like.isTrans();
}
/*
* Initializes NVMatrix with same dimensions as given matrix but
* does not copy any data.
*/
NVMatrix::NVMatrix(const Matrix& like) {
_init(like.getNumRows(), like.getNumCols());
_isTrans = like.isTrans();
}
NVMatrix::NVMatrix(float* devData, int numRows, int numCols, bool isTrans) {
_numRows = numRows;
_numCols = numCols;
_numElements = numRows * numCols;
_ownsData = false;
_devData = devData;
_isTrans = isTrans;
}
NVMatrix::~NVMatrix() {
if(_ownsData && _numElements > 0) {
cublasStatus status = cublasFree(_devData);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! memory free error\n");
exit(EXIT_FAILURE);
}
}
}
void NVMatrix::copyFromHost(const Matrix& hostMatrix, bool resizeDeviceMatrix) {
if(resizeDeviceMatrix) {
resize(hostMatrix);
}
copyFromHost(hostMatrix);
}
void NVMatrix::copyFromHost(const Matrix& hostMatrix) {
assert(isSameDims(hostMatrix));
cublasStatus status = cublasSetVector(_numElements, sizeof(float), hostMatrix.getData(), 1, _devData, 1);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! device access error (write)\n");
exit( EXIT_FAILURE);
}
_isTrans = hostMatrix.isTrans();
}
void NVMatrix::copyFromDevice(const NVMatrix& devMatrix) {
assert(isSameDims(devMatrix));
cublasScopy(_numElements,devMatrix._devData, 1, _devData,1);
checkCublasError("cublasScopy failed");
_isTrans = devMatrix.isTrans();
}
void NVMatrix::copyFromDevice(const NVMatrix& devMatrix, bool resizeTarget) {
if (resizeTarget) {
resize(devMatrix);
}
copyFromDevice(devMatrix);
}
void NVMatrix::copyToHost(Matrix& hostMatrix) const {
assert(isSameDims(hostMatrix));
cublasStatus status = cublasGetVector(_numElements, sizeof(float), _devData, 1, hostMatrix.getData(), 1);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! device access error (read)\n");
exit( EXIT_FAILURE);
}
hostMatrix.setTrans(_isTrans);
}
void NVMatrix::rightMult(const NVMatrix &b, float scaleAB, NVMatrix &target) const {
// assert(&target != &b);
assert(_numCols == b.getNumRows());
if(&target != this) {
target.resize(_numRows, b.getNumCols());
}
assert(target.getNumRows() == _numRows);
assert(target.getNumCols() == b.getNumCols());
if(_numRows % 64 != 0 || _numCols % 64 != 0 || b.getNumCols() % 64 != 0) {
WARN("Matrix dimensions not divisible by 64 -- cublasSgemm performance may suffer.");
}
cublasSgemm(getTransChar(), b.getTransChar(), _numRows, b.getNumCols(), _numCols,
scaleAB, _devData, getLeadingDim(), b.getDevData(), b.getLeadingDim(),
0, target.getDevData(), getNumRows());
checkCublasError("cublasSgemm failed");
target._isTrans = true; //because target is now in col-major order
}
void NVMatrix::rightMult(const NVMatrix &b, float scaleAB) {
rightMult(b, scaleAB, *this);
}
void NVMatrix::rightMult(const NVMatrix &b, NVMatrix& target) const {
rightMult(b, 1, target);
}
/*
* This will only work if this matrix is in column-major order! In other words,
* if isTrans() returns true.
*/
void NVMatrix::addProduct(const NVMatrix& a, const NVMatrix &b, float scaleThis, float scaleAB) {
assert(a.getNumCols() == b.getNumRows());
assert(this->getNumRows() == a.getNumRows());
assert(this->getNumCols() == b.getNumCols());
assert(_isTrans);
if(a.getNumRows() % 64 != 0 || a.getNumCols() % 64 != 0 || b.getNumCols() % 64 != 0) {
WARN("Matrix dimensions not divisible by 64 -- cublasSgemm performance may suffer.");
}
cublasSgemm(a.getTransChar(), b.getTransChar(), a.getNumRows(), b.getNumCols(), a.getNumCols(),
scaleAB, a.getDevData(), a.getLeadingDim(), b.getDevData(), b.getLeadingDim(),
scaleThis, _devData, getLeadingDim());
checkCublasError("cublasSgemm failed");
}
void NVMatrix::addProduct(const NVMatrix& a, const NVMatrix &b) {
addProduct(a, b, 1, 1);
}
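/*
 * Usage sketch (illustrative, not part of the original source): addProduct() accumulates
 * a GEMM result into an existing column-major matrix, i.e. c = scaleThis * c + scaleAB * a * b,
 * whereas rightMult() resizes the target and overwrites it.
 *
 *   NVMatrix a(128, 64), b(64, 32);
 *   NVMatrix c(128, 32, true);    // must be column-major (_isTrans == true), per the assert above
 *   c.apply(NVMatrix::ZERO);
 *   c.addProduct(a, b);           // c += a * b
 */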
void NVMatrix::apply(NVMatrix::FUNCTIONS f, NVMatrix& target, int numBlocks, int numThreadsPerBlock) {
target.resize(*this);
target._isTrans = _isTrans;
dim3 grid( numBlocks, 1, 1);
dim3 threads( numThreadsPerBlock, 1, 1);
if(f == NVMatrix::EXP) {
kExp<<<grid, threads>>>(_devData, target._devData, _numElements);
} else if (f == NVMatrix::LOGISTIC1) {
kLogistic1<<<grid, threads>>>(_devData, target._devData, _numElements);
} else if (f == NVMatrix::LOGISTIC2) {
kLogistic2<<<grid, threads>>>(_devData, target._devData, _numElements);
} else if (f == NVMatrix::SQUARE) {
kSquare<<<grid, threads>>>(_devData, target._devData, _numElements);
} else if (f == NVMatrix::SQRT) {
kSqrt<<<grid, threads>>>(_devData, target._devData, _numElements);
} else if (f == NVMatrix::ZERO) {
kZero<<<grid, threads>>>(_devData, target._devData, _numElements);
} else if(f == NVMatrix::RECIPROCAL) {
kReciprocal<<<grid, threads>>>(_devData, target._devData, _numElements);
} else if(f == NVMatrix::LOG) {
kLog<<<grid, threads>>>(_devData, target._devData, _numElements);
} else if(f == NVMatrix::SIGN) {
kSign<<<grid, threads>>>(_devData, target._devData, _numElements);
}
cuvSafeCall(cudaThreadSynchronize());
}
void NVMatrix::apply(NVMatrix::FUNCTIONS f, int numBlocks, int numThreadsPerBlock) {
apply(f, *this, numBlocks, numThreadsPerBlock);
}
/*
* The random number generator uses the multiply with carry algorithm. I got the
* multipliers from a site I can't find anymore.
*/
void NVMatrix::initRandom(unsigned int seed) {
assert(!rndInitialized);
ifstream inFile;
inFile.open(RND_MULTIPLIERS_FILE);
if(!inFile) {
std::cerr << "Unable to open file " << RND_MULTIPLIERS_FILE << std::endl;
exit(EXIT_FAILURE);
}
unsigned int mult;
for (int numRead = 0; numRead < NUM_RND_STREAMS; numRead++) {
if (!(inFile >> mult)) {
std::cerr << "Not enough numbers in file " << RND_MULTIPLIERS_FILE << std::endl;
exit(EXIT_FAILURE);
}
hostRndMults[numRead] = mult;
}
inFile.close();
cuvSafeCall(cudaMalloc((void **)&devRndMults, NUM_RND_STREAMS * sizeof(unsigned int)));
cuvSafeCall(cudaMalloc((void **)&devRndWords, NUM_RND_STREAMS * sizeof(unsigned long long)));
cuvSafeCall(cudaMemcpy(devRndMults, hostRndMults, NUM_RND_STREAMS * sizeof(unsigned int), cudaMemcpyHostToDevice));
kSeedRandom<<<NUM_RND_BLOCKS, NUM_RND_THREADS_PER_BLOCK>>>(devRndMults, devRndWords, seed);
cuvSafeCall(cudaThreadSynchronize());
rndInitialized = true;
}
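/*
 * Multiply-with-carry refresher (illustrative; the device-side update lives in the
 * kernels declared in nvmatrix_kernel and may differ in detail). Each of the
 * NUM_RND_STREAMS streams keeps a 64-bit word whose low half is the current sample x
 * and whose high half is the carry c; one step with per-stream multiplier a is:
 *
 *   unsigned long long word = ((unsigned long long)c << 32) | x;
 *   word = a * (word & 0xffffffffULL) + (word >> 32);   // new x = low 32 bits, new c = high 32 bits
 *
 * which is why the table above stores one multiplier and one 64-bit word per stream.
 */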
void NVMatrix::destroyRandom() {
assert(rndInitialized);
cuvSafeCall(cudaFree(devRndMults));
cuvSafeCall(cudaFree(devRndWords));
rndInitialized = false;
}
void NVMatrix::binarizeProbs() {
assert(rndInitialized);
kBinarizeProbs<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(devRndMults, devRndWords, _devData,_numElements);
cuvSafeCall(cudaThreadSynchronize());
}
void NVMatrix::randomizeUniform() {
assert(rndInitialized);
kRandomUniform<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(devRndMults, devRndWords, _devData,_numElements);
cuvSafeCall(cudaThreadSynchronize());
}
void NVMatrix::randomizeGaussian() {
randomizeGaussian(1);
}
void NVMatrix::randomizeGaussian(float stdev) {
assert(rndInitialized);
kRandomGaussian<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(devRndMults, devRndWords, _devData, stdev, _numElements);
cuvSafeCall(cudaThreadSynchronize());
}
void NVMatrix::randomizeGaussian(NVMatrix& stdevs) {
assert(rndInitialized);
assert(stdevs.getNumElements() == _numElements);
assert(stdevs.isTrans() == isTrans());
kRandomGaussian<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(devRndMults, devRndWords, _devData, stdevs.getDevData(), _numElements);
cuvSafeCall(cudaThreadSynchronize());
}
void NVMatrix::addGaussianNoise() {
addGaussianNoise(1);
}
void NVMatrix::addGaussianNoise(float stdev) {
assert(rndInitialized);
// assert(_numElements % 2 == 0);
kAddGaussianNoise<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(devRndMults, devRndWords, _devData,stdev,_numElements);
cuvSafeCall(cudaThreadSynchronize());
}
void NVMatrix::addGaussianNoise(NVMatrix& stdevs) {
assert(rndInitialized);
// assert(_numElements % 2 == 0);
assert(stdevs.getNumElements() == _numElements);
assert(stdevs.isTrans() == isTrans());
kAddGaussianNoise<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(devRndMults, devRndWords, _devData,stdevs.getDevData(),_numElements);
cuvSafeCall(cudaThreadSynchronize());
}
void NVMatrix::biggerThanScalar(float scalar) {
biggerThanScalar(scalar, *this);
}
void NVMatrix::biggerThanScalar(float scalar, NVMatrix& target) {
target.resize(*this);
kBiggerThanScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(_devData,scalar,target._devData,_numElements);
cuvSafeCall(cudaThreadSynchronize());
}
void NVMatrix::inRangeInc(float lower, float upper) {
inRangeInc(lower, upper, *this);
}
void NVMatrix::inRangeInc(float lower, float upper, NVMatrix& target) {
target.resize(*this);
kInRangeInc<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(_devData,lower, upper,target._devData,_numElements);
cuvSafeCall(cudaThreadSynchronize());
}
void NVMatrix::inRangeExc(float lower, float upper) {
inRangeExc(lower, upper, *this);
}
void NVMatrix::inRangeExc(float lower, float upper, NVMatrix& target) {
target.resize(*this);
kInRangeExc<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(_devData,lower, upper,target._devData,_numElements);
cuvSafeCall(cudaThreadSynchronize());
}
void NVMatrix::smallerThanScalar(float scalar, NVMatrix& target) {
target.resize(*this);
kSmallerThanScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(_devData,scalar,target._devData,_numElements);
cuvSafeCall(cudaThreadSynchronize());
}
void NVMatrix::biggerThan(NVMatrix& m, NVMatrix& target, int numBlocks, int numThreadsPerBlock) {
assert(isSameDims(m));
target.resize(*this);
for (unsigned int elementsDone = 0; elementsDone < _numElements; elementsDone += numBlocks*numThreadsPerBlock) {
kBiggerThan<<<numBlocks, numThreadsPerBlock>>>(_devData + elementsDone,
m._devData + elementsDone, target._devData + elementsDone,
_numElements - elementsDone);
}
cuvSafeCall(cudaThreadSynchronize());
}
void NVMatrix::biggerThan(NVMatrix& m, int numBlocks, int numThreadsPerBlock) {
biggerThan(m, *this, numBlocks, numThreadsPerBlock);
}
void NVMatrix::biggerThanVector(NVMatrix& vec, NVMatrix& target) {
if(&target == &vec && &target != this) { // because we manipulate target to be like this
vec.biggerThanVector(*this);
return;
}
assert(vec.getNumRows() == 1 || vec.getNumCols() == 1);
assert(vec.getNumRows() == _numRows || vec.getNumCols() == _numCols);
target.resize(*this);
const unsigned int width = _isTrans ? _numRows : _numCols;
const unsigned int height = _isTrans ? _numCols : _numRows;
if(vec.getNumRows() == _numRows && !isTrans() || vec.getNumCols() == _numCols && isTrans()) {
kBiggerThanColVector<<<NUM_ADD_VECTOR_BLOCKS,NUM_ADD_VECTOR_THREADS_PER_BLOCK>>>(_devData, vec._devData, target._devData, width, height);
} else {
kBiggerThanRowVector<<<NUM_ADD_VECTOR_BLOCKS,NUM_ADD_VECTOR_THREADS_PER_BLOCK>>>(_devData, vec._devData, target._devData, width, height);
}
cuvSafeCall(cudaThreadSynchronize());
}
void NVMatrix::biggerThanVector(NVMatrix& vec) {
biggerThanVector(vec, *this);
}
void NVMatrix::_checkBounds(int startRow, int endRow, int startCol, int endCol) const {
assert(startRow >= 0 && startRow <= _numRows);
assert(endRow >= 0 && endRow <= _numRows);
assert(startCol >= 0 && startCol <= _numCols);
assert(endCol >= 0 && endCol <= _numCols);
}
NVMatrix& NVMatrix::slice(int startRow, int endRow, int startCol, int endCol) const {
endRow = endRow < 0 ? this->_numRows : endRow;
endCol = endCol < 0 ? this->_numCols : endCol;
_checkBounds(startRow, endRow, startCol, endCol);
if (!isTrans() && ((startCol == 0 && endCol == this->_numCols) || startRow == endRow - 1)) {
return *new NVMatrix(this->_devData + startRow * this->_numCols + startCol, endRow - startRow, endCol - startCol, false);
} else if(isTrans() && ((startRow == 0 & endRow == this->_numRows) || startCol == endCol - 1)) {
return *new NVMatrix(this->_devData + startCol * this->_numRows + startRow, endRow - startRow, endCol - startCol, true);
}
WARN("Slice: result will not be a view.");
NVMatrix& newSlice = *new NVMatrix(endRow - startRow, endCol - startCol);
this->copy(newSlice, startRow, endRow, startCol, endCol, 0, 0);
return newSlice;
}
/* this will NEVER return a view, unlike Matrix_slice */
void NVMatrix::slice(int startRow, int endRow, int startCol, int endCol, NVMatrix& target) const {
endRow = endRow < 0 ? this->_numRows : endRow;
endCol = endCol < 0 ? this->_numCols : endCol;
_checkBounds(startRow, endRow, startCol, endCol);
target.resize(endRow - startRow, endCol - startCol);
target._isTrans = _isTrans;
this->copy(target, startRow, endRow, startCol, endCol, 0, 0);
}
NVMatrix& NVMatrix::sliceRows(int startRow, int endRow) const {
return slice(startRow, endRow, 0, -1);
}
void NVMatrix::sliceRows(int startRow, int endRow, NVMatrix& target) const {
slice(startRow, endRow, 0, -1, target);
}
NVMatrix& NVMatrix::sliceCols(int startCol, int endCol) const {
return slice(0, -1, startCol, endCol);
}
void NVMatrix::sliceCols(int startCol, int endCol, NVMatrix& target) const {
slice(0, -1, startCol, endCol, target);
}
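/*
 * View-vs-copy sketch (illustrative, not part of the original source): slice() returns
 * a view that aliases _devData only when the requested region is contiguous in memory;
 * otherwise it warns and copies.
 *
 *   NVMatrix m(100, 64, false);            // row-major
 *   NVMatrix& rows = m.sliceRows(10, 20);  // full-width row range -> view, no copy
 *   NVMatrix& cols = m.sliceCols(3, 7);    // strided column range  -> warning + copy
 *
 * Both reference-returning overloads allocate the result with new, so the caller is
 * responsible for deleting it.
 */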
/*
 * Guaranteed not to change the data if the number of elements doesn't change,
 * so this can also be used to "reshape" a matrix.
*/
bool NVMatrix::resize(int numRows, int numCols) {
bool reallocated = false;
if (numRows != _numRows || numCols != _numCols) {
assert(_ownsData);
if (_numElements != numRows * numCols) {
cublasStatus status = cublasFree(_devData);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! memory free error\n");
exit(EXIT_FAILURE);
}
status = cublasAlloc(numCols * numRows, sizeof(float), (void**) &_devData);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! device memory allocation error\n");
exit(EXIT_FAILURE);
}
reallocated = true;
}
_numRows = numRows;
_numCols = numCols;
_numElements = numRows * numCols;
}
return reallocated;
}
bool NVMatrix::resize(const NVMatrix& like) {
bool r = resize(like.getNumRows(), like.getNumCols());
_isTrans = like._isTrans;
return r;
}
bool NVMatrix::resize(const Matrix& like) {
bool r = resize(like.getNumRows(), like.getNumCols());
_isTrans = like.isTrans();
return r;
}
void NVMatrix::reshape(int numRows, int numCols) {
assert(_numElements == numRows*numCols);
_numRows = numRows;
_numCols = numCols;
}
NVMatrix& NVMatrix::reshaped(int numRows, int numCols) {
assert(_numElements == numRows*numCols);
return *new NVMatrix(_devData, numRows, numCols, _isTrans);
}
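/*
 * Usage sketch (illustrative): because resize() keeps the existing buffer whenever the
 * element count is unchanged, it doubles as an in-place reshape; reshaped() instead
 * returns a new header over the same device pointer.
 *
 *   NVMatrix m(6, 4);
 *   m.resize(8, 3);                    // still 24 elements -> no reallocation, data preserved
 *   NVMatrix& v = m.reshaped(24, 1);   // aliases m's buffer; delete v when done
 */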
void NVMatrix::copy(NVMatrix &dest, int srcStartRow, int srcEndRow,
int srcStartCol, int srcEndCol,
int destStartRow, int destStartCol) const {
srcEndRow = srcEndRow < 0 ? this->_numRows : srcEndRow;
srcEndCol = srcEndCol < 0 ? this->_numCols : srcEndCol;
assert(destStartRow >= 0 && destStartCol >= 0); //some range-checking
assert(srcEndRow <= _numRows && srcEndCol <= _numCols);
assert(destStartRow + srcEndRow - srcStartRow <= dest.getNumRows());
assert(destStartCol + srcEndCol - srcStartCol <= dest.getNumCols());
const int srcJumpWidth = !_isTrans ? getNumCols() : getNumRows();
const int destJumpWidth = !dest._isTrans ? dest.getNumCols() : dest.getNumRows();
float* srcStartPtr = getCellPtr(srcStartRow, srcStartCol);
float* destStartPtr = dest.getCellPtr(destStartRow, destStartCol);
if (isTrans() != dest.isTrans()) {
// copyWidth here refers to dest
const int copyWidth = !dest._isTrans ? srcEndCol - srcStartCol : srcEndRow - srcStartRow;
const int copyHeight = !dest._isTrans ? srcEndRow - srcStartRow : srcEndCol - srcStartCol;
//call copy kernel for transposed matrices
// const int width = dest.isTrans() ? _numRows : _numCols;
// const int height = dest.isTrans() ? _numCols : _numRows;
const bool checkBounds = !(copyWidth % ADD_BLOCK_SIZE == 0 && copyHeight % ADD_BLOCK_SIZE == 0);
const int numBlocksX = DIVUP(copyWidth, ADD_BLOCK_SIZE);
assert(numBlocksX < NUM_BLOCKS_MAX);
const int numBlocksY = std::max(1, std::min(DIVUP(copyHeight, ADD_BLOCK_SIZE), NUM_BLOCKS_MAX));
dim3 gridSize(numBlocksX, numBlocksY, 1);
dim3 blockSize(ADD_BLOCK_SIZE, ADD_BLOCK_SIZE, 1);
int numRowsCopied = 0;
// printf("calling kCopyTransFast\n");
while (numRowsCopied < copyHeight) {
if (checkBounds) {
kCopyTransFast<true><<<gridSize, blockSize>>>(&destStartPtr[numRowsCopied * destJumpWidth],
&srcStartPtr[numRowsCopied], copyWidth, copyHeight - numRowsCopied, destJumpWidth, srcJumpWidth);
} else {
kCopyTransFast<false><<<gridSize, blockSize>>>(&destStartPtr[numRowsCopied * destJumpWidth],
&srcStartPtr[numRowsCopied], copyWidth, copyHeight - numRowsCopied, destJumpWidth, srcJumpWidth);
}
cuvSafeCall(cudaThreadSynchronize());
numRowsCopied += gridSize.y * ADD_BLOCK_SIZE;
gridSize.y = std::max(1, min(DIVUP(copyHeight-numRowsCopied, ADD_BLOCK_SIZE), NUM_BLOCKS_MAX));
}
} else {
// copyWidth here refers to src
const int copyWidth = !_isTrans ? srcEndCol - srcStartCol : srcEndRow - srcStartRow;
const int copyHeight = !_isTrans ? srcEndRow - srcStartRow : srcEndCol - srcStartCol;
const int numToCopy = copyWidth * copyHeight;
kCopy<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(srcStartPtr, destStartPtr, copyWidth, srcJumpWidth, destJumpWidth, numToCopy);
}
}
NVMatrix& NVMatrix::getTranspose() {
NVMatrix* trans = new NVMatrix(_devData, _numCols, _numRows, !_isTrans);
return *trans;
}
/*
* Flips the ordering of the matrix from row-major to column-major and vice versa.
* This creates temporary storage -- not a cheap operation.
*
* This is not equivalent to a "hard transpose". The resultant matrix still has
 * the same dimensions; only its layout in memory changes.
*/
void NVMatrix::flipTrans() {
NVMatrix* meTrans = new NVMatrix(*this);
// assert(_numCols % ADD_BLOCK_SIZE == 0 && _numRows % ADD_BLOCK_SIZE == 0);
const int width = isTrans() ? _numRows : _numCols;
const int height = isTrans() ? _numCols : _numRows;
const int numBlocksX = DIVUP(width, ADD_BLOCK_SIZE);
const int numBlocksY = DIVUP(height, ADD_BLOCK_SIZE);
assert(numBlocksX < NUM_BLOCKS_MAX && numBlocksY < NUM_BLOCKS_MAX);
dim3 gridSize(numBlocksX, numBlocksY, 1);
dim3 blockSize(ADD_BLOCK_SIZE, ADD_BLOCK_SIZE, 1);
kTranspose<<<gridSize, blockSize>>>(_devData, meTrans->_devData, width, height);
cuvSafeCall(cudaThreadSynchronize());
copyFromDevice(*meTrans);
this->_isTrans = !this->_isTrans;
delete meTrans;
}
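/*
 * Comparison sketch (illustrative, not part of the original source): getTranspose() is a
 * cheap view that swaps the logical dimensions and flips _isTrans over the same buffer,
 * while flipTrans() physically rewrites the buffer so that the same logical matrix is
 * stored in the opposite order.
 *
 *   NVMatrix m(3, 5, false);           // 3x5, row-major
 *   NVMatrix& t = m.getTranspose();    // 5x3 view of the same data (delete when done)
 *   m.flipTrans();                     // still 3x5, now stored column-major
 */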
void NVMatrix::squaredDiff(NVMatrix& b) {
squaredDiff(b, *this);
}
void NVMatrix::squaredDiff(NVMatrix& b, NVMatrix& target) {
assert(this->isSameDims(b));
assert(&target != &b);
target.resize(*this);
const int width = isTrans() ? _numRows : _numCols;
const int height = isTrans() ? _numCols : _numRows;
if (_isTrans != b._isTrans) {
const bool checkBounds = !(width % ADD_BLOCK_SIZE == 0 && height % ADD_BLOCK_SIZE == 0);
const int numBlocksX = DIVUP(width, ADD_BLOCK_SIZE);
assert(numBlocksX < NUM_BLOCKS_MAX);
const int numBlocksY = std::max(1, std::min(DIVUP(height, ADD_BLOCK_SIZE), NUM_BLOCKS_MAX));
dim3 gridSize(numBlocksX, numBlocksY, 1);
dim3 blockSize(ADD_BLOCK_SIZE, ADD_BLOCK_SIZE, 1);
int numRowsAdded = 0;
float* aData = _devData, *bData = b._devData, *destData = target._devData;
// printf("calling trans sq diff\n");
while (numRowsAdded < height) {
if(checkBounds) {
kSquaredDiffTransFast<true><<<gridSize, blockSize>>>(aData, bData, destData, width, height - numRowsAdded, height);
} else {
kSquaredDiffTransFast<false><<<gridSize, blockSize>>>(aData, bData, destData, width, height - numRowsAdded, height);
}
cuvSafeCall(cudaThreadSynchronize());
numRowsAdded += gridSize.y * ADD_BLOCK_SIZE;
gridSize.y = std::max(1, std::min(DIVUP(height-numRowsAdded, ADD_BLOCK_SIZE), NUM_BLOCKS_MAX));
aData += numRowsAdded * width;
bData += b._isTrans != _isTrans ? numRowsAdded : numRowsAdded * width;
destData += numRowsAdded * width;
}
} else {
// printf("calling plain sq diff\n");
kSquaredDiff<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(_devData, b._devData, target._devData,_numElements);
cuvSafeCall(cudaThreadSynchronize());
}
}
void NVMatrix::addSum(NVMatrix& b, NVMatrix& c, float scaleThis, float scaleB, float scaleC) {
assert(this->isSameDims(b));
assert(this->isSameDims(c));
const int width = isTrans() ? _numRows : _numCols;
const int height = isTrans() ? _numCols : _numRows;
if((_isTrans != b._isTrans || _isTrans != c._isTrans) && min(_numRows, _numCols) > 1) {
bool checkBounds = !(width % ADD_BLOCK_SIZE == 0 && height % ADD_BLOCK_SIZE == 0);
const int numBlocksX = DIVUP(width, ADD_BLOCK_SIZE);
assert(numBlocksX < NUM_BLOCKS_MAX);
const int numBlocksY = std::max(1, min(DIVUP(height, ADD_BLOCK_SIZE), NUM_BLOCKS_MAX));
dim3 gridSize(numBlocksX, numBlocksY, 1);
dim3 blockSize(ADD_BLOCK_SIZE, ADD_BLOCK_SIZE, 1);
int numRowsAdded = 0;
float* aData = _devData, *bData = b._devData, *cData = c._devData;
const bool transB = b._isTrans != _isTrans, transC = c._isTrans != _isTrans;
while (numRowsAdded < height) {
if(transB) {
if(transC) {
if(checkBounds) {
kAddTrans3Fast<true, true, true><<<gridSize, blockSize>>>(aData, bData, cData,width, height - numRowsAdded, height,
scaleThis, scaleB, scaleC);
} else {
kAddTrans3Fast<false, true, true><<<gridSize, blockSize>>>(aData, bData, cData,width, height - numRowsAdded, height,
scaleThis, scaleB, scaleC);
}
} else {
if(checkBounds) {
kAddTrans3Fast<true, true, false><<<gridSize, blockSize>>>(aData, bData, cData,width, height - numRowsAdded, height,
scaleThis, scaleB, scaleC);
} else {
kAddTrans3Fast<false, true, false><<<gridSize, blockSize>>>(aData, bData, cData,width, height - numRowsAdded, height,
scaleThis, scaleB, scaleC);
}
}
} else {
if(transC) {
if(checkBounds) {
kAddTrans3Fast<true, false, true><<<gridSize, blockSize>>>(aData, bData, cData,width, height - numRowsAdded, height,
scaleThis, scaleB, scaleC);
} else {
kAddTrans3Fast<false, false, true><<<gridSize, blockSize>>>(aData, bData, cData,width, height - numRowsAdded, height,
scaleThis, scaleB, scaleC);
}
} else {
if(checkBounds) {
kAddTrans3Fast<true, false, false><<<gridSize, blockSize>>>(aData, bData, cData,width, height - numRowsAdded, height,
scaleThis, scaleB, scaleC);
} else {
kAddTrans3Fast<false, false, false><<<gridSize, blockSize>>>(aData, bData, cData,width, height - numRowsAdded, height,
scaleThis, scaleB, scaleC);
}
}
}
cuvSafeCall(cudaThreadSynchronize());
numRowsAdded += gridSize.y * ADD_BLOCK_SIZE;
gridSize.y = std::max(1, min(DIVUP((height-numRowsAdded) , ADD_BLOCK_SIZE), NUM_BLOCKS_MAX));
aData += numRowsAdded * width;
bData += b._isTrans != _isTrans ? numRowsAdded : numRowsAdded * width;
cData += c._isTrans != _isTrans ? numRowsAdded : numRowsAdded * width;
}
} else {
kAdd3<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(_devData, b._devData, c._devData,
_numElements, scaleThis, scaleB, scaleC);
cuvSafeCall(cudaThreadSynchronize());
}
}
void NVMatrix::add(NVMatrix& b, float scaleA, float scaleB, NVMatrix& target) {
if(&target == &b && &target != this) { // because we manipulate target to be like a
b.add(*this, scaleB, scaleA);
return;
}
assert(this->isSameDims(b));
target.resize(*this);
if (isTrans() != b.isTrans() && min(_numRows, _numCols) > 1) {
//call addition kernel for transposed matrices
const int width = isTrans() ? _numRows : _numCols;
const int height = isTrans() ? _numCols : _numRows;
const bool checkBounds = !(width % ADD_BLOCK_SIZE == 0 && height % ADD_BLOCK_SIZE == 0);
const int numBlocksX = DIVUP(width, ADD_BLOCK_SIZE);
assert(numBlocksX < NUM_BLOCKS_MAX);
const int numBlocksY = std::max(1, std::min(DIVUP(height, ADD_BLOCK_SIZE), NUM_BLOCKS_MAX));
dim3 gridSize(numBlocksX, numBlocksY, 1);
dim3 blockSize(ADD_BLOCK_SIZE, ADD_BLOCK_SIZE, 1);
int numRowsAdded = 0;
while (numRowsAdded < height) {
if (checkBounds) {
kAddTransFast<true><<<gridSize, blockSize>>>(&_devData[numRowsAdded * width],
&b._devData[numRowsAdded], &target._devData[numRowsAdded * width],
width, height - numRowsAdded, height, scaleA, scaleB);
} else {
kAddTransFast<false><<<gridSize, blockSize>>>(&_devData[numRowsAdded * width],
&b._devData[numRowsAdded], &target._devData[numRowsAdded * width],
width, height - numRowsAdded, height, scaleA, scaleB);
}
cuvSafeCall(cudaThreadSynchronize());
numRowsAdded += gridSize.y * ADD_BLOCK_SIZE;
gridSize.y = std::max(1, min(DIVUP(height-numRowsAdded, ADD_BLOCK_SIZE), NUM_BLOCKS_MAX));
}
} else {
if(scaleA == 1.0f && &target == this) { // cublasSaxpy accumulates in place, so this shortcut is only valid when target aliases this
cublasSaxpy(_numElements, scaleB, b._devData, 1, target._devData, 1);
checkCublasError("cublasSaxpy failed");
} else {
kAdd<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(_devData, b._devData, target._devData,
_numElements, scaleA, scaleB);
}
}
}
void NVMatrix::add(NVMatrix& b, float scaleB, NVMatrix& target) {
add(b, 1, scaleB, target);
}
void NVMatrix::add(NVMatrix& b, NVMatrix& target) {
add(b, 1, target);
}
void NVMatrix::add(NVMatrix& b, float scaleB) {
add(b, scaleB, *this);
}
void NVMatrix::add(NVMatrix& b, float scaleA, float scaleB) {
add(b, scaleA, scaleB, *this);
}
void NVMatrix::add(NVMatrix& b) {
add(b, 1, *this);
}
void NVMatrix::subtract(NVMatrix& b, NVMatrix& target) {
add(b, -1, target);
}
void NVMatrix::subtract(NVMatrix& b) {
add(b, -1);
}
void NVMatrix::eltWiseMult(NVMatrix& b, NVMatrix& target) {
if(&target == &b && &target != this) { // because we manipulate target to be like a
b.eltWiseMult(*this);
return;
}
assert(this->isSameDims(b));
target.resize(*this);
if (isTrans() != b.isTrans() && min(_numRows, _numCols) > 1) {
//call mult kernel for transposed matrices
const int width = isTrans() ? _numRows : _numCols;
const int height = isTrans() ? _numCols : _numRows;
const bool checkBounds = !(width % ADD_BLOCK_SIZE == 0 && height % ADD_BLOCK_SIZE == 0);
// if (width % ADD_BLOCK_SIZE == 0 && height % ADD_BLOCK_SIZE == 0) {
const int numBlocksX = DIVUP(width, ADD_BLOCK_SIZE);
assert(numBlocksX < NUM_BLOCKS_MAX);
const int numBlocksY = min(DIVUP(height, ADD_BLOCK_SIZE), NUM_BLOCKS_MAX);
dim3 gridSize(numBlocksX, numBlocksY, 1);
dim3 blockSize(ADD_BLOCK_SIZE, ADD_BLOCK_SIZE, 1);
int numRowsProcessed = 0;
while (numRowsProcessed < height) {
if (checkBounds) {
kMultTransFast<true><<<gridSize, blockSize>>>(&_devData[numRowsProcessed * width],
&b._devData[numRowsProcessed], &target._devData[numRowsProcessed * width],
width, height - numRowsProcessed, height);
} else {
kMultTransFast<false><<<gridSize, blockSize>>>(&_devData[numRowsProcessed * width],
&b._devData[numRowsProcessed], &target._devData[numRowsProcessed * width],
width, height - numRowsProcessed, height);
}
cuvSafeCall(cudaThreadSynchronize());
numRowsProcessed += gridSize.y * ADD_BLOCK_SIZE;
gridSize.y = min(DIVUP(height-numRowsProcessed, ADD_BLOCK_SIZE), NUM_BLOCKS_MAX);
}
// }
} else {
kMult<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(_devData, b._devData, target._devData,_numElements);
cuvSafeCall(cudaThreadSynchronize());
}
}
void NVMatrix::eltWiseMult(NVMatrix& b) {
eltWiseMult(b, *this);
}
void NVMatrix::eltWiseDivide(NVMatrix& b, NVMatrix& target) {
assert(&b != this); // doable but not necessary for me
assert(this->isSameDims(b));
target.resize(*this);
if (isTrans() != b.isTrans() && min(_numRows, _numCols) > 1) {
//call mult kernel for transposed matrices
const int width = isTrans() ? _numRows : _numCols;
const int height = isTrans() ? _numCols : _numRows;
const bool checkBounds = !(width % ADD_BLOCK_SIZE == 0 && height % ADD_BLOCK_SIZE == 0);
// if (width % ADD_BLOCK_SIZE == 0 && height % ADD_BLOCK_SIZE == 0) {
const int numBlocksX = DIVUP(width, ADD_BLOCK_SIZE);
assert(numBlocksX < NUM_BLOCKS_MAX);
const int numBlocksY = min(DIVUP(height, ADD_BLOCK_SIZE), NUM_BLOCKS_MAX);
dim3 gridSize(numBlocksX, numBlocksY, 1);
dim3 blockSize(ADD_BLOCK_SIZE, ADD_BLOCK_SIZE, 1);
int numRowsProcessed = 0;
while (numRowsProcessed < height) {
if (checkBounds) {
kDivideTransFast<true><<<gridSize, blockSize>>>(&_devData[numRowsProcessed * width],
&b._devData[numRowsProcessed], &target._devData[numRowsProcessed * width],
width, height - numRowsProcessed, height);
} else {
kDivideTransFast<false><<<gridSize, blockSize>>>(&_devData[numRowsProcessed * width],
&b._devData[numRowsProcessed], &target._devData[numRowsProcessed * width],
width, height - numRowsProcessed, height);
}
cuvSafeCall(cudaThreadSynchronize());
numRowsProcessed += gridSize.y * ADD_BLOCK_SIZE;
gridSize.y = min(DIVUP(height-numRowsProcessed, ADD_BLOCK_SIZE), NUM_BLOCKS_MAX);
}
// }
} else {
kDivide<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(_devData, b._devData, target._devData,_numElements);
cuvSafeCall(cudaThreadSynchronize());
}
}
void NVMatrix::eltWiseDivide(NVMatrix& b) {
eltWiseDivide(b, *this);
}
void NVMatrix::tile(int timesY, int timesX, NVMatrix& target) {
assert(timesX > 0 && timesY > 0);
target.resize(_numRows*timesY, _numCols*timesX);
target._isTrans = _isTrans;
if(!isTrans()) {
kTile<<<NUM_APPLY_BLOCKS,NUM_APPLY_THREADS_PER_BLOCK>>>(_devData, target._devData, _numCols, _numRows, target._numCols, target._numRows);
} else {
kTile<<<NUM_APPLY_BLOCKS,NUM_APPLY_THREADS_PER_BLOCK>>>(_devData, target._devData, _numRows, _numCols, target._numRows, target._numCols);
}
cuvSafeCall(cudaThreadSynchronize());
}
void NVMatrix::addVector(NVMatrix& vec, float scaleVec, NVMatrix& target) {
if(&target == &vec && &target != this) { // because we manipulate target to be like this
vec.add(*this, scaleVec, 1);
return;
}
assert(vec.getNumRows() == 1 || vec.getNumCols() == 1);
assert(vec.getNumRows() == _numRows || vec.getNumCols() == _numCols);
// assert(&target != &vec);
target.resize(*this);
// const unsigned int numThreads = numBlocks*numThreadsPerBlock;
const unsigned int width = _isTrans ? _numRows : _numCols;
const unsigned int height = _isTrans ? _numCols : _numRows;
if(vec.getNumRows() == _numRows && !isTrans() || vec.getNumCols() == _numCols && isTrans()) {
kAddColVector<<<NUM_ADD_VECTOR_BLOCKS,NUM_ADD_VECTOR_THREADS_PER_BLOCK>>>(_devData, vec._devData, target._devData, width, height, scaleVec);
} else {
kAddRowVector<<<NUM_ADD_VECTOR_BLOCKS,NUM_ADD_VECTOR_THREADS_PER_BLOCK>>>(_devData, vec._devData, target._devData, width, height, scaleVec);
}
cuvSafeCall(cudaThreadSynchronize());
}
void NVMatrix::addVector(NVMatrix& vec) {
addVector(vec, 1, *this);
}
void NVMatrix::addVector(NVMatrix& vec, float scaleVec) {
addVector(vec, scaleVec, *this);
}
void NVMatrix::addVector(NVMatrix& vec, NVMatrix& target) {
addVector(vec, 1, target);
}
void NVMatrix::equalsVector(NVMatrix& vec, NVMatrix& target) {
if(&target == &vec && &target != this) { // because we manipulate target to be like this
vec.equalsVector(*this);
return;
}
assert(vec.getNumRows() == 1 || vec.getNumCols() == 1);
assert(vec.getNumRows() == _numRows || vec.getNumCols() == _numCols);
target.resize(*this);
const unsigned int width = _isTrans ? _numRows : _numCols;
const unsigned int height = _isTrans ? _numCols : _numRows;
if(vec.getNumRows() == _numRows && !isTrans() || vec.getNumCols() == _numCols && isTrans()) {
kEqualsColVector<<<NUM_ADD_VECTOR_BLOCKS,NUM_ADD_VECTOR_THREADS_PER_BLOCK>>>(_devData, vec._devData, target._devData, width, height);
} else {
kEqualsRowVector<<<NUM_ADD_VECTOR_BLOCKS,NUM_ADD_VECTOR_THREADS_PER_BLOCK>>>(_devData, vec._devData, target._devData, width, height);
}
cuvSafeCall(cudaThreadSynchronize());
}
void NVMatrix::equalsVector(NVMatrix& vec) {
equalsVector(vec, *this);
}
void NVMatrix::subtractFromScalar(float scalar, NVMatrix& target) {
target.resize(*this);
kSubtractFromScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(_devData, scalar, target._devData,_numElements);
cuvSafeCall(cudaThreadSynchronize());
}
void NVMatrix::subtractFromScalar(float scalar) {
subtractFromScalar(scalar, *this);
}
void NVMatrix::addScalar(float scalar, NVMatrix& target) {
target.resize(*this);
kAddScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(_devData, scalar, target._devData,_numElements);
cuvSafeCall(cudaThreadSynchronize());
}
void NVMatrix::addScalar(float scalar) {
addScalar(scalar, *this);
}
void NVMatrix::eltWiseMultByVector(NVMatrix& vec, NVMatrix& target) {
assert(&target != &vec); // for now
assert(vec.getNumRows() == 1 || vec.getNumCols() == 1);
assert(vec.getNumRows() == _numRows || vec.getNumCols() == _numCols);
// assert(&target != &vec);
target.resize(*this);
target._isTrans = _isTrans;
// const unsigned int numThreads = numBlocks*numThreadsPerBlock;
const unsigned int width = _isTrans ? _numRows : _numCols;
const unsigned int height = _isTrans ? _numCols : _numRows;
if(vec.getNumRows() == _numRows && !isTrans() || vec.getNumCols() == _numCols && isTrans()) {
kMultByColVector<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(_devData, vec._devData, target._devData, width, height);
} else {
kMultByRowVector<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(_devData, vec._devData, target._devData, width, height);
}
cuvSafeCall(cudaThreadSynchronize());
}
void NVMatrix::eltWiseMultByVector(NVMatrix& vec) {
eltWiseMultByVector(vec, *this);
}
void NVMatrix::eltWiseDivideByVector(NVMatrix& vec) {
eltWiseDivideByVector(vec, *this);
}
void NVMatrix::eltWiseDivideByVector(NVMatrix& vec, NVMatrix& target) {
NVMatrix* vecRecip = new NVMatrix(vec);
vec.apply(NVMatrix::RECIPROCAL, *vecRecip);
eltWiseMultByVector(*vecRecip, target);
cudaThreadSynchronize();
delete vecRecip;
}
void NVMatrix::eltWiseDivideByVector2(NVMatrix& vec) {
eltWiseDivideByVector2(vec, *this);
}
void NVMatrix::eltWiseDivideByVector2(NVMatrix& vec, NVMatrix& target) {
assert(&target != &vec); // for now
assert(vec.getNumRows() == 1 || vec.getNumCols() == 1);
assert(vec.getNumRows() == _numRows || vec.getNumCols() == _numCols);
// assert(&target != &vec);
target.resize(*this);
// const unsigned int numThreads = numBlocks*numThreadsPerBlock;
const unsigned int width = _isTrans ? _numRows : _numCols;
const unsigned int height = _isTrans ? _numCols : _numRows;
if(vec.getNumRows() == _numRows && !isTrans() || vec.getNumCols() == _numCols && isTrans()) {
kDivideByColVector<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(_devData, vec._devData, target._devData, width, height);
} else {
kDivideByRowVector<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(_devData, vec._devData, target._devData, width, height);
}
cuvSafeCall(cudaThreadSynchronize());
}
void NVMatrix::scale(float scale) {
cublasSscal(_numElements, scale, _devData, 1);
checkCublasError("cublasSscal failed.");
}
void NVMatrix::scale(float scale, NVMatrix& target) {
target.resize(*this);
target.copyFromDevice(*this);
target.scale(scale);
}
/*
 * The numThreadsPerBlock argument is ignored when aggregating rows (axis=1)
 * because the row-aggregation kernels require a power-of-2 thread count.
*/
void NVMatrix::aggregate(int axis, NVMatrix& target, int numThreadsPerBlock, NVMatrix::AGGREGATIONS agg) {
assert(&target != this);
unsigned int width = _isTrans ? _numRows : _numCols;
const int height = _isTrans ? _numCols : _numRows;
target.setTrans(_isTrans);
assert(width > 0);
assert(height > 0);
if(axis == 0 && !_isTrans || axis == 1 && _isTrans) { //col sum
target.resize(!_isTrans ? 1 : _numRows, !_isTrans ? _numCols : 1);
const unsigned int numBlocks = (width + numThreadsPerBlock - 1) / numThreadsPerBlock;
assert(numBlocks * numThreadsPerBlock >= width);
assert(numBlocks < NUM_BLOCKS_MAX);
// target.resize(1, width);
if(agg == NVMatrix::MAX) {
kDumbMaxCols<<<numBlocks,numThreadsPerBlock>>>(_devData, target._devData, width, height);
} else if(agg == NVMatrix::SUM) {
kDumbSumCols<<<numBlocks,numThreadsPerBlock>>>(_devData, target._devData, width, height);
}
cuvSafeCall(cudaThreadSynchronize());
} else { // row sum
target.resize(_isTrans ? 1 : _numRows, _isTrans ? _numCols : 1);
if (width > 1) {
NVMatrix *prevSum = this;
while (prevSum->getLeadingDim() > 1) {
int numBlocksX, numBlocksY, numThreadsX, numThreadsY;
bool doLinearAgg = height >= 16384;
// doQuickAgg = !doQuickAgg;
if(doLinearAgg) { // call the special short aggregation functions
numBlocksX = 1;
numBlocksY = DIVUP(height, AGG_SHORT_ROWS_THREADS_Y*AGG_SHORT_ROWS_LOOPS_Y);
numThreadsX = AGG_SHORT_ROWS_THREADS_X;
numThreadsY = AGG_SHORT_ROWS_THREADS_Y;
while(numBlocksY > NUM_BLOCKS_MAX) {
numBlocksY = DIVUP(numBlocksY,2);
numBlocksX *= 2;
}
} else {
numThreadsX = width <= 64 ? 32 : (width <= 128 ? 64 : (width <= 256 ? 128 : (width <= 512 ? 256 : 512)));
numThreadsY = 1;
numBlocksX = DIVUP(width, 2*numThreadsX);
numBlocksY = std::min(height, NUM_BLOCKS_MAX);
}
dim3 grid(numBlocksX, numBlocksY), threads(numThreadsX, numThreadsY);
assert(numBlocksX <= NUM_BLOCKS_MAX);
assert(numBlocksY <= NUM_BLOCKS_MAX);
// printf("%d %d %d %d %d \n", numThreadsX, numThreadsY, numBlocksX, numBlocksY, numBlocksZ);
NVMatrix *nvSumAccum = target.getFollowingDim() == height && target.getLeadingDim() == numBlocksX ? &target : new NVMatrix(height, numBlocksX, false);
// printf("target size: %dx%d\n", target.getNumRows(), target.getNumCols());
// printf("liear agg: %d, width: %d, height: %d\n", doLinearAgg, width, height);
// printf("accum is target: %d\n", nvSumAccum == &target);
if(agg == NVMatrix::MAX) {
if(doLinearAgg) {
if(width <= 16) {
if(width <= 4) {
kAggShortRows<AGG_MAX, 1, 4><<<grid, threads>>>(prevSum->_devData, nvSumAccum->_devData,width, height);
} else if(width <= 8) {
kAggShortRows<AGG_MAX, 1, 8><<<grid, threads>>>(prevSum->_devData, nvSumAccum->_devData,width, height);
} else if(width <= 12) {
kAggShortRows<AGG_MAX, 1, 12><<<grid, threads>>>(prevSum->_devData, nvSumAccum->_devData,width, height);
} else {
kAggShortRows<AGG_MAX, 1, 16><<<grid, threads>>>(prevSum->_devData, nvSumAccum->_devData,width, height);
}
} else if(width <= 32) {
kAggShortRows<AGG_MAX, 2, AGG_SHORT_ROWS_THREADS_X><<<grid, threads>>>(prevSum->_devData, nvSumAccum->_devData,width, height);
} else if(width <= 48){
kAggShortRows<AGG_MAX, 3, AGG_SHORT_ROWS_THREADS_X><<<grid, threads>>>(prevSum->_devData, nvSumAccum->_devData,width, height);
} else if(width <= 64){
kAggShortRows<AGG_MAX, 4, AGG_SHORT_ROWS_THREADS_X><<<grid, threads>>>(prevSum->_devData, nvSumAccum->_devData,width, height);
} else {
kAggShortRows2<AGG_MAX><<<grid, threads>>>(prevSum->_devData, nvSumAccum->_devData,width, height);
}
} else if(width <= 64) {
kMaxRows<32><<<grid, threads>>>(prevSum->_devData, nvSumAccum->_devData,
width, height, nvSumAccum->getLeadingDim());
} else if(width <= 128) {
kMaxRows<64><<<grid, threads>>>(prevSum->_devData, nvSumAccum->_devData,
width, height, nvSumAccum->getLeadingDim());
} else if(width <= 256) {
kMaxRows<128><<<grid, threads>>>(prevSum->_devData, nvSumAccum->_devData,
width, height, nvSumAccum->getLeadingDim());
} else if(width <= 512) {
kMaxRows<256><<<grid, threads>>>(prevSum->_devData, nvSumAccum->_devData,
width, height, nvSumAccum->getLeadingDim());
} else {
kMaxRows<512><<<grid, threads>>>(prevSum->_devData, nvSumAccum->_devData,
width, height, nvSumAccum->getLeadingDim());
}
} else if(agg == NVMatrix::SUM) {
if(doLinearAgg) {
if(width <= 16) {
if(width <= 4) {
kAggShortRows<AGG_SUM, 1, 4><<<grid, threads>>>(prevSum->_devData, nvSumAccum->_devData,width, height);
} else if(width <= 8) {
kAggShortRows<AGG_SUM, 1, 8><<<grid, threads>>>(prevSum->_devData, nvSumAccum->_devData,width, height);
} else if(width <= 12) {
kAggShortRows<AGG_SUM, 1, 12><<<grid, threads>>>(prevSum->_devData, nvSumAccum->_devData,width, height);
} else {
kAggShortRows<AGG_SUM, 1, 16><<<grid, threads>>>(prevSum->_devData, nvSumAccum->_devData,width, height);
}
} else if(width <= 32) {
kAggShortRows<AGG_SUM, 2, AGG_SHORT_ROWS_THREADS_X><<<grid, threads>>>(prevSum->_devData, nvSumAccum->_devData,width, height);
} else if(width <= 48) {
kAggShortRows<AGG_SUM, 3, AGG_SHORT_ROWS_THREADS_X><<<grid, threads>>>(prevSum->_devData, nvSumAccum->_devData,width, height);
} else if(width <= 64){
kAggShortRows<AGG_SUM, 4, AGG_SHORT_ROWS_THREADS_X><<<grid, threads>>>(prevSum->_devData, nvSumAccum->_devData,width, height);
} else {
kAggShortRows2<AGG_SUM><<<grid, threads>>>(prevSum->_devData, nvSumAccum->_devData,width, height);
}
} else if (width <= 64) {
kSumRows<32><<<grid, threads>>>(prevSum->_devData, nvSumAccum->_devData,
width, height, nvSumAccum->getLeadingDim());
} else if (width <= 128) {
kSumRows<64><<<grid, threads>>>(prevSum->_devData, nvSumAccum->_devData,
width, height, nvSumAccum->getLeadingDim());
} else if (width <= 256) {
kSumRows<128><<<grid, threads>>>(prevSum->_devData, nvSumAccum->_devData,
width, height, nvSumAccum->getLeadingDim());
} else if (width <= 512) {
kSumRows<256><<<grid, threads>>>(prevSum->_devData, nvSumAccum->_devData,
width, height, nvSumAccum->getLeadingDim());
} else {
kSumRows<512><<<grid, threads>>>(prevSum->_devData, nvSumAccum->_devData,
width, height, nvSumAccum->getLeadingDim());
}
}
cuvSafeCall(cudaThreadSynchronize());
width = numBlocksX;
if (prevSum != this) {
delete prevSum;
}
prevSum = nvSumAccum;
}
// if (_isTrans) {
// prevSum->_numCols = prevSum->_numRows;
// prevSum->_numRows = 1;
// }
// target.copyFromDevice(*prevSum);
// delete prevSum;
} else {
target.resize(*this);
target.copyFromDevice(*this);
}
}
}
void NVMatrix::max(int axis, NVMatrix& target) {
if(axis == 0 && !_isTrans || axis == 1 && _isTrans) {
aggregate(axis, target, NUM_SUM_COLS_THREADS_PER_BLOCK, NVMatrix::MAX);
} else {
aggregate(axis, target, NUM_SUM_ROWS_THREADS_PER_BLOCK, NVMatrix::MAX);
}
}
NVMatrix& NVMatrix::max(int axis) {
NVMatrix *sumVec = new NVMatrix();
max(axis, *sumVec);
return *sumVec;
}
void NVMatrix::sum(int axis, NVMatrix& target) {
assert(axis == 0 || axis == 1);
if(axis == 0 && !_isTrans || axis == 1 && _isTrans) {
aggregate(axis, target, NUM_SUM_COLS_THREADS_PER_BLOCK, NVMatrix::SUM);
} else {
aggregate(axis, target, NUM_SUM_ROWS_THREADS_PER_BLOCK, NVMatrix::SUM);
}
}
NVMatrix& NVMatrix::sum(int axis) {
NVMatrix *sumVec = new NVMatrix();
sum(axis, *sumVec);
return *sumVec;
}
float NVMatrix::sum() {
WARN("Summing over all matrix elements first performs a sum over all columns. If your matrix has few columns, this is inefficient.");
NVMatrix devSum = NVMatrix();
sum(_isTrans && _numRows > _numCols || !_isTrans && _numRows < _numCols ? 1 : 0, devSum);
Matrix hostSum = Matrix(devSum._numRows, devSum._numCols);
cudaThreadSynchronize();
devSum.copyToHost(hostSum);
return hostSum.sum();
}
void NVMatrix::print(int startRow, int rows, int startCol, int cols) const {
cudaThreadSynchronize();
Matrix* hm = new Matrix(_numRows, _numCols);
copyToHost(*hm);
hm->print(startRow, rows, startCol, cols);
delete hm;
}
void NVMatrix::print(int rows, int cols) const {
print(0, rows, 0, cols);
}
void NVMatrix::printShape(const char* name) const {
printf("%s: %dx%d\n", name, _numRows, _numCols);
}
//========================================================
// NVMatrix but initialized with zeros instead of whatever
// happens to be in memory.
//========================================================
NVZeroMatrix::NVZeroMatrix(int numRows, int numCols, bool isTrans) : NVMatrix(numRows, numCols, isTrans) {
apply(NVMatrix::ZERO);
}
NVZeroMatrix::NVZeroMatrix(Matrix& like) : NVMatrix(like.getNumRows(), like.getNumCols()) {
apply(NVMatrix::ZERO);
}
NVZeroMatrix::NVZeroMatrix(NVMatrix& like) : NVMatrix(like.getNumRows(), like.getNumCols()) {
apply(NVMatrix::ZERO);
}
|
aa6a2d3d8196d612bddfc8f66f554a071559c2b8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <spray/util/cuda_assert.hpp>
#include <spray/core/color.hpp>
#include <thrust/device_vector.h>
#include <thrust/device_ptr.h>
namespace spray
{
namespace core
{
surface<void, cudaSurfaceType2D> surf_ref;
__global__
void show_image_kernel(const std::size_t width, const std::size_t height,
thrust::device_ptr<const spray::core::color> scene)
{
const int x = threadIdx.x + blockIdx.x * blockDim.x;
const int y = threadIdx.y + blockIdx.y * blockDim.y;
if(x >= width || y >= height) {return;}
const std::size_t offset = x + y * width;
const uchar4 pixel = spray::core::make_pixel(scene[offset]);
surf2Dwrite(pixel, surf_ref, x * sizeof(uchar4), y, hipBoundaryModeZero);
return;
}
void show_image(const dim3 blocks, const dim3 threads,
const hipStream_t stream, const hipArray_const_t& buf,
const std::size_t width, const std::size_t height,
thrust::device_ptr<const spray::core::color> scene)
{
spray::util::cuda_assert(hipBindSurfaceToArray(surf_ref, buf));
hipLaunchKernelGGL(( show_image_kernel), dim3(blocks), dim3(threads), 0, stream, width, height, scene);
return;
}
} // core
} // spray
|
aa6a2d3d8196d612bddfc8f66f554a071559c2b8.cu
|
#include <spray/util/cuda_assert.hpp>
#include <spray/core/color.hpp>
#include <thrust/device_vector.h>
#include <thrust/device_ptr.h>
namespace spray
{
namespace core
{
surface<void, cudaSurfaceType2D> surf_ref;
__global__
void show_image_kernel(const std::size_t width, const std::size_t height,
thrust::device_ptr<const spray::core::color> scene)
{
const int x = threadIdx.x + blockIdx.x * blockDim.x;
const int y = threadIdx.y + blockIdx.y * blockDim.y;
if(x >= width || y >= height) {return;}
const std::size_t offset = x + y * width;
const uchar4 pixel = spray::core::make_pixel(scene[offset]);
surf2Dwrite(pixel, surf_ref, x * sizeof(uchar4), y, cudaBoundaryModeZero);
return;
}
void show_image(const dim3 blocks, const dim3 threads,
const cudaStream_t stream, const cudaArray_const_t& buf,
const std::size_t width, const std::size_t height,
thrust::device_ptr<const spray::core::color> scene)
{
spray::util::cuda_assert(cudaBindSurfaceToArray(surf_ref, buf));
show_image_kernel<<<blocks, threads, 0, stream>>>(width, height, scene);
return;
}
} // core
} // spray
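// [Editor's sketch, not part of the original file] One way the cudaArray bound
// in show_image() above could be created. The uchar4 channel format matches the
// surf2Dwrite() call, and cudaArraySurfaceLoadStore is required for surface
// binding; this helper is an assumption, not code taken from the spray sources.
inline cudaArray_t make_display_array(std::size_t width, std::size_t height)
{
    cudaArray_t array = nullptr;
    const cudaChannelFormatDesc desc = cudaCreateChannelDesc<uchar4>();
    spray::util::cuda_assert(cudaMallocArray(&array, &desc, width, height,
                                             cudaArraySurfaceLoadStore));
    return array;
}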
|
b83a2741db4a2721c1ca20b2c7613570c86925ea.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//This file is taken from torrvision/crfasrnn
#define BLOCK_SIZE 64
#include <stdio.h>
#include "include/modified_permutohedral.h"
#include "include/cuda_macros.h"
#include "hash_helper.cu"
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template <typename Dtype>
void gpu_set(const int N, const Dtype alpha, Dtype* Y) {
if (alpha == 0) {
CUDA_CHECK(hipMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( set_kernel<Dtype>), dim3(CUDA_GET_BLOCKS(N)), dim3(CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template void gpu_set<int>(const int N, const int alpha, int* Y);
template void gpu_set<float>(const int N, const float alpha, float* Y);
template void gpu_set<double>(const int N, const double alpha, double* Y);
static void swapHashTableValues(float* oldValues, float *newValues, float* table_values,size_t size) {
CUDA_CHECK(hipMemcpy(oldValues,table_values,size,hipMemcpyDeviceToDevice));
CUDA_CHECK(hipMemcpy(table_values,newValues,size,hipMemcpyDeviceToDevice));
CUDA_CHECK(hipMemcpy(newValues,oldValues,size,hipMemcpyDeviceToDevice));
// Works but gives poorer results
//oldValues = table_values;
//table_values = newValues;
//newValues = oldValues;
}
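// [Editor's sketch, not part of the original file] The commented-out pointer
// rotation above has no effect on the caller because the pointers are passed by
// value. A by-reference variant would avoid the three device-to-device copies;
// it is shown only as an illustration (the upstream code keeps the memcpy
// version and reports poorer results with a plain swap).
static void swapHashTableValuesByRef(float*& newValues, float*& table_values) {
  float* tmp   = table_values;
  table_values = newValues;   // the lattice now reads the freshly blurred values
  newValues    = tmp;         // the old buffer becomes scratch for the next blur pass
}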
template<int pd>
__global__ static void createMatrix(const int w, const int h,
const float *positions,
int *table_entries,
int table_capacity,
signed short* table_keys,
const float *scaleFactor,
MatrixEntry *matrix)
{
// scanline order
//const int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x;
//const bool outOfBounds = (idx>=num_points) ;
//const int threadId = idx;
// 8x8 blocks
const int x = threadIdx.x + blockIdx.x * blockDim.x;
const int y = threadIdx.y + blockIdx.y * blockDim.y;
const int threadId = threadIdx.y*blockDim.x + threadIdx.x;
const int idx = y*w + x;
const bool outOfBounds = (x >= w) || (y >= h);
float myElevated[pd+1];
const float *myPosition = positions + idx*pd;
int myGreedy[pd+1];
int myRank[pd+1];
float myBarycentric[pd+2];
__shared__ short keys[pd*BLOCK_SIZE];
short *myKey = keys + threadId * pd;
if (!outOfBounds) {
myElevated[pd] = -pd*(myPosition[pd-1])*scaleFactor[pd-1];
for (int i = pd-1; i > 0; i--) {
myElevated[i] = (myElevated[i+1] -
i*(myPosition[i-1])*scaleFactor[i-1] +
(i+2)*(myPosition[i])*scaleFactor[i]);
}
myElevated[0] = myElevated[1] + 2*(myPosition[0])*scaleFactor[0];
// find the closest zero-colored lattice point
// greedily search for the closest zero-colored lattice point
signed short sum = 0;
for (int i = 0; i <= pd; i++) {
float v = myElevated[i]*(1.0f/(pd+1));
float up = ceilf(v) * (pd+1);
float down = floorf(v) * (pd+1);
if (up - myElevated[i] < myElevated[i] - down) {
myGreedy[i] = (signed short)up;
} else {
myGreedy[i] = (signed short)down;
}
sum += myGreedy[i];
}
sum /= pd+1;
// sort differential to find the permutation between this simplex and the canonical one
for (int i = 0; i <= pd; i++) {
myRank[i] = 0;
for (int j = 0; j <= pd; j++) {
if (myElevated[i] - myGreedy[i] < myElevated[j] - myGreedy[j] ||
(myElevated[i] - myGreedy[i] == myElevated[j] - myGreedy[j]
&& i > j)) {
myRank[i]++;
}
}
}
if (sum > 0) { // sum too large, need to bring down the ones with the smallest differential
for (int i = 0; i <= pd; i++) {
if (myRank[i] >= pd + 1 - sum) {
myGreedy[i] -= pd+1;
myRank[i] += sum - (pd+1);
} else {
myRank[i] += sum;
}
}
} else if (sum < 0) { // sum too small, need to bring up the ones with largest differential
for (int i = 0; i <= pd; i++) {
if (myRank[i] < -sum) {
myGreedy[i] += pd+1;
myRank[i] += (pd+1) + sum;
} else {
myRank[i] += sum;
}
}
}
// turn delta into barycentric coords
for (int i = 0; i <= pd+1; i++) {
myBarycentric[i] = 0;
}
for (int i = 0; i <= pd; i++) {
float delta = (myElevated[i] - myGreedy[i]) * (1.0f/(pd+1));
myBarycentric[pd-myRank[i]] += delta;
myBarycentric[pd+1-myRank[i]] -= delta;
}
myBarycentric[0] += 1.0f + myBarycentric[pd+1];
}
for (int color = 0; color <= pd; color++) {
// Compute the location of the lattice point explicitly (all but
// the last coordinate - it's redundant because they sum to zero)
if (!outOfBounds) {
for (int i = 0; i < pd; i++) {
myKey[i] = myGreedy[i] + color;
if (myRank[i] > pd-color) myKey[i] -= (pd+1);
}
}
if (!outOfBounds) {
MatrixEntry r;
r.index = hashTableInsert<pd>(myKey, table_keys, table_entries,
table_capacity, idx*(pd+1)+color);
r.weight = myBarycentric[color];
matrix[idx*(pd+1) + color] = r;
}
}
}
template<int kd>
__global__ static void cleanHashTable(const int n,
int *table_entries,
int table_capacity,
signed short* table_keys)
{
const int idx = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x * blockDim.y + threadIdx.x;
if (idx >= n) return;
// find my hash table entry
int *e = table_entries + idx;
// Check if I created my own key in the previous phase
if (*e >= 0) {
// Rehash my key and reset the pointer in order to merge with
// any other pixel that created a different entry under the
// same key. If the computation was serial this would never
// happen, but sometimes race conditions can make the same key
// be inserted twice. hashTableRetrieve always returns the
// earlier, so it's no problem as long as we rehash now.
*e = hashTableRetrieve<kd>(table_keys + *e*kd,
table_entries, table_keys, table_capacity);
}
}
template<int pd>
__global__ static void resetIndex(const int w, const int h,
MatrixEntry *matrix,
int *table_entries)
{
const int x = threadIdx.x + blockIdx.x * blockDim.x;
const int y = threadIdx.y + (blockIdx.y/(pd+1)) * blockDim.y;
const int color = blockIdx.y % (pd+1);
const int idx = y*w + x;
const bool outOfBounds = (x >= w) || (y >= h);
if (!outOfBounds){
MatrixEntry r = matrix[idx*(pd+1)+color];
matrix[idx*(pd+1)+color].index = table_entries[r.index];
}
}
template<int pd, typename Dtype>
__global__ static void splatCache(const int w, const int h, const int vd,
const Dtype *values,
const MatrixEntry *matrix,
float *table_values)
{
const int x = threadIdx.x + blockIdx.x * blockDim.x;
const int y = threadIdx.y + (blockIdx.y/(pd+1)) * blockDim.y;
const int threadId = threadIdx.y*blockDim.x + threadIdx.x;
const int color = blockIdx.y % (pd+1);
const int idx = y*w + x;
const bool outOfBounds = (x >= w) || (y >= h);
__shared__ int sharedOffsets[BLOCK_SIZE];
extern __shared__ float sharedValues[];
int myOffset = -1;
float *myValue = sharedValues + threadId*(vd+1);
if (!outOfBounds) {
const Dtype *value = values + idx;
MatrixEntry r = matrix[idx*(pd+1)+color];
// convert the matrix entry from a pointer into the entries array to a pointer into the keys/values array
//matrix[idx*(pd+1)+color].index = r.index = table_entries[r.index];
// record the offset into the keys/values array in shared space
myOffset = sharedOffsets[threadId] = r.index*(vd+1);
for (int j = 0; j < vd; j++) {
myValue[j] = (float)value[j*w*h]*r.weight;
}
myValue[vd] = r.weight;
} else {
sharedOffsets[threadId] = -1;
}
__syncthreads();
// am I the first thread in this block to care about this key?
if (outOfBounds) return;
for (int i = 0; i < BLOCK_SIZE; i++) {
if (i < threadId) {
if (myOffset == sharedOffsets[i]) {
// somebody else with higher priority cares about this key
return;
}
} else if (i > threadId) {
if (myOffset == sharedOffsets[i]) {
// someone else with lower priority cares about this key, accumulate it into mine
for (int j = 0; j <= vd; j++) {
sharedValues[threadId*(vd+1) + j] += sharedValues[i*(vd+1) + j];
}
}
}
}
// only the threads with something to write to main memory are still going
float *val = table_values + myOffset;
for (int j = 0; j <= vd; j++) {
atomicAdd(val+j, myValue[j]);
}
}
template<int pd>
__global__ static void blur(int n, float *newValues,
const MatrixEntry *matrix,
const int *table_entries,
const signed short *table_keys,
const int table_capacity,
float *table_values,
int color,
const int vd)
{
const int idx = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x * blockDim.y + threadIdx.x;
if (idx >= n) return;
// Check if I'm valid
if (matrix[idx].index != idx) return;
// find my key and the keys of my neighbours
short myKey[pd+1];
short np[pd+1];
short nm[pd+1];
for (int i = 0; i < pd; i++) {
myKey[i] = table_keys[idx*pd+i];
np[i] = myKey[i]+1;
nm[i] = myKey[i]-1;
}
np[color] -= pd+1;
nm[color] += pd+1;
int offNp = hashTableRetrieve<pd>(np, table_entries, table_keys, table_capacity);
int offNm = hashTableRetrieve<pd>(nm, table_entries, table_keys, table_capacity);
float *valMe = table_values + (vd+1)*idx;
float *valNp = table_values + (vd+1)*offNp;
float *valNm = table_values + (vd+1)*offNm;
float *valOut = newValues + (vd+1)*idx;
if (offNp >= 0 && offNm >= 0) {
for (int i = 0; i <= vd; i++) {
valOut[i] = (valNp[i] + (valMe[i]*2) + valNm[i])/2;
}
} else if (offNp >= 0) {
for (int i = 0; i <= vd; i++) {
valOut[i] = (valNp[i] + (valMe[i]*2))/2;
}
} else if (offNm >= 0) {
for (int i = 0; i <= vd; i++) {
valOut[i] = (valNm[i] + (valMe[i]*2))/2;
}
} else {
for (int i = 0; i <= vd; i++) {
valOut[i] = valMe[i];
}
}
}
template<int pd, typename Dtype>
__global__ static void slice(const int w, const int h, const int vd,
Dtype *values,
const MatrixEntry *matrix,
float *table_values,
bool add) {
//const int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x;
const int x = threadIdx.x + blockIdx.x * blockDim.x;
const int y = threadIdx.y + blockIdx.y * blockDim.y;
const int threadId = threadIdx.y*blockDim.x + threadIdx.x;
const int idx = y*w + x;
const bool outOfBounds = (x >= w) || (y >= h);
if (outOfBounds) return;
extern __shared__ float localValue[];
float *myValue = localValue + threadId*vd;
float myWeight = 0;
for (int i = 0; i < vd; i++) {
myValue[i] = 0;
}
for (int i = 0; i <= pd; i++) {
MatrixEntry r = matrix[idx*(pd+1) + i];
const float *val = table_values + r.index*(vd+1);
for (int j = 0; j < vd; j++) {
myValue[j] += r.weight*val[j];
}
myWeight += r.weight*val[vd];
}
//myWeight = 1.0f/myWeight;
float alpha = 1.0f / (1+powf(2, -pd));
for (int j = 0; j < vd; j++){
if(!add){
values[j*w*h + idx] = 0;
}
values[j*w*h + idx] += myValue[j]*alpha;
}
}
template<int pd>
void gpu_init(const float* features,
HashTable* table,
MatrixEntry* matrix,
const int w, const int h)
{
int num_points = w*h ;
// Scan line order
//unsigned int blocks = (num_points-1)/64 + 1;
//unsigned int blockSize = 64;
dim3 blocks((w-1)/8+1, (h-1)/8+1, 1);
dim3 blockSize(8, 8, 1);
float blurVariance = 0.5 ;
float * scaleFactor;
float* scaleFactorHost = new float[pd];
// Create Scale factor vector and give it to GPU
// num_dimensions is likely to be low so do that
// on the CPU
for (int i = 0; i < pd; i++) {
scaleFactorHost[i] = (pd+1)*sqrtf((1.0/6 + blurVariance)/((i+1)*(i+2)));
}
CUDA_CHECK(hipMalloc((void**)&scaleFactor, sizeof(float)*pd));
CUDA_CHECK(hipMemcpy(scaleFactor, scaleFactorHost, sizeof(float)*pd, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( createMatrix<pd>), dim3(blocks), dim3(blockSize), 0, 0, w, h,
features,
table->table_entries,
table->table_capacity,
table->table_keys,
scaleFactor,
matrix);
CUDA_POST_KERNEL_CHECK;
// fix duplicate hash table entries
int cleanBlockSize = 32;
dim3 cleanBlocks((num_points-1)/cleanBlockSize+1, 2*(pd+1), 1);
hipLaunchKernelGGL(( cleanHashTable<pd>), dim3(cleanBlocks), dim3(cleanBlockSize), 0, 0, 2*num_points*(pd+1),
table->table_entries, table->table_capacity, table->table_keys);
CUDA_POST_KERNEL_CHECK;
blocks.y *= pd+1;
hipLaunchKernelGGL(( resetIndex<pd>), dim3(blocks), dim3(blockSize), 0, 0, w, h, matrix, table->table_entries) ;
CUDA_POST_KERNEL_CHECK;
// Clean intermediate variables
delete[] scaleFactorHost;
CUDA_CHECK(hipFree(scaleFactor));
}
template<int pd, typename Dtype>
void gpu_compute(Dtype* out, const Dtype* in, const HashTable &table,
const MatrixEntry* matrix,
int w, int h, int vd,
bool reverse, bool add){
// Create table_values
int num_points = w*h ;
float *table_values ;
CUDA_CHECK(hipMalloc((void**)&table_values, sizeof(float)*(vd+1)*num_points*(pd+1))) ;
gpu_set<float>(num_points*(vd+1)*(pd+1), 0, table_values) ;
dim3 blocks((w-1)/8+1, (h-1)/8+1, 1);
dim3 blockSize(8, 8, 1);
// splat splits by color, so extend the y coordinate to our blocks to represent that
blocks.y *= pd+1;
hipLaunchKernelGGL(( splatCache<pd, Dtype>), dim3(blocks), dim3(blockSize), BLOCK_SIZE*(vd+1)*sizeof(float), 0, w, h, vd,
in,
matrix,
table_values);
CUDA_POST_KERNEL_CHECK;
// blur
int cleanBlockSize = 32;
dim3 cleanBlocks((num_points-1)/cleanBlockSize+1, 2*(pd+1), 1);
float *newValues;
float *oldValues;
size_t size = num_points*(pd+1)*(vd+1)*sizeof(float);
CUDA_CHECK(hipMalloc((void**)&(newValues), size));
CUDA_CHECK(hipMalloc((void**)&(oldValues), size));
gpu_set<float>(num_points*(vd+1)*(pd+1), 0, newValues) ;
for (int color = reverse?pd:0; color <= pd && color>=0; reverse?color--:color++) {
hipLaunchKernelGGL(( blur<pd>), dim3(cleanBlocks), dim3(cleanBlockSize), 0, 0, num_points*(pd+1), newValues,
matrix,
table.table_entries,
table.table_keys,
table.table_capacity,
table_values,
color,
vd);
CUDA_POST_KERNEL_CHECK;
// swapping the pointers directly does not seem to work...
swapHashTableValues(oldValues, newValues, table_values, size);
}
// slice
blocks.y /= (pd+1);
hipLaunchKernelGGL(( slice<pd, Dtype>), dim3(blocks), dim3(blockSize), sizeof(float)*BLOCK_SIZE*vd, 0, w, h, vd, out, matrix, table_values, add);
CUDA_POST_KERNEL_CHECK;
// Free memory
CUDA_CHECK(hipFree(table_values)) ;
CUDA_CHECK(hipFree(newValues)) ;
CUDA_CHECK(hipFree(oldValues)) ;
}
void ModifiedPermutohedral::init_gpu(const float* features, int num_dimensions, int w, int h) {
//Initialize Hash table
if(!is_init){
table.createHashTable(w*h*(num_dimensions+1), num_dimensions);
CUDA_CHECK(hipMalloc((void **)&matrix, sizeof(MatrixEntry)*(w*h*(num_dimensions+1))));
} else {
table.resetHashTable(w_*h_*(d_+1), d_);
}
w_ = w ;
h_ = h ;
d_ = num_dimensions ;
N_ = w*h ;
switch(num_dimensions){
case 2:
gpu_init<2>(features, &table, matrix, w_, h_);
break;
case 5:
gpu_init<5>(features, &table, matrix, w_, h_);
break;
default:
std::cout << "num_dimensions should be 2 or 5";
}
is_init = true;
}
void ModifiedPermutohedral::compute_gpu(float* out, const float* in, int value_size, bool reverse, bool add) const {
// Losing time by dynamically allocating memory but more general function
if(!is_init)
std::cout << "Initialize lattice before doing any computing";
switch(d_){
case 2:
gpu_compute<2, float>(out, in, table, matrix, w_, h_, value_size, reverse, add);
break;
case 5:
gpu_compute<5, float>(out, in, table, matrix, w_, h_, value_size, reverse, add);
break;
default:
std::cout << "num_dimensions should be 2 or 5";
}
}
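// [Editor's sketch, not part of the original file] Typical host-side use of the
// lattice above for a pd = 5 (bilateral) filter. The feature layout (pd floats
// per pixel, resident on the device, as consumed by createMatrix) and the calling
// context are assumptions, not code taken from this repository.
void permutohedral_filter_sketch(ModifiedPermutohedral& lattice,
                                 const float* d_features, // device pointer, 5 floats per pixel
                                 const float* d_in, float* d_out,
                                 int w, int h, int channels) {
  lattice.init_gpu(d_features, /*num_dimensions=*/5, w, h);  // build the lattice (splat positions)
  lattice.compute_gpu(d_out, d_in, /*value_size=*/channels,
                      /*reverse=*/false, /*add=*/false);     // splat -> blur -> slice
}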
|
b83a2741db4a2721c1ca20b2c7613570c86925ea.cu
|
//This file is taken from torrvision/crfasrnn
#define BLOCK_SIZE 64
#include <stdio.h>
#include "include/modified_permutohedral.h"
#include "include/cuda_macros.h"
#include "hash_helper.cu"
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template <typename Dtype>
void gpu_set(const int N, const Dtype alpha, Dtype* Y) {
if (alpha == 0) {
CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
set_kernel<Dtype><<<CUDA_GET_BLOCKS(N), CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template void gpu_set<int>(const int N, const int alpha, int* Y);
template void gpu_set<float>(const int N, const float alpha, float* Y);
template void gpu_set<double>(const int N, const double alpha, double* Y);
static void swapHashTableValues(float* oldValues, float *newValues, float* table_values,size_t size) {
CUDA_CHECK(cudaMemcpy(oldValues,table_values,size,cudaMemcpyDeviceToDevice));
CUDA_CHECK(cudaMemcpy(table_values,newValues,size,cudaMemcpyDeviceToDevice));
CUDA_CHECK(cudaMemcpy(newValues,oldValues,size,cudaMemcpyDeviceToDevice));
// Works but gives poorer results
//oldValues = table_values;
//table_values = newValues;
//newValues = oldValues;
}
template<int pd>
__global__ static void createMatrix(const int w, const int h,
const float *positions,
int *table_entries,
int table_capacity,
signed short* table_keys,
const float *scaleFactor,
MatrixEntry *matrix)
{
// scanline order
//const int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x;
//const bool outOfBounds = (idx>=num_points) ;
//const int threadId = idx;
// 8x8 blocks
const int x = threadIdx.x + blockIdx.x * blockDim.x;
const int y = threadIdx.y + blockIdx.y * blockDim.y;
const int threadId = threadIdx.y*blockDim.x + threadIdx.x;
const int idx = y*w + x;
const bool outOfBounds = (x >= w) || (y >= h);
float myElevated[pd+1];
const float *myPosition = positions + idx*pd;
int myGreedy[pd+1];
int myRank[pd+1];
float myBarycentric[pd+2];
__shared__ short keys[pd*BLOCK_SIZE];
short *myKey = keys + threadId * pd;
if (!outOfBounds) {
myElevated[pd] = -pd*(myPosition[pd-1])*scaleFactor[pd-1];
for (int i = pd-1; i > 0; i--) {
myElevated[i] = (myElevated[i+1] -
i*(myPosition[i-1])*scaleFactor[i-1] +
(i+2)*(myPosition[i])*scaleFactor[i]);
}
myElevated[0] = myElevated[1] + 2*(myPosition[0])*scaleFactor[0];
// find the closest zero-colored lattice point
// greedily search for the closest zero-colored lattice point
signed short sum = 0;
for (int i = 0; i <= pd; i++) {
float v = myElevated[i]*(1.0f/(pd+1));
float up = ceilf(v) * (pd+1);
float down = floorf(v) * (pd+1);
if (up - myElevated[i] < myElevated[i] - down) {
myGreedy[i] = (signed short)up;
} else {
myGreedy[i] = (signed short)down;
}
sum += myGreedy[i];
}
sum /= pd+1;
// sort differential to find the permutation between this simplex and the canonical one
for (int i = 0; i <= pd; i++) {
myRank[i] = 0;
for (int j = 0; j <= pd; j++) {
if (myElevated[i] - myGreedy[i] < myElevated[j] - myGreedy[j] ||
(myElevated[i] - myGreedy[i] == myElevated[j] - myGreedy[j]
&& i > j)) {
myRank[i]++;
}
}
}
if (sum > 0) { // sum too large, need to bring down the ones with the smallest differential
for (int i = 0; i <= pd; i++) {
if (myRank[i] >= pd + 1 - sum) {
myGreedy[i] -= pd+1;
myRank[i] += sum - (pd+1);
} else {
myRank[i] += sum;
}
}
} else if (sum < 0) { // sum too small, need to bring up the ones with largest differential
for (int i = 0; i <= pd; i++) {
if (myRank[i] < -sum) {
myGreedy[i] += pd+1;
myRank[i] += (pd+1) + sum;
} else {
myRank[i] += sum;
}
}
}
// turn delta into barycentric coords
for (int i = 0; i <= pd+1; i++) {
myBarycentric[i] = 0;
}
for (int i = 0; i <= pd; i++) {
float delta = (myElevated[i] - myGreedy[i]) * (1.0f/(pd+1));
myBarycentric[pd-myRank[i]] += delta;
myBarycentric[pd+1-myRank[i]] -= delta;
}
myBarycentric[0] += 1.0f + myBarycentric[pd+1];
}
for (int color = 0; color <= pd; color++) {
// Compute the location of the lattice point explicitly (all but
// the last coordinate - it's redundant because they sum to zero)
if (!outOfBounds) {
for (int i = 0; i < pd; i++) {
myKey[i] = myGreedy[i] + color;
if (myRank[i] > pd-color) myKey[i] -= (pd+1);
}
}
if (!outOfBounds) {
MatrixEntry r;
r.index = hashTableInsert<pd>(myKey, table_keys, table_entries,
table_capacity, idx*(pd+1)+color);
r.weight = myBarycentric[color];
matrix[idx*(pd+1) + color] = r;
}
}
}
template<int kd>
__global__ static void cleanHashTable(const int n,
int *table_entries,
int table_capacity,
signed short* table_keys)
{
const int idx = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x * blockDim.y + threadIdx.x;
if (idx >= n) return;
// find my hash table entry
int *e = table_entries + idx;
// Check if I created my own key in the previous phase
if (*e >= 0) {
// Rehash my key and reset the pointer in order to merge with
// any other pixel that created a different entry under the
// same key. If the computation was serial this would never
// happen, but sometimes race conditions can make the same key
// be inserted twice. hashTableRetrieve always returns the
// earlier, so it's no problem as long as we rehash now.
*e = hashTableRetrieve<kd>(table_keys + *e*kd,
table_entries, table_keys, table_capacity);
}
}
template<int pd>
__global__ static void resetIndex(const int w, const int h,
MatrixEntry *matrix,
int *table_entries)
{
const int x = threadIdx.x + blockIdx.x * blockDim.x;
const int y = threadIdx.y + (blockIdx.y/(pd+1)) * blockDim.y;
const int color = blockIdx.y % (pd+1);
const int idx = y*w + x;
const bool outOfBounds = (x >= w) || (y >= h);
if (!outOfBounds){
MatrixEntry r = matrix[idx*(pd+1)+color];
matrix[idx*(pd+1)+color].index = table_entries[r.index];
}
}
template<int pd, typename Dtype>
__global__ static void splatCache(const int w, const int h, const int vd,
const Dtype *values,
const MatrixEntry *matrix,
float *table_values)
{
const int x = threadIdx.x + blockIdx.x * blockDim.x;
const int y = threadIdx.y + (blockIdx.y/(pd+1)) * blockDim.y;
const int threadId = threadIdx.y*blockDim.x + threadIdx.x;
const int color = blockIdx.y % (pd+1);
const int idx = y*w + x;
const bool outOfBounds = (x >= w) || (y >= h);
__shared__ int sharedOffsets[BLOCK_SIZE];
extern __shared__ float sharedValues[];
int myOffset = -1;
float *myValue = sharedValues + threadId*(vd+1);
if (!outOfBounds) {
const Dtype *value = values + idx;
MatrixEntry r = matrix[idx*(pd+1)+color];
// convert the matrix entry from a pointer into the entries array to a pointer into the keys/values array
//matrix[idx*(pd+1)+color].index = r.index = table_entries[r.index];
// record the offset into the keys/values array in shared space
myOffset = sharedOffsets[threadId] = r.index*(vd+1);
for (int j = 0; j < vd; j++) {
myValue[j] = (float)value[j*w*h]*r.weight;
}
myValue[vd] = r.weight;
} else {
sharedOffsets[threadId] = -1;
}
__syncthreads();
// am I the first thread in this block to care about this key?
if (outOfBounds) return;
for (int i = 0; i < BLOCK_SIZE; i++) {
if (i < threadId) {
if (myOffset == sharedOffsets[i]) {
// somebody else with higher priority cares about this key
return;
}
} else if (i > threadId) {
if (myOffset == sharedOffsets[i]) {
// someone else with lower priority cares about this key, accumulate it into mine
for (int j = 0; j <= vd; j++) {
sharedValues[threadId*(vd+1) + j] += sharedValues[i*(vd+1) + j];
}
}
}
}
// only the threads with something to write to main memory are still going
float *val = table_values + myOffset;
for (int j = 0; j <= vd; j++) {
atomicAdd(val+j, myValue[j]);
}
}
template<int pd>
__global__ static void blur(int n, float *newValues,
const MatrixEntry *matrix,
const int *table_entries,
const signed short *table_keys,
const int table_capacity,
float *table_values,
int color,
const int vd)
{
const int idx = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x * blockDim.y + threadIdx.x;
if (idx >= n) return;
// Check if I'm valid
if (matrix[idx].index != idx) return;
// find my key and the keys of my neighbours
short myKey[pd+1];
short np[pd+1];
short nm[pd+1];
for (int i = 0; i < pd; i++) {
myKey[i] = table_keys[idx*pd+i];
np[i] = myKey[i]+1;
nm[i] = myKey[i]-1;
}
np[color] -= pd+1;
nm[color] += pd+1;
int offNp = hashTableRetrieve<pd>(np, table_entries, table_keys, table_capacity);
int offNm = hashTableRetrieve<pd>(nm, table_entries, table_keys, table_capacity);
float *valMe = table_values + (vd+1)*idx;
float *valNp = table_values + (vd+1)*offNp;
float *valNm = table_values + (vd+1)*offNm;
float *valOut = newValues + (vd+1)*idx;
if (offNp >= 0 && offNm >= 0) {
for (int i = 0; i <= vd; i++) {
valOut[i] = (valNp[i] + (valMe[i]*2) + valNm[i])/2;
}
} else if (offNp >= 0) {
for (int i = 0; i <= vd; i++) {
valOut[i] = (valNp[i] + (valMe[i]*2))/2;
}
} else if (offNm >= 0) {
for (int i = 0; i <= vd; i++) {
valOut[i] = (valNm[i] + (valMe[i]*2))/2;
}
} else {
for (int i = 0; i <= vd; i++) {
valOut[i] = valMe[i];
}
}
}
template<int pd, typename Dtype>
__global__ static void slice(const int w, const int h, const int vd,
Dtype *values,
const MatrixEntry *matrix,
float *table_values,
bool add) {
//const int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x;
const int x = threadIdx.x + blockIdx.x * blockDim.x;
const int y = threadIdx.y + blockIdx.y * blockDim.y;
const int threadId = threadIdx.y*blockDim.x + threadIdx.x;
const int idx = y*w + x;
const bool outOfBounds = (x >= w) || (y >= h);
if (outOfBounds) return;
extern __shared__ float localValue[];
float *myValue = localValue + threadId*vd;
float myWeight = 0;
for (int i = 0; i < vd; i++) {
myValue[i] = 0;
}
for (int i = 0; i <= pd; i++) {
MatrixEntry r = matrix[idx*(pd+1) + i];
const float *val = table_values + r.index*(vd+1);
for (int j = 0; j < vd; j++) {
myValue[j] += r.weight*val[j];
}
myWeight += r.weight*val[vd];
}
//myWeight = 1.0f/myWeight;
float alpha = 1.0f / (1+powf(2, -pd));
for (int j = 0; j < vd; j++){
if(!add){
values[j*w*h + idx] = 0;
}
values[j*w*h + idx] += myValue[j]*alpha;
}
}
template<int pd>
void gpu_init(const float* features,
HashTable* table,
MatrixEntry* matrix,
const int w, const int h)
{
int num_points = w*h ;
// Scan line order
//unsigned int blocks = (num_points-1)/64 + 1;
//unsigned int blockSize = 64;
dim3 blocks((w-1)/8+1, (h-1)/8+1, 1);
dim3 blockSize(8, 8, 1);
float blurVariance = 0.5 ;
float * scaleFactor;
float* scaleFactorHost = new float[pd];
// Create Scale factor vector and give it to GPU
// num_dimensions is likely to be low so do that
// on the CPU
for (int i = 0; i < pd; i++) {
scaleFactorHost[i] = (pd+1)*sqrtf((1.0/6 + blurVariance)/((i+1)*(i+2)));
}
CUDA_CHECK(cudaMalloc((void**)&scaleFactor, sizeof(float)*pd));
CUDA_CHECK(cudaMemcpy(scaleFactor, scaleFactorHost, sizeof(float)*pd, cudaMemcpyHostToDevice));
createMatrix<pd><<<blocks, blockSize>>>(w, h,
features,
table->table_entries,
table->table_capacity,
table->table_keys,
scaleFactor,
matrix);
CUDA_POST_KERNEL_CHECK;
// fix duplicate hash table entries
int cleanBlockSize = 32;
dim3 cleanBlocks((num_points-1)/cleanBlockSize+1, 2*(pd+1), 1);
cleanHashTable<pd><<<cleanBlocks, cleanBlockSize>>>(2*num_points*(pd+1),
table->table_entries, table->table_capacity, table->table_keys);
CUDA_POST_KERNEL_CHECK;
blocks.y *= pd+1;
resetIndex<pd><<<blocks, blockSize>>>(w, h, matrix, table->table_entries) ;
CUDA_POST_KERNEL_CHECK;
// Clean intermediate variables
delete[] scaleFactorHost;
CUDA_CHECK(cudaFree(scaleFactor));
}
template<int pd, typename Dtype>
void gpu_compute(Dtype* out, const Dtype* in, const HashTable &table,
const MatrixEntry* matrix,
int w, int h, int vd,
bool reverse, bool add){
// Create table_values
int num_points = w*h ;
float *table_values ;
CUDA_CHECK(cudaMalloc((void**)&table_values, sizeof(float)*(vd+1)*num_points*(pd+1))) ;
gpu_set<float>(num_points*(vd+1)*(pd+1), 0, table_values) ;
dim3 blocks((w-1)/8+1, (h-1)/8+1, 1);
dim3 blockSize(8, 8, 1);
// splat splits by color, so extend the y coordinate to our blocks to represent that
blocks.y *= pd+1;
splatCache<pd, Dtype><<<blocks, blockSize, BLOCK_SIZE*(vd+1)*sizeof(float)>>>(w, h, vd,
in,
matrix,
table_values);
CUDA_POST_KERNEL_CHECK;
// blur
int cleanBlockSize = 32;
dim3 cleanBlocks((num_points-1)/cleanBlockSize+1, 2*(pd+1), 1);
float *newValues;
float *oldValues;
size_t size = num_points*(pd+1)*(vd+1)*sizeof(float);
CUDA_CHECK(cudaMalloc((void**)&(newValues), size));
CUDA_CHECK(cudaMalloc((void**)&(oldValues), size));
gpu_set<float>(num_points*(vd+1)*(pd+1), 0, newValues) ;
for (int color = reverse?pd:0; color <= pd && color>=0; reverse?color--:color++) {
blur<pd><<<cleanBlocks, cleanBlockSize>>>(num_points*(pd+1), newValues,
matrix,
table.table_entries,
table.table_keys,
table.table_capacity,
table_values,
color,
vd);
CUDA_POST_KERNEL_CHECK;
// swapping the pointers directly does not seem to work...
swapHashTableValues(oldValues, newValues, table_values, size);
}
// slice
blocks.y /= (pd+1);
slice<pd, Dtype><<<blocks, blockSize, sizeof(float)*BLOCK_SIZE*vd>>>(w, h, vd, out, matrix, table_values, add);
CUDA_POST_KERNEL_CHECK;
// Free memory
CUDA_CHECK(cudaFree(table_values)) ;
CUDA_CHECK(cudaFree(newValues)) ;
CUDA_CHECK(cudaFree(oldValues)) ;
}
void ModifiedPermutohedral::init_gpu(const float* features, int num_dimensions, int w, int h) {
//Initialize Hash table
if(!is_init){
table.createHashTable(w*h*(num_dimensions+1), num_dimensions);
CUDA_CHECK(cudaMalloc((void **)&matrix, sizeof(MatrixEntry)*(w*h*(num_dimensions+1))));
} else {
table.resetHashTable(w_*h_*(d_+1), d_);
}
w_ = w ;
h_ = h ;
d_ = num_dimensions ;
N_ = w*h ;
switch(num_dimensions){
case 2:
gpu_init<2>(features, &table, matrix, w_, h_);
break;
case 5:
gpu_init<5>(features, &table, matrix, w_, h_);
break;
default:
std::cout << "num_dimensions should be 2 or 5";
}
is_init = true;
}
void ModifiedPermutohedral::compute_gpu(float* out, const float* in, int value_size, bool reverse, bool add) const {
// Losing time by dynamically allocating memory but more general function
if(!is_init)
std::cout << "Initialize lattice before doing any computing";
switch(d_){
case 2:
gpu_compute<2, float>(out, in, table, matrix, w_, h_, value_size, reverse, add);
break;
case 5:
gpu_compute<5, float>(out, in, table, matrix, w_, h_, value_size, reverse, add);
break;
default:
std::cout << "num_dimensions should be 2 or 5";
}
}
|
a14486c7e5c4ab1bf3225ecd965e2f39e5191afe.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../shared/globals.hpp"
#include "../shared/timer.hpp"
#include "../shared/argument_parsing.cuh"
#include "../shared/graph.cuh"
#include "../shared/subgraph.cuh"
#include "../shared/partitioner.cuh"
#include "../shared/subgraph_generator.cuh"
#include "../shared/gpu_error_check.cuh"
#include "../shared/gpu_kernels.cuh"
#include "../shared/subway_utilities.hpp"
int main(int argc, char** argv)
{
hipFree(0);
ArgumentParser arguments(argc, argv, true, false);
Timer timer;
timer.Start();
Graph<OutEdgeWeighted> graph(arguments.input, true);
graph.ReadGraph();
float readtime = timer.Finish();
cout << "Graph Reading finished in " << readtime/1000 << " (s).\n";
//for(unsigned int i=0; i<100; i++)
// cout << graph.edgeList[i].end << " " << graph.edgeList[i].w8;
for(unsigned int i=0; i<graph.num_nodes; i++)
{
graph.value[i] = 0;
graph.label1[i] = true;
graph.label2[i] = false;
}
graph.value[arguments.sourceNode] = DIST_INFINITY;
//graph.label[arguments.sourceNode] = true;
gpuErrorcheck(hipMemcpy(graph.d_outDegree, graph.outDegree, graph.num_nodes * sizeof(unsigned int), hipMemcpyHostToDevice));
gpuErrorcheck(hipMemcpy(graph.d_value, graph.value, graph.num_nodes * sizeof(unsigned int), hipMemcpyHostToDevice));
gpuErrorcheck(hipMemcpy(graph.d_label1, graph.label1, graph.num_nodes * sizeof(bool), hipMemcpyHostToDevice));
gpuErrorcheck(hipMemcpy(graph.d_label2, graph.label2, graph.num_nodes * sizeof(bool), hipMemcpyHostToDevice));
Subgraph<OutEdgeWeighted> subgraph(graph.num_nodes, graph.num_edges);
SubgraphGenerator<OutEdgeWeighted> subgen(graph);
subgen.generate(graph, subgraph);
for(unsigned int i=0; i<graph.num_nodes; i++)
{
graph.label1[i] = false;
}
graph.label1[arguments.sourceNode] = true;
gpuErrorcheck(hipMemcpy(graph.d_label1, graph.label1, graph.num_nodes * sizeof(bool), hipMemcpyHostToDevice));
Partitioner<OutEdgeWeighted> partitioner;
timer.Start();
uint gItr = 0;
bool finished;
bool *d_finished;
gpuErrorcheck(hipMalloc(&d_finished, sizeof(bool)));
while (subgraph.numActiveNodes>0)
{
gItr++;
partitioner.partition(subgraph, subgraph.numActiveNodes);
// a super iteration
for(int i=0; i<partitioner.numPartitions; i++)
{
hipDeviceSynchronize();
gpuErrorcheck(hipMemcpy(subgraph.d_activeEdgeList, subgraph.activeEdgeList + partitioner.fromEdge[i], (partitioner.partitionEdgeSize[i]) * sizeof(OutEdgeWeighted), hipMemcpyHostToDevice));
hipDeviceSynchronize();
//moveUpLabels<<< partitioner.partitionNodeSize[i]/512 + 1 , 512 >>>(subgraph.d_activeNodes, graph.d_label, partitioner.partitionNodeSize[i], partitioner.fromNode[i]);
hipLaunchKernelGGL(( mixLabels), dim3(partitioner.partitionNodeSize[i]/512 + 1) , dim3(512), 0, 0, subgraph.d_activeNodes, graph.d_label1, graph.d_label2, partitioner.partitionNodeSize[i], partitioner.fromNode[i]);
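			// [Editor's note] The two label buffers alternate roles on successive inner
			// iterations (see the itr%2 selection below), so each sswp_async pass reads
			// the frontier produced by the previous pass and writes the next frontier
			// into the other buffer.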
uint itr = 0;
do
{
cout << "\t\tIteration " << ++itr << endl;
finished = true;
gpuErrorcheck(hipMemcpy(d_finished, &finished, sizeof(bool), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( sswp_async), dim3(partitioner.partitionNodeSize[i]/512 + 1) , dim3(512) , 0, 0, partitioner.partitionNodeSize[i],
partitioner.fromNode[i],
partitioner.fromEdge[i],
subgraph.d_activeNodes,
subgraph.d_activeNodesPointer,
subgraph.d_activeEdgeList,
graph.d_outDegree,
graph.d_value,
d_finished,
(itr%2==1) ? graph.d_label1 : graph.d_label2,
(itr%2==1) ? graph.d_label2 : graph.d_label1);
hipDeviceSynchronize();
gpuErrorcheck( hipPeekAtLastError() );
gpuErrorcheck(hipMemcpy(&finished, d_finished, sizeof(bool), hipMemcpyDeviceToHost));
}while(!(finished));
cout << itr << ((itr>1) ? " Inner Iterations" : " Inner Iteration") << " in Global Iteration " << gItr << ", Partition " << i << endl;
}
subgen.generate(graph, subgraph);
}
float runtime = timer.Finish();
cout << "Processing finished in " << runtime/1000 << " (s).\n";
gpuErrorcheck(hipMemcpy(graph.value, graph.d_value, graph.num_nodes*sizeof(uint), hipMemcpyDeviceToHost));
utilities::PrintResults(graph.value, min(30, graph.num_nodes));
if(arguments.hasOutput)
utilities::SaveResults(arguments.output, graph.value, graph.num_nodes);
}
|
a14486c7e5c4ab1bf3225ecd965e2f39e5191afe.cu
|
#include "../shared/globals.hpp"
#include "../shared/timer.hpp"
#include "../shared/argument_parsing.cuh"
#include "../shared/graph.cuh"
#include "../shared/subgraph.cuh"
#include "../shared/partitioner.cuh"
#include "../shared/subgraph_generator.cuh"
#include "../shared/gpu_error_check.cuh"
#include "../shared/gpu_kernels.cuh"
#include "../shared/subway_utilities.hpp"
int main(int argc, char** argv)
{
cudaFree(0);
ArgumentParser arguments(argc, argv, true, false);
Timer timer;
timer.Start();
Graph<OutEdgeWeighted> graph(arguments.input, true);
graph.ReadGraph();
float readtime = timer.Finish();
cout << "Graph Reading finished in " << readtime/1000 << " (s).\n";
//for(unsigned int i=0; i<100; i++)
// cout << graph.edgeList[i].end << " " << graph.edgeList[i].w8;
for(unsigned int i=0; i<graph.num_nodes; i++)
{
graph.value[i] = 0;
graph.label1[i] = true;
graph.label2[i] = false;
}
graph.value[arguments.sourceNode] = DIST_INFINITY;
//graph.label[arguments.sourceNode] = true;
gpuErrorcheck(cudaMemcpy(graph.d_outDegree, graph.outDegree, graph.num_nodes * sizeof(unsigned int), cudaMemcpyHostToDevice));
gpuErrorcheck(cudaMemcpy(graph.d_value, graph.value, graph.num_nodes * sizeof(unsigned int), cudaMemcpyHostToDevice));
gpuErrorcheck(cudaMemcpy(graph.d_label1, graph.label1, graph.num_nodes * sizeof(bool), cudaMemcpyHostToDevice));
gpuErrorcheck(cudaMemcpy(graph.d_label2, graph.label2, graph.num_nodes * sizeof(bool), cudaMemcpyHostToDevice));
Subgraph<OutEdgeWeighted> subgraph(graph.num_nodes, graph.num_edges);
SubgraphGenerator<OutEdgeWeighted> subgen(graph);
subgen.generate(graph, subgraph);
for(unsigned int i=0; i<graph.num_nodes; i++)
{
graph.label1[i] = false;
}
graph.label1[arguments.sourceNode] = true;
gpuErrorcheck(cudaMemcpy(graph.d_label1, graph.label1, graph.num_nodes * sizeof(bool), cudaMemcpyHostToDevice));
Partitioner<OutEdgeWeighted> partitioner;
timer.Start();
uint gItr = 0;
bool finished;
bool *d_finished;
gpuErrorcheck(cudaMalloc(&d_finished, sizeof(bool)));
while (subgraph.numActiveNodes>0)
{
gItr++;
partitioner.partition(subgraph, subgraph.numActiveNodes);
// a super iteration
for(int i=0; i<partitioner.numPartitions; i++)
{
cudaDeviceSynchronize();
gpuErrorcheck(cudaMemcpy(subgraph.d_activeEdgeList, subgraph.activeEdgeList + partitioner.fromEdge[i], (partitioner.partitionEdgeSize[i]) * sizeof(OutEdgeWeighted), cudaMemcpyHostToDevice));
cudaDeviceSynchronize();
//moveUpLabels<<< partitioner.partitionNodeSize[i]/512 + 1 , 512 >>>(subgraph.d_activeNodes, graph.d_label, partitioner.partitionNodeSize[i], partitioner.fromNode[i]);
mixLabels<<<partitioner.partitionNodeSize[i]/512 + 1 , 512>>>(subgraph.d_activeNodes, graph.d_label1, graph.d_label2, partitioner.partitionNodeSize[i], partitioner.fromNode[i]);
uint itr = 0;
do
{
cout << "\t\tIteration " << ++itr << endl;
finished = true;
gpuErrorcheck(cudaMemcpy(d_finished, &finished, sizeof(bool), cudaMemcpyHostToDevice));
sswp_async<<< partitioner.partitionNodeSize[i]/512 + 1 , 512 >>>(partitioner.partitionNodeSize[i],
partitioner.fromNode[i],
partitioner.fromEdge[i],
subgraph.d_activeNodes,
subgraph.d_activeNodesPointer,
subgraph.d_activeEdgeList,
graph.d_outDegree,
graph.d_value,
d_finished,
(itr%2==1) ? graph.d_label1 : graph.d_label2,
(itr%2==1) ? graph.d_label2 : graph.d_label1);
cudaDeviceSynchronize();
gpuErrorcheck( cudaPeekAtLastError() );
gpuErrorcheck(cudaMemcpy(&finished, d_finished, sizeof(bool), cudaMemcpyDeviceToHost));
}while(!(finished));
cout << itr << ((itr>1) ? " Inner Iterations" : " Inner Iteration") << " in Global Iteration " << gItr << ", Partition " << i << endl;
}
subgen.generate(graph, subgraph);
}
float runtime = timer.Finish();
cout << "Processing finished in " << runtime/1000 << " (s).\n";
gpuErrorcheck(cudaMemcpy(graph.value, graph.d_value, graph.num_nodes*sizeof(uint), cudaMemcpyDeviceToHost));
utilities::PrintResults(graph.value, min(30, graph.num_nodes));
if(arguments.hasOutput)
utilities::SaveResults(arguments.output, graph.value, graph.num_nodes);
}
|
35e69b9f2f9f8b2cc74e349c27bbd91f707d7b99.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void find_all_sums_hub_kernel(int* hub, int nhub, float *node_weight, int *neighbor, int *neighbor_start, float *neighbor_accum_weight_result, float *sum_weight_result){
int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < nhub) {
int nid = hub[x];
float sum = 0.0;
for (int eid = neighbor_start[nid]; eid < neighbor_start[nid+1]; eid++) { // this eid is just the index of the neighbor in the neighbor array
sum += node_weight[neighbor[eid]];
neighbor_accum_weight_result[eid] = sum;
}
sum_weight_result[nid] = sum;
}
}
|
35e69b9f2f9f8b2cc74e349c27bbd91f707d7b99.cu
|
#include "includes.h"
__global__ void find_all_sums_hub_kernel(int* hub, int nhub, float *node_weight, int *neighbor, int *neighbor_start, float *neighbor_accum_weight_result, float *sum_weight_result){
int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < nhub) {
int nid = hub[x];
float sum = 0.0;
for (int eid = neighbor_start[nid]; eid < neighbor_start[nid+1]; eid++) { // this eid is just the index of the neighbor in the neighbor array
sum += node_weight[neighbor[eid]];
neighbor_accum_weight_result[eid] = sum;
}
sum_weight_result[nid] = sum;
}
}
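// [Editor's sketch, not part of the original file] A typical host-side launch for
// the kernel above, one thread per hub node; the 256-thread block size is an
// arbitrary choice, not taken from the original project.
inline void find_all_sums_hub(int* d_hub, int nhub, float* d_node_weight,
                              int* d_neighbor, int* d_neighbor_start,
                              float* d_neighbor_accum_weight, float* d_sum_weight)
{
    const int threads = 256;
    const int blocks  = (nhub + threads - 1) / threads;
    find_all_sums_hub_kernel<<<blocks, threads>>>(d_hub, nhub, d_node_weight,
                                                  d_neighbor, d_neighbor_start,
                                                  d_neighbor_accum_weight, d_sum_weight);
}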
|
69a49949a4d2267f33cc29481dd1995404dbca50.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zlarfbx.cu normal z -> c, Tue Sep 2 12:38:15 2014
*/
#include "common_magma.h"
#include "commonblas_c.h"
#include "magma_templates.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
//==============================================================================
extern "C"
__global__ void
magma_cgemv_kernel1(int m, const magmaFloatComplex * __restrict__ V, int ldv,
const magmaFloatComplex * __restrict__ c,
magmaFloatComplex *dwork)
{
const int i = threadIdx.x;
const magmaFloatComplex *dV = V + (blockIdx.x) * ldv;
__shared__ magmaFloatComplex sum[ BLOCK_SIZE ];
magmaFloatComplex lsum;
/* lsum := v' * C */
lsum = MAGMA_C_ZERO;
for( int j = i; j < m; j += BLOCK_SIZE )
lsum += MAGMA_C_MUL( MAGMA_C_CNJG( dV[j] ), c[j] );
sum[i] = lsum;
magma_sum_reduce< BLOCK_SIZE >( i, sum );
__syncthreads();
if (i==0)
dwork [blockIdx.x] = sum[0];
}
//==============================================================================
/* -----------------------------------------------------------------------------
Call
magma_cgemv_kernel3<<< n, BLOCK_SIZE>>>(m, V, ldv, c, dwork, tau)
to compute
CGEMV( "Conjugate transpose", m, n, -tau[0], V, ldv, c, 1, zero, dwork, 1)
and to set c[0] to 1.
i.e.,
work = -tau[0] V' c
----------------------------------------------------------------------------- */
extern "C"
__global__ void
magma_cgemv_kernel3(int m, const magmaFloatComplex * __restrict__ V, int ldv, magmaFloatComplex *c,
magmaFloatComplex *dwork, magmaFloatComplex *tau)
{
const int i = threadIdx.x;
const magmaFloatComplex *dV = V + (blockIdx.x) * ldv;
__shared__ magmaFloatComplex sum[ BLOCK_SIZE ];
magmaFloatComplex lsum;
if (i==0)
c[0] = MAGMA_C_ONE;
/* lsum := v' * C */
lsum = MAGMA_C_ZERO;
for( int j = i; j < m; j += BLOCK_SIZE )
lsum += MAGMA_C_MUL( MAGMA_C_CNJG( dV[j] ), c[j] );
sum[i] = lsum;
magma_sum_reduce< BLOCK_SIZE >( i, sum );
__syncthreads();
if (i==0)
dwork [blockIdx.x] = -tau[0]*sum[0];
}
//==============================================================================
extern "C"
__global__ void
magma_cgemv_kernel2(int m, int n, const magmaFloatComplex * __restrict__ V, int ldv,
const magmaFloatComplex * __restrict__ x, magmaFloatComplex *c)
{
const int i = threadIdx.x;
const int j = i + BLOCK_SIZE * blockIdx.x;
magmaFloatComplex lsum;
V += j;
lsum = MAGMA_C_ZERO;
if (j < m){
for(int k=0; k<n; k++)
lsum += MAGMA_C_MUL( V[k*ldv], x[k]);
c[j] -= lsum;
}
}
//==============================================================================
/*
Apply a complex block reflector H to a complex vector C from the left
(i.e., C = H C). H is represented in the form
H = I - V T V'
where T is the complex k-by-k upper triangular matrix in the
representation of the block reflector, and V is a complex block of
k elementary reflectors.
*/
extern "C" void
magma_clarfbx_gpu(magma_int_t m, magma_int_t k, magmaFloatComplex *V, magma_int_t ldv,
magmaFloatComplex *T, magma_int_t ldt, magmaFloatComplex *c,
magmaFloatComplex *dwork)
{
/* dwork = V' c */
hipLaunchKernelGGL(( magma_cgemv_kernel1), dim3(k), dim3(BLOCK_SIZE), 0, magma_stream , m, V, ldv, c, dwork);
/* dwork = T' dwork */
hipLaunchKernelGGL(( magma_ctrmv_tkernel), dim3(k), dim3(k), 0, magma_stream , T, ldt, dwork, dwork+k);
/* c = c - V dwork */
dim3 blocks3( (m + BLOCK_SIZE-1) / BLOCK_SIZE );
dim3 threads3( BLOCK_SIZE );
hipLaunchKernelGGL(( magma_cgemv_kernel2), dim3(blocks3), dim3(threads3), 0, magma_stream , m, k, V, ldv, dwork+k, c);
}
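// [Editor's note, not part of the original file] Step by step, the three launches
// in magma_clarfbx_gpu above compute
//     dwork       = V^H * c              (magma_cgemv_kernel1)
//     dwork + k   = T^H * dwork          (magma_ctrmv_tkernel, assumed to apply
//                                         the transposed triangular factor)
//     c           = c - V * (dwork + k)  (magma_cgemv_kernel2)
// i.e. c is overwritten with (I - V T^H V^H) c, matching the block-reflector
// form described in the comment above.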
//==============================================================================
|
69a49949a4d2267f33cc29481dd1995404dbca50.cu
|
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zlarfbx.cu normal z -> c, Tue Sep 2 12:38:15 2014
*/
#include "common_magma.h"
#include "commonblas_c.h"
#include "magma_templates.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
//==============================================================================
extern "C"
__global__ void
magma_cgemv_kernel1(int m, const magmaFloatComplex * __restrict__ V, int ldv,
const magmaFloatComplex * __restrict__ c,
magmaFloatComplex *dwork)
{
const int i = threadIdx.x;
const magmaFloatComplex *dV = V + (blockIdx.x) * ldv;
__shared__ magmaFloatComplex sum[ BLOCK_SIZE ];
magmaFloatComplex lsum;
/* lsum := v' * C */
lsum = MAGMA_C_ZERO;
for( int j = i; j < m; j += BLOCK_SIZE )
lsum += MAGMA_C_MUL( MAGMA_C_CNJG( dV[j] ), c[j] );
sum[i] = lsum;
magma_sum_reduce< BLOCK_SIZE >( i, sum );
__syncthreads();
if (i==0)
dwork [blockIdx.x] = sum[0];
}
//==============================================================================
/* -----------------------------------------------------------------------------
Call
magma_cgemv_kernel3<<< n, BLOCK_SIZE>>>(m, V, ldv, c, dwork, tau)
to compute
CGEMV( "Conjugate transpose", m, n, -tau[0], V, ldv, c, 1, zero, dwork, 1)
and to set c[0] to 1.
i.e.,
work = -tau[0] V' c
----------------------------------------------------------------------------- */
extern "C"
__global__ void
magma_cgemv_kernel3(int m, const magmaFloatComplex * __restrict__ V, int ldv, magmaFloatComplex *c,
magmaFloatComplex *dwork, magmaFloatComplex *tau)
{
const int i = threadIdx.x;
const magmaFloatComplex *dV = V + (blockIdx.x) * ldv;
__shared__ magmaFloatComplex sum[ BLOCK_SIZE ];
magmaFloatComplex lsum;
if (i==0)
c[0] = MAGMA_C_ONE;
/* lsum := v' * C */
lsum = MAGMA_C_ZERO;
for( int j = i; j < m; j += BLOCK_SIZE )
lsum += MAGMA_C_MUL( MAGMA_C_CNJG( dV[j] ), c[j] );
sum[i] = lsum;
magma_sum_reduce< BLOCK_SIZE >( i, sum );
__syncthreads();
if (i==0)
dwork [blockIdx.x] = -tau[0]*sum[0];
}
//==============================================================================
extern "C"
__global__ void
magma_cgemv_kernel2(int m, int n, const magmaFloatComplex * __restrict__ V, int ldv,
const magmaFloatComplex * __restrict__ x, magmaFloatComplex *c)
{
const int i = threadIdx.x;
const int j = i + BLOCK_SIZE * blockIdx.x;
magmaFloatComplex lsum;
V += j;
lsum = MAGMA_C_ZERO;
if (j < m){
for(int k=0; k<n; k++)
lsum += MAGMA_C_MUL( V[k*ldv], x[k]);
c[j] -= lsum;
}
}
//==============================================================================
/*
Apply a complex block reflector H to a complex vector C from the left
(i.e., C = H C). H is represented in the form
H = I - V T V'
where T is the complex k-by-k upper triangular matrix in the
representation of the block reflector, and V is a complex block of
k elementary reflectors.
*/
extern "C" void
magma_clarfbx_gpu(magma_int_t m, magma_int_t k, magmaFloatComplex *V, magma_int_t ldv,
magmaFloatComplex *T, magma_int_t ldt, magmaFloatComplex *c,
magmaFloatComplex *dwork)
{
/* dwork = V' c */
magma_cgemv_kernel1<<< k, BLOCK_SIZE, 0, magma_stream >>>(m, V, ldv, c, dwork);
/* dwork = T' dwork */
magma_ctrmv_tkernel<<< k, k, 0, magma_stream >>>( T, ldt, dwork, dwork+k);
/* c = c - V dwork */
dim3 blocks3( (m + BLOCK_SIZE-1) / BLOCK_SIZE );
dim3 threads3( BLOCK_SIZE );
magma_cgemv_kernel2<<< blocks3, threads3, 0, magma_stream >>>( m, k, V, ldv, dwork+k, c);
}
//==============================================================================
|
e348fc133666aff0653de714a4c238c62e3df970.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "headerfile.h"
#include "getvariables.h"
#include "initialise_and_addnoise.h"
#include "cudafiles_1.h"
#include "cudafiles_2.h"
#include "graph_terms.h"
// #include "diffusion.cuh"
int main(){
//set the initial latice spacing.
latice_spacing_z = 1.0;
previous_latice_spacing_z = latice_spacing_z;
//collect variables
getvariables();
//allocate memory on the machine
phi_A = (double*) malloc(volume*sizeof(double));
phi_B = (double*) malloc(volume*sizeof(double));
phi_C = (double*) malloc(volume*sizeof(double));
delta_phi_A_delta_tau = (double*) malloc(volume*sizeof(double));
delta_phi_B_delta_tau = (double*) malloc(volume*sizeof(double));
delta_phi_C_delta_tau = (double*) malloc(volume*sizeof(double));
lattice_height = (double*) malloc(volume*sizeof(double));
//create grid
initialise();
// gives percentage of a or b polymer at each grid point
addnoise();
//initialise_solvent();
//this function adds the solvent to the system
//start calculation of system
phi_diffusion_calc();
//confirm completion
cout << " " << endl;
cout << "Finished. Press enter to close window" << endl;
// pause
cin.get();
//end
}
void phi_diffusion_calc()
{
hipSetDevice(0);
// allocate memory on the device
hipMalloc((void **) &device_phi_A, volume*sizeof(double));
hipMalloc((void **) &device_phi_B, volume*sizeof(double));
hipMalloc((void **) &device_mu1_A, volume*sizeof(double));
hipMalloc((void **) &device_mu2_A, volume*sizeof(double));
hipMalloc((void **) &device_mu1_B, volume*sizeof(double));
hipMalloc((void **) &device_mu2_B, volume*sizeof(double));
hipMalloc((void **) &device_mu1_A_grad_lambda_x, volume*sizeof(double));
hipMalloc((void **) &device_mu2_A_grad_lambda_x, volume*sizeof(double));
hipMalloc((void **) &device_mu1_B_grad_lambda_x, volume*sizeof(double));
hipMalloc((void **) &device_mu2_B_grad_lambda_x, volume*sizeof(double));
hipMalloc((void **) &device_mu1_A_grad_lambda_y, volume*sizeof(double));
hipMalloc((void **) &device_mu2_A_grad_lambda_y, volume*sizeof(double));
hipMalloc((void **) &device_mu1_B_grad_lambda_y, volume*sizeof(double));
hipMalloc((void **) &device_mu2_B_grad_lambda_y, volume*sizeof(double));
hipMalloc((void **) &device_mu1_A_grad_lambda_z, volume*sizeof(double));
hipMalloc((void **) &device_mu2_A_grad_lambda_z, volume*sizeof(double));
hipMalloc((void **) &device_mu1_B_grad_lambda_z, volume*sizeof(double));
hipMalloc((void **) &device_mu2_B_grad_lambda_z, volume*sizeof(double));
hipMalloc((void **) &device_delta_phi_A_delta_tau, volume*sizeof(double));
hipMalloc((void **) &device_delta_phi_B_delta_tau, volume*sizeof(double));
hipMalloc((void **) &device_deltas, volume*sizeof(double));
hipMalloc((void **) &device_lattice_height, volume*sizeof(double));
// copy phi matrice from the host to the device
hipMemcpy(device_phi_A, phi_A, volume*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(device_phi_B, phi_B, volume*sizeof(double), hipMemcpyHostToDevice);
// defines the number of grids and threads to be used in the parallel computation
// both grids and threads can have 3 dimensions, but number of threads is limited to 512
// so best to use two block dimensions of size length each and one thread dimension of size length
dim3 phi_diffusion_grid(phi_diffusion_num_blocks_x, phi_diffusion_num_blocks_y, 1);
dim3 phi_diffusion_threads(phi_diffusion_num_threads_x, 1, 1);
dim3 phi_surface_grid(phi_diffusion_num_blocks_x, 1, 1);
dim3 phi_surface_threads(phi_diffusion_num_threads_x, 1, 1);
dim3 two_d_threads(phi_diffusion_num_blocks_x, 1, 1);
dim3 two_d_blocks(phi_diffusion_num_blocks_y, 1, 1);
//load variables that will run time estimation can be made
time_t start,end;
double dif;
double timesofar = 0;
double runtime;
double percentcomplete;
//apply solvent to entire system
hipLaunchKernelGGL(( initialise_solvent) , dim3(phi_diffusion_grid), dim3(phi_diffusion_threads), 0 , 0, device_phi_A, device_phi_B, solvent);
//give each point a delta_z value depending on its solvent concentration
hipLaunchKernelGGL(( make_deltas) , dim3(phi_diffusion_grid), dim3(phi_diffusion_threads), 0 , 0, delta_x, device_phi_A, device_phi_B, device_deltas, solvent);
//find the height of each point using the delta_z values
//WARNING!!! THIS BIT OF CODE IS INEFFICIENT
hipLaunchKernelGGL(( find_z_height) , dim3(phi_diffusion_grid), dim3(phi_diffusion_threads), 0 , 0, device_deltas, device_lattice_height);
//copy array from GPU to CPU
hipMemcpy(phi_A, device_phi_A, volume*sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(phi_B, device_phi_B, volume*sizeof(double), hipMemcpyDeviceToHost);
//copy lattice heights from GPU to CPU
hipMemcpy(lattice_height, device_lattice_height, volume*sizeof(double), hipMemcpyDeviceToHost);
//store the initial data to files and subfolders
store_data();
new_store_data();
check_data();
//store_minimum_data();
//begin the main loop
for (counter = counter_start+1; counter < countmax; counter++)
{
//record time so time estimation can be made
time (&start);
//begin inner loop that iterates the system between each data output
for (int loop_counter = 0; loop_counter < num_saves; loop_counter++)
{
//remove a small amount of solvent from the top layer
hipLaunchKernelGGL(( remove_top_solvent) , dim3(phi_diffusion_grid), dim3(phi_diffusion_threads), 0 , 0, device_phi_A, device_phi_B, evaporation);
//give each point a delta_z value depending on its solvent concentration
hipLaunchKernelGGL(( make_deltas) , dim3(phi_diffusion_grid), dim3(phi_diffusion_threads), 0 , 0, delta_x, device_phi_A, device_phi_B, device_deltas, solvent);
//employ surface conditions on the system
hipLaunchKernelGGL(( phi_surf) , dim3(phi_surface_grid), dim3(phi_surface_threads), 0 , 0, device_phi_A, device_phi_B, device_deltas);
//calculate the mu values
hipLaunchKernelGGL(( mu_calc) , dim3(phi_diffusion_grid), dim3(phi_diffusion_threads), 0 , 0,
device_phi_A,
device_phi_B,
device_mu1_A,
device_mu2_A,
device_mu1_B,
device_mu2_B,
delta_x,
delta_y,
kappa,
chi_scale,
pbc_x,
pbc_y,
pbc_z,
deg_polyA,
deg_polyB,
deg_polyC,
kappa,
kai_AB,
kai_AC,
kai_BC,
device_deltas
);
//employ surface conditions on the mu array to match the system
hipLaunchKernelGGL(( mu_surf) , dim3(phi_surface_grid), dim3(phi_surface_threads), 0 , 0,
device_phi_A,
device_phi_B,
device_mu1_A,
device_mu2_A,
device_mu1_B,
device_mu2_B,
g_upper,
h_upper,
g_lower,
h_lower
);
//begin the diffusion calculation
hipLaunchKernelGGL(( new_phi_diffusion_1) , dim3(phi_diffusion_grid), dim3(phi_diffusion_threads), 0 , 0,
device_phi_A,
device_phi_B,
device_mu1_A,
device_mu2_A,
device_mu1_B,
device_mu2_B,
device_mu1_A_grad_lambda_x,
device_mu2_A_grad_lambda_x,
device_mu1_B_grad_lambda_x,
device_mu2_B_grad_lambda_x,
device_mu1_A_grad_lambda_y,
device_mu2_A_grad_lambda_y,
device_mu1_B_grad_lambda_y,
device_mu2_B_grad_lambda_y,
device_mu1_A_grad_lambda_z,
device_mu2_A_grad_lambda_z,
device_mu1_B_grad_lambda_z,
device_mu2_B_grad_lambda_z,
delta_x,
delta_y,
delta_t,
mobility,
pbc_x,
pbc_y,
pbc_z,
kappa,
device_deltas
);
//finish the diffusion calculation
hipLaunchKernelGGL(( new_phi_diffusion_2) , dim3(phi_diffusion_grid), dim3(phi_diffusion_threads), 0 , 0,
device_phi_A,
device_phi_B,
device_mu1_A_grad_lambda_x,
device_mu2_A_grad_lambda_x,
device_mu1_B_grad_lambda_x,
device_mu2_B_grad_lambda_x,
device_mu1_A_grad_lambda_y,
device_mu2_A_grad_lambda_y,
device_mu1_B_grad_lambda_y,
device_mu2_B_grad_lambda_y,
device_mu1_A_grad_lambda_z,
device_mu2_A_grad_lambda_z,
device_mu1_B_grad_lambda_z,
device_mu2_B_grad_lambda_z,
delta_x,
delta_y,
delta_t,
mobility,
pbc_x,
pbc_y,
pbc_z,
kappa,
device_delta_phi_A_delta_tau,
device_delta_phi_B_delta_tau,
device_deltas
);
//find the height of each lattice point
hipLaunchKernelGGL(( find_z_height) , dim3(phi_diffusion_grid), dim3(phi_diffusion_threads), 0 , 0, device_deltas, device_lattice_height);
//put the new array into the interpolated array
}
// copy the phi matrix values back from the device to the host
hipMemcpy(phi_A, device_phi_A, volume*sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(phi_B, device_phi_B, volume*sizeof(double), hipMemcpyDeviceToHost);
// copy lattice height values back from the device to the host
hipMemcpy(lattice_height, device_lattice_height, volume*sizeof(double), hipMemcpyDeviceToHost);
//store the initial data to files and subfolders
store_data();
new_store_data();
check_data();
//store_minimum_data();
/*
//this code outputs the deltas between consecutive data outputs and is useful for debugging.
hipMemcpy(delta_phi_A_delta_tau, device_delta_phi_A_delta_tau, volume*sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(delta_phi_B_delta_tau, device_delta_phi_B_delta_tau, volume*sizeof(double), hipMemcpyDeviceToHost);
store_deltas();
*/
//work out time estimation
time (&end);
dif = difftime (end,start) / 60;
runtime = dif*(countmax-1);
timesofar += dif;
percentcomplete = 100 * timesofar/runtime;
//print time estimation to screen
cout << "loop " << counter << " of " << countmax-1 << " took " << dif <<" minutes." << endl;
cout << "times run so far is " << timesofar << " minutes of a estimated time of " << runtime << " minutes." << endl;
cout << percentcomplete << "% complete..." << endl;
//end the outer loop
}
//free the memory on the GPU
hipFree(device_phi_A);
hipFree(device_phi_B);
hipFree(device_mu1_A);
hipFree(device_mu2_A);
hipFree(device_mu1_B);
hipFree(device_mu2_B);
hipFree(device_mu1_A_grad_lambda_x);
hipFree(device_mu2_A_grad_lambda_x);
hipFree(device_mu1_B_grad_lambda_x);
hipFree(device_mu2_B_grad_lambda_x);
hipFree(device_mu1_A_grad_lambda_y);
hipFree(device_mu2_A_grad_lambda_y);
hipFree(device_mu1_B_grad_lambda_y);
hipFree(device_mu2_B_grad_lambda_y);
hipFree(device_mu1_A_grad_lambda_z);
hipFree(device_mu2_A_grad_lambda_z);
hipFree(device_mu1_B_grad_lambda_z);
hipFree(device_mu2_B_grad_lambda_z);
hipFree(device_delta_phi_A_delta_tau);
hipFree(device_delta_phi_B_delta_tau);
hipFree(device_deltas);
hipFree(device_lattice_height);
//free the allocated memory on the host
free(phi_A);
free(phi_B);
free(phi_C);
free(delta_phi_A_delta_tau);
free(delta_phi_B_delta_tau);
free(delta_phi_C_delta_tau);
free(array_delta_z);
free(lattice_height);
//that's the end
}
|
e348fc133666aff0653de714a4c238c62e3df970.cu
|
#include "headerfile.h"
#include "getvariables.h"
#include "initialise_and_addnoise.h"
#include "cudafiles_1.h"
#include "cudafiles_2.h"
#include "graph_terms.h"
// #include "diffusion.cuh"
int main(){
//set the initial lattice spacing.
latice_spacing_z = 1.0;
previous_latice_spacing_z = latice_spacing_z;
//collect variables
getvariables();
//allocate memory on the machine
phi_A = (double*) malloc(volume*sizeof(double));
phi_B = (double*) malloc(volume*sizeof(double));
phi_C = (double*) malloc(volume*sizeof(double));
delta_phi_A_delta_tau = (double*) malloc(volume*sizeof(double));
delta_phi_B_delta_tau = (double*) malloc(volume*sizeof(double));
delta_phi_C_delta_tau = (double*) malloc(volume*sizeof(double));
lattice_height = (double*) malloc(volume*sizeof(double));
//create grid
initialise();
// gives percentage of a or b polymer at each grid point
addnoise();
//initialise_solvent();
//this function adds the solvent to the system
//start calculation of system
phi_diffusion_calc();
//confirm completion
cout << " " << endl;
cout << "Finished. Press enter to close window" << endl;
// pause
cin.get();
//end
}
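// Editorial sketch (not part of the original program): the CUDA runtime calls in
// phi_diffusion_calc() below ignore their return codes. A small helper along these
// lines could be used to surface allocation or copy failures early; the name and
// usage below are illustrative assumptions only.
static void check_cuda_editorial(cudaError_t err, const char *what)
{
if (err != cudaSuccess)
{
cout << "CUDA error in " << what << ": " << cudaGetErrorString(err) << endl;
}
}
// hypothetical usage:
// check_cuda_editorial(cudaMalloc((void **) &device_phi_A, volume*sizeof(double)), "cudaMalloc device_phi_A");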
void phi_diffusion_calc()
{
cudaSetDevice(0);
// allocate memory on the device
cudaMalloc((void **) &device_phi_A, volume*sizeof(double));
cudaMalloc((void **) &device_phi_B, volume*sizeof(double));
cudaMalloc((void **) &device_mu1_A, volume*sizeof(double));
cudaMalloc((void **) &device_mu2_A, volume*sizeof(double));
cudaMalloc((void **) &device_mu1_B, volume*sizeof(double));
cudaMalloc((void **) &device_mu2_B, volume*sizeof(double));
cudaMalloc((void **) &device_mu1_A_grad_lambda_x, volume*sizeof(double));
cudaMalloc((void **) &device_mu2_A_grad_lambda_x, volume*sizeof(double));
cudaMalloc((void **) &device_mu1_B_grad_lambda_x, volume*sizeof(double));
cudaMalloc((void **) &device_mu2_B_grad_lambda_x, volume*sizeof(double));
cudaMalloc((void **) &device_mu1_A_grad_lambda_y, volume*sizeof(double));
cudaMalloc((void **) &device_mu2_A_grad_lambda_y, volume*sizeof(double));
cudaMalloc((void **) &device_mu1_B_grad_lambda_y, volume*sizeof(double));
cudaMalloc((void **) &device_mu2_B_grad_lambda_y, volume*sizeof(double));
cudaMalloc((void **) &device_mu1_A_grad_lambda_z, volume*sizeof(double));
cudaMalloc((void **) &device_mu2_A_grad_lambda_z, volume*sizeof(double));
cudaMalloc((void **) &device_mu1_B_grad_lambda_z, volume*sizeof(double));
cudaMalloc((void **) &device_mu2_B_grad_lambda_z, volume*sizeof(double));
cudaMalloc((void **) &device_delta_phi_A_delta_tau, volume*sizeof(double));
cudaMalloc((void **) &device_delta_phi_B_delta_tau, volume*sizeof(double));
cudaMalloc((void **) &device_deltas, volume*sizeof(double));
cudaMalloc((void **) &device_lattice_height, volume*sizeof(double));
// copy phi matrices from the host to the device
cudaMemcpy(device_phi_A, phi_A, volume*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(device_phi_B, phi_B, volume*sizeof(double), cudaMemcpyHostToDevice);
// defines the number of blocks and threads to be used in the parallel computation
// both the grid and the thread blocks can have 3 dimensions, but the number of threads per block is limited to 512
// so it is best to use two block dimensions of size length and one thread dimension of size length
dim3 phi_diffusion_grid(phi_diffusion_num_blocks_x, phi_diffusion_num_blocks_y, 1);
dim3 phi_diffusion_threads(phi_diffusion_num_threads_x, 1, 1);
dim3 phi_surface_grid(phi_diffusion_num_blocks_x, 1, 1);
dim3 phi_surface_threads(phi_diffusion_num_threads_x, 1, 1);
dim3 two_d_threads(phi_diffusion_num_blocks_x, 1, 1);
dim3 two_d_blocks(phi_diffusion_num_blocks_y, 1, 1);
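// (Editorial note, assumption) With this layout each thread presumably handles one
// lattice site: the two block dimensions of size length select the x/y position and
// the thread index selects z. The actual index mapping lives in the kernels declared
// in cudafiles_1.h / cudafiles_2.h, not in this file.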
//declare variables so that a run time estimate can be made
time_t start,end;
double dif;
double timesofar = 0;
double runtime;
double percentcomplete;
//apply solvent to entire system
initialise_solvent <<< phi_diffusion_grid, phi_diffusion_threads, 0 >>>(device_phi_A, device_phi_B, solvent);
//give each point a delta_z value depending on its solvent concentration
make_deltas <<< phi_diffusion_grid, phi_diffusion_threads, 0 >>>(delta_x, device_phi_A, device_phi_B, device_deltas, solvent);
//find the height of each point using the delta_z values
//WARNING!!! THIS BIT OF CODE IS INEFFICIENT
find_z_height <<< phi_diffusion_grid, phi_diffusion_threads, 0 >>>(device_deltas, device_lattice_height);
//copy array from GPU to CPU
cudaMemcpy(phi_A, device_phi_A, volume*sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(phi_B, device_phi_B, volume*sizeof(double), cudaMemcpyDeviceToHost);
//copy lattice heights from GPU to CPU
cudaMemcpy(lattice_height, device_lattice_height, volume*sizeof(double), cudaMemcpyDeviceToHost);
//store the initial data to files and subfolders
store_data();
new_store_data();
check_data();
//store_minimum_data();
//begin the main loop
for (counter = counter_start+1; counter < countmax; counter++)
{
//record time so time estimation can be made
time (&start);
//begin inner loop that iterates the system between each data output
for (int loop_counter = 0; loop_counter < num_saves; loop_counter++)
{
//remove a small amount of solvent from the top layer
remove_top_solvent <<< phi_diffusion_grid, phi_diffusion_threads, 0 >>>(device_phi_A, device_phi_B, evaporation);
//give each point a delta_z value depending on its solvent concentration
make_deltas <<< phi_diffusion_grid, phi_diffusion_threads, 0 >>>(delta_x, device_phi_A, device_phi_B, device_deltas, solvent);
//employ surface conditions on the system
phi_surf <<< phi_surface_grid, phi_surface_threads, 0 >>>(device_phi_A, device_phi_B, device_deltas);
//calculate the mu values
mu_calc <<< phi_diffusion_grid, phi_diffusion_threads, 0 >>>
(device_phi_A,
device_phi_B,
device_mu1_A,
device_mu2_A,
device_mu1_B,
device_mu2_B,
delta_x,
delta_y,
kappa,
chi_scale,
pbc_x,
pbc_y,
pbc_z,
deg_polyA,
deg_polyB,
deg_polyC,
kappa,
kai_AB,
kai_AC,
kai_BC,
device_deltas
);
//employ surface conditions on the mu array to match the system
mu_surf <<< phi_surface_grid, phi_surface_threads, 0 >>>
(device_phi_A,
device_phi_B,
device_mu1_A,
device_mu2_A,
device_mu1_B,
device_mu2_B,
g_upper,
h_upper,
g_lower,
h_lower
);
//begin the diffusion calculation
new_phi_diffusion_1 <<< phi_diffusion_grid, phi_diffusion_threads, 0 >>>
(device_phi_A,
device_phi_B,
device_mu1_A,
device_mu2_A,
device_mu1_B,
device_mu2_B,
device_mu1_A_grad_lambda_x,
device_mu2_A_grad_lambda_x,
device_mu1_B_grad_lambda_x,
device_mu2_B_grad_lambda_x,
device_mu1_A_grad_lambda_y,
device_mu2_A_grad_lambda_y,
device_mu1_B_grad_lambda_y,
device_mu2_B_grad_lambda_y,
device_mu1_A_grad_lambda_z,
device_mu2_A_grad_lambda_z,
device_mu1_B_grad_lambda_z,
device_mu2_B_grad_lambda_z,
delta_x,
delta_y,
delta_t,
mobility,
pbc_x,
pbc_y,
pbc_z,
kappa,
device_deltas
);
//finish the diffusion calculation
new_phi_diffusion_2 <<< phi_diffusion_grid, phi_diffusion_threads, 0 >>>
(device_phi_A,
device_phi_B,
device_mu1_A_grad_lambda_x,
device_mu2_A_grad_lambda_x,
device_mu1_B_grad_lambda_x,
device_mu2_B_grad_lambda_x,
device_mu1_A_grad_lambda_y,
device_mu2_A_grad_lambda_y,
device_mu1_B_grad_lambda_y,
device_mu2_B_grad_lambda_y,
device_mu1_A_grad_lambda_z,
device_mu2_A_grad_lambda_z,
device_mu1_B_grad_lambda_z,
device_mu2_B_grad_lambda_z,
delta_x,
delta_y,
delta_t,
mobility,
pbc_x,
pbc_y,
pbc_z,
kappa,
device_delta_phi_A_delta_tau,
device_delta_phi_B_delta_tau,
device_deltas
);
//find the height of each lattice point
find_z_height <<< phi_diffusion_grid, phi_diffusion_threads, 0 >>>(device_deltas, device_lattice_height);
//put the new array into the interpolated array
}
// copy the phi matrix values back from the device to the host
cudaMemcpy(phi_A, device_phi_A, volume*sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(phi_B, device_phi_B, volume*sizeof(double), cudaMemcpyDeviceToHost);
// copy lattice height values back from the device to the host
cudaMemcpy(lattice_height, device_lattice_height, volume*sizeof(double), cudaMemcpyDeviceToHost);
//store the current data to files and subfolders
store_data();
new_store_data();
check_data();
//store_minimum_data();
/*
//this code outputs the change between each output value and is useful for debugging.
cudaMemcpy(delta_phi_A_delta_tau, device_delta_phi_A_delta_tau, volume*sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(delta_phi_B_delta_tau, device_delta_phi_B_delta_tau, volume*sizeof(double), cudaMemcpyDeviceToHost);
store_deltas();
*/
//work out time estimation
time (&end);
dif = difftime (end,start) / 60;
runtime = dif*(countmax-1);
timesofar += dif;
percentcomplete = 100 * timesofar/runtime;
//print time estimation to screen
cout << "loop " << counter << " of " << countmax-1 << " took " << dif <<" minutes." << endl;
cout << "times run so far is " << timesofar << " minutes of a estimated time of " << runtime << " minutes." << endl;
cout << percentcomplete << "% complete..." << endl;
//end the outer loop
}
//free the memory on the GPU
cudaFree(device_phi_A);
cudaFree(device_phi_B);
cudaFree(device_mu1_A);
cudaFree(device_mu2_A);
cudaFree(device_mu1_B);
cudaFree(device_mu2_B);
cudaFree(device_mu1_A_grad_lambda_x);
cudaFree(device_mu2_A_grad_lambda_x);
cudaFree(device_mu1_B_grad_lambda_x);
cudaFree(device_mu2_B_grad_lambda_x);
cudaFree(device_mu1_A_grad_lambda_y);
cudaFree(device_mu2_A_grad_lambda_y);
cudaFree(device_mu1_B_grad_lambda_y);
cudaFree(device_mu2_B_grad_lambda_y);
cudaFree(device_mu1_A_grad_lambda_z);
cudaFree(device_mu2_A_grad_lambda_z);
cudaFree(device_mu1_B_grad_lambda_z);
cudaFree(device_mu2_B_grad_lambda_z);
cudaFree(device_delta_phi_A_delta_tau);
cudaFree(device_delta_phi_B_delta_tau);
cudaFree(device_deltas);
cudaFree(device_lattice_height);
//free the allocated memory on the host
free(phi_A);
free(phi_B);
free(phi_C);
free(delta_phi_A_delta_tau);
free(delta_phi_B_delta_tau);
free(delta_phi_C_delta_tau);
free(array_delta_z);
free(lattice_height);
//that's the end
}
|
e4fbfe6fccbf00dbec5e136becfaa0a346e69d28.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <ATen/Context.h>
#include <ATen/hip/HIPContext.h>
#include <c10/util/Exception.h>
#include <ATen/hip/Exceptions.h>
#include <ATen/native/sparse/hip/SparseHIPBlas.cuh>
#include <ATen/hip/impl/HIPCachingAllocatorMasqueradingAsCUDA.h>
#include <TH/THGeneral.h>
#include <hipsparse.h>
// LIMITATION (hipsparseSpMM):
// The generic APIs are available on all platforms on CUDA 11.0
// For CUDA 10.1+ it is available for all platforms except Windows.
// Using these APIs in any other systems will result in compile-time or run-time failures.
// Their support will be extended in the next releases.
#if defined(__HIPCC__) && (CUSPARSE_VERSION >= 11000 || (!defined(_MSC_VER) && CUSPARSE_VERSION >= 10301))
#define IS_SPMM_AVAILABLE() 1
#else
#define IS_SPMM_AVAILABLE() 0
#endif
#if IS_SPMM_AVAILABLE()
#include <hip/library_types.h>
#endif
#if !defined(CUSPARSE_VERSION) || (CUSPARSE_VERSION < 10100)
const char* hipsparseGetErrorString(hipsparseStatus_t status) {
switch(status)
{
case HIPSPARSE_STATUS_SUCCESS:
return "success";
case HIPSPARSE_STATUS_NOT_INITIALIZED:
return "library not initialized";
case HIPSPARSE_STATUS_ALLOC_FAILED:
return "resource allocation failed";
case HIPSPARSE_STATUS_INVALID_VALUE:
return "an invalid numeric value was used as an argument";
case HIPSPARSE_STATUS_ARCH_MISMATCH:
return "an absent device architectural feature is required";
case HIPSPARSE_STATUS_MAPPING_ERROR:
return "an access to GPU memory space failed";
case HIPSPARSE_STATUS_EXECUTION_FAILED:
return "the GPU program failed to execute";
case HIPSPARSE_STATUS_INTERNAL_ERROR:
return "an internal operation failed";
case HIPSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "the matrix type is not supported by this function";
case HIPSPARSE_STATUS_ZERO_PIVOT:
return "an entry of the matrix is either structural zero or numerical zero (singular block)";
default:
return "unknown error";
}
}
#endif
namespace at { namespace native { namespace sparse { namespace cuda {
void Xcoo2csr(const int *coorowind, int64_t nnz, int64_t m, int *csrrowptr) {
TORCH_CHECK((m <= INT_MAX) && (nnz <= INT_MAX),
"hipsparseXcoo2csr only supports m, nnz with the bound [val] <= ",
INT_MAX);
int i_nnz = (int)nnz;
int i_m = (int)m;
auto handle = at::cuda::getCurrentCUDASparseHandle();
TORCH_CUDASPARSE_CHECK(hipsparseXcoo2csr(handle, coorowind, i_nnz, i_m, csrrowptr, HIPSPARSE_INDEX_BASE_ZERO));
}
hipsparseOperation_t convertTransToCusparseOperation(char trans) {
if (trans == 't') return HIPSPARSE_OPERATION_TRANSPOSE;
else if (trans == 'n') return HIPSPARSE_OPERATION_NON_TRANSPOSE;
else if (trans == 'c') return HIPSPARSE_OPERATION_CONJUGATE_TRANSPOSE;
else {
AT_ERROR("trans must be one of: t, n, c");
}
}
#if IS_SPMM_AVAILABLE()
template<typename T>
void csrmm2(
char transa, char transb,
int64_t m, int64_t n, int64_t k, int64_t nnz,
T alpha, T *csrvala, int *csrrowptra, int *csrcolinda,
T *b, int64_t ldb, T beta, T *c, int64_t ldc)
{
static_assert(std::is_same<float, T>::value || std::is_same<double, T>::value, "csrmm2 only supports float and double value types");
constexpr auto cusparse_value_type = std::is_same<float, T>::value ? HIP_R_32F : HIP_R_64F;
if (csrvala == nullptr || b == nullptr || c == nullptr) return;
hipsparseOperation_t opa = convertTransToCusparseOperation(transa);
hipsparseOperation_t opb = convertTransToCusparseOperation(transb);
// hipsparseSpMM actually supports int64_t.
// In order to support int64 here, index pointers csrrowptra, csrcolinda have to be passed as int64_t.
TORCH_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (k <= INT_MAX) && (nnz <= INT_MAX) && (ldb <= INT_MAX) && (ldc <= INT_MAX),
"At the moment, hipsparseSpMM only supports m, n, k, nnz, ldb, ldc with the bound [val] <= ", INT_MAX, ".",
"If you need this, please file an issue on GitHub."
);
int64_t ma = m, ka = k;
if (transa != 'n') std::swap(ma, ka);
hipsparseSpMatDescr_t descA;
TORCH_CUDASPARSE_CHECK(hipsparseCreateCsr(
&descA, /* output */
ma, ka, nnz, /* rows, cols, number of non zero elements */
csrrowptra, /* row offsets of the sparse matrix, size = rows +1 */
csrcolinda, /* column indices of the sparse matrix, size = nnz */
csrvala, /* values of the sparse matrix, size = nnz */
HIPSPARSE_INDEX_32I, /* data type of row offsets index */
HIPSPARSE_INDEX_32I, /* data type of col indices */
HIPSPARSE_INDEX_BASE_ZERO, /* base index of row offsets and col indices */
cusparse_value_type /* data type of values */
));
int64_t kb = k, nb = n;
if (transb != 'n') std::swap(kb, nb);
hipsparseDnMatDescr_t descB;
TORCH_CUDASPARSE_CHECK(hipsparseCreateDnMat(
&descB, /* output */
kb, nb, ldb, /* rows, cols, leading dimension */
b, /* values */
cusparse_value_type, /* data type of values */
HIPSPARSE_ORDER_COL /* memory layout, ONLY column-major is supported now */
));
hipsparseDnMatDescr_t descC;
TORCH_CUDASPARSE_CHECK(hipsparseCreateDnMat(
&descC, /* output */
m, n, ldc, /* rows, cols, leading dimension */
c, /* values */
cusparse_value_type, /* data type of values */
HIPSPARSE_ORDER_COL /* memory layout, ONLY column-major is supported now */
));
auto handle = at::cuda::getCurrentCUDASparseHandle();
// hipsparseSpMM_bufferSize returns the bufferSize that can be used by hipsparseSpMM
size_t bufferSize;
TORCH_CUDASPARSE_CHECK(hipsparseSpMM_bufferSize(
handle, opa, opb,
&alpha,
descA, descB,
&beta,
descC,
cusparse_value_type, /* data type in which the computation is executed */
HIPSPARSE_CSRMM_ALG1, /* default computing algorithm for CSR sparse matrix format */
&bufferSize /* output */
));
auto& allocator = *c10::hip::HIPCachingAllocatorMasqueradingAsCUDA::get();
auto dataPtr = allocator.allocate(bufferSize);
TORCH_CUDASPARSE_CHECK(hipsparseSpMM(
handle, opa, opb,
&alpha,
descA, descB,
&beta,
descC,
cusparse_value_type, /* data type in which the computation is executed */
HIPSPARSE_CSRMM_ALG1, /* default computing algorithm for CSR sparse matrix format */
dataPtr.get() /* external buffer */
));
TORCH_CUDASPARSE_CHECK(hipsparseDestroySpMat(descA));
TORCH_CUDASPARSE_CHECK(hipsparseDestroyDnMat(descB));
TORCH_CUDASPARSE_CHECK(hipsparseDestroyDnMat(descC));
// TODO: Proper fix is to create real descriptor classes
}
template void csrmm2<float>(
char transa, char transb,
int64_t m, int64_t n, int64_t k, int64_t nnz,
float alpha, float *csrvala, int *csrrowptra, int *csrcolinda,
float *b, int64_t ldb, float beta, float *c, int64_t ldc);
template void csrmm2<double>(
char transa, char transb,
int64_t m, int64_t n, int64_t k, int64_t nnz,
double alpha, double *csrvala, int *csrrowptra, int *csrcolinda,
double *b, int64_t ldb, double beta, double *c, int64_t ldc);
#else
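// adjustLd: helper for the legacy hipsparse csrmm2 path below. When one of the dense
// operands degenerates to a vector (n == 1, or k == 1 for a transposed B), the leading
// dimensions ldb/ldc are clamped to the smallest values consistent with that shape.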
void adjustLd(char transb, int64_t m, int64_t n, int64_t k, int64_t *ldb, int64_t *ldc)
{
int transb_ = ((transb == 't') || (transb == 'T'));
if(n == 1)
*ldc = m;
if(transb_)
{
if(k == 1)
*ldb = n;
}
else
{
if(n == 1)
*ldb = k;
}
}
void Scsrmm2(char transa, char transb, int64_t m, int64_t n, int64_t k, int64_t nnz, float alpha, float *csrvala, int *csrrowptra, int *csrcolinda, float *b, int64_t ldb, float beta, float *c, int64_t ldc)
{
adjustLd(transb, m, n, k, &ldb, &ldc);
hipsparseOperation_t opa = convertTransToCusparseOperation(transa);
hipsparseOperation_t opb = convertTransToCusparseOperation(transb);
TORCH_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (k <= INT_MAX) && (nnz <= INT_MAX) && (ldb <= INT_MAX) && (ldc <= INT_MAX),
"hipsparseScsrmm2 only supports m, n, k, nnz, ldb, ldc with the bound [val] <= ", INT_MAX);
int i_m = (int)m;
int i_n = (int)n;
int i_k = (int)k;
int i_nnz = (int)nnz;
int i_ldb = (int)ldb;
int i_ldc = (int)ldc;
auto handle = at::cuda::getCurrentCUDASparseHandle();
hipsparseMatDescr_t desc;
hipsparseCreateMatDescr(&desc);
TORCH_CUDASPARSE_CHECK(hipsparseScsrmm2(handle, opa, opb, i_m, i_n, i_k, i_nnz, &alpha, desc, csrvala, csrrowptra, csrcolinda, b, i_ldb, &beta, c, i_ldc));
TORCH_CUDASPARSE_CHECK(hipsparseDestroyMatDescr(desc));
}
void Dcsrmm2(char transa, char transb, int64_t m, int64_t n, int64_t k, int64_t nnz, double alpha, double *csrvala, int *csrrowptra, int *csrcolinda, double *b, int64_t ldb, double beta, double *c, int64_t ldc)
{
adjustLd(transb, m, n, k, &ldb, &ldc);
hipsparseOperation_t opa = convertTransToCusparseOperation(transa);
hipsparseOperation_t opb = convertTransToCusparseOperation(transb);
TORCH_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (k <= INT_MAX) && (nnz <= INT_MAX) && (ldb <= INT_MAX) && (ldc <= INT_MAX),
"hipsparseDcsrmm2 only supports m, n, k, nnz, ldb, ldc with the bound [val] <= ", INT_MAX);
int i_m = (int)m;
int i_n = (int)n;
int i_k = (int)k;
int i_nnz = (int)nnz;
int i_ldb = (int)ldb;
int i_ldc = (int)ldc;
auto handle = at::cuda::getCurrentCUDASparseHandle();
hipsparseMatDescr_t desc;
hipsparseCreateMatDescr(&desc);
TORCH_CUDASPARSE_CHECK(hipsparseDcsrmm2(handle, opa, opb, i_m, i_n, i_k, i_nnz, &alpha, desc, csrvala, csrrowptra, csrcolinda, b, i_ldb, &beta, c, i_ldc));
TORCH_CUDASPARSE_CHECK(hipsparseDestroyMatDescr(desc));
// TODO: Proper fix is to create real descriptor classes
}
// T can only be float or double
template<typename T>
void csrmm2(
char transa, char transb,
int64_t m, int64_t n, int64_t k, int64_t nnz,
T alpha, T *csrvala, int *csrrowptra, int *csrcolinda,
T *b, int64_t ldb, T beta, T *c, int64_t ldc)
{
TORCH_INTERNAL_ASSERT(false, "cusparse csr MM only supports data type of float and double.");
}
template<> void csrmm2<float>(
char transa, char transb,
int64_t m, int64_t n, int64_t k, int64_t nnz,
float alpha, float *csrvala, int *csrrowptra, int *csrcolinda,
float *b, int64_t ldb, float beta, float *c, int64_t ldc)
{
Scsrmm2(transa, transb, m, n, k, nnz, alpha, csrvala, csrrowptra, csrcolinda, b, ldb, beta, c, ldc);
}
template<> void csrmm2<double>(
char transa, char transb,
int64_t m, int64_t n, int64_t k, int64_t nnz,
double alpha, double *csrvala, int *csrrowptra, int *csrcolinda,
double *b, int64_t ldb, double beta, double *c, int64_t ldc)
{
Dcsrmm2(transa, transb, m, n, k, nnz, alpha, csrvala, csrrowptra, csrcolinda, b, ldb, beta, c, ldc);
}
#endif
/* format conversion */
void CreateIdentityPermutation(int64_t nnz, int *P) {
TORCH_CHECK((nnz <= INT_MAX),
"Xcsrsort_bufferSizeExt only supports m, n, nnz with the bound [val] <= ",
INT_MAX);
int i_nnz = (int)nnz;
auto handle = at::cuda::getCurrentCUDASparseHandle();
hipsparseCreateIdentityPermutation(handle, i_nnz, P);
}
void Xcsrsort_bufferSizeExt(int64_t m, int64_t n, int64_t nnz, const int *csrRowPtr, const int *csrColInd, size_t *pBufferSizeInBytes)
{
TORCH_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (nnz <= INT_MAX),
"Xcsrsort_bufferSizeExt only supports m, n, nnz with the bound [val] <=",
INT_MAX);
int i_m = (int)m;
int i_n = (int)n;
int i_nnz = (int)nnz;
auto handle = at::cuda::getCurrentCUDASparseHandle();
TORCH_CUDASPARSE_CHECK(hipsparseXcsrsort_bufferSizeExt(handle, i_m, i_n, i_nnz, csrRowPtr, csrColInd, pBufferSizeInBytes));
}
void Xcsrsort(int64_t m, int64_t n, int64_t nnz, const int *csrRowPtr, int *csrColInd, int *P, void *pBuffer)
{
TORCH_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (nnz <= INT_MAX),
"Xcsrsort only supports m, n, nnz with the bound [val] <= ",
INT_MAX);
int i_m = (int)m;
int i_n = (int)n;
int i_nnz = (int)nnz;
auto handle = at::cuda::getCurrentCUDASparseHandle();
hipsparseMatDescr_t desc;
hipsparseCreateMatDescr(&desc);
TORCH_CUDASPARSE_CHECK(hipsparseXcsrsort(handle, i_m, i_n, i_nnz, desc, csrRowPtr, csrColInd, P, pBuffer));
TORCH_CUDASPARSE_CHECK(hipsparseDestroyMatDescr(desc));
}
void Xcoosort_bufferSizeExt(int64_t m, int64_t n, int64_t nnz, const int *cooRows, const int *cooCols, size_t *pBufferSizeInBytes)
{
TORCH_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (nnz <= INT_MAX),
"Xcoosort_bufferSizeExt only supports m, n, nnz with the bound [val] <= ",
INT_MAX);
int i_m = (int)m;
int i_n = (int)n;
int i_nnz = (int)nnz;
auto handle = at::cuda::getCurrentCUDASparseHandle();
TORCH_CUDASPARSE_CHECK(hipsparseXcoosort_bufferSizeExt(handle, i_m, i_n, i_nnz, cooRows, cooCols, pBufferSizeInBytes));
}
void XcoosortByRow(int64_t m, int64_t n, int64_t nnz, int *cooRows, int *cooCols, int *P, void *pBuffer)
{
TORCH_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (nnz <= INT_MAX),
"XcoosortByRow only supports m, n, nnz with the bound [val] <= ",
INT_MAX);
int i_m = (int)m;
int i_n = (int)n;
int i_nnz = (int)nnz;
auto handle = at::cuda::getCurrentCUDASparseHandle();
TORCH_CUDASPARSE_CHECK(hipsparseXcoosortByRow(handle, i_m, i_n, i_nnz, cooRows, cooCols, P, pBuffer));
}
}}}} // namespace at::native::sparse::cuda
|
e4fbfe6fccbf00dbec5e136becfaa0a346e69d28.cu
|
#include <ATen/Context.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/util/Exception.h>
#include <ATen/cuda/Exceptions.h>
#include <ATen/native/sparse/cuda/SparseCUDABlas.cuh>
#include <c10/cuda/CUDACachingAllocator.h>
#include <TH/THGeneral.h>
#include <cusparse.h>
// LIMITATION (cusparseSpMM):
// The generic APIs are available on all platforms on CUDA 11.0
// For CUDA 10.1+ it is available for all platforms except Windows.
// Using these APIs in any other systems will result in compile-time or run-time failures.
// Their support will be extended in the next releases.
#if defined(__CUDACC__) && (CUSPARSE_VERSION >= 11000 || (!defined(_MSC_VER) && CUSPARSE_VERSION >= 10301))
#define IS_SPMM_AVAILABLE() 1
#else
#define IS_SPMM_AVAILABLE() 0
#endif
#if IS_SPMM_AVAILABLE()
#include <library_types.h>
#endif
#if !defined(CUSPARSE_VERSION) || (CUSPARSE_VERSION < 10100)
const char* cusparseGetErrorString(cusparseStatus_t status) {
switch(status)
{
case CUSPARSE_STATUS_SUCCESS:
return "success";
case CUSPARSE_STATUS_NOT_INITIALIZED:
return "library not initialized";
case CUSPARSE_STATUS_ALLOC_FAILED:
return "resource allocation failed";
case CUSPARSE_STATUS_INVALID_VALUE:
return "an invalid numeric value was used as an argument";
case CUSPARSE_STATUS_ARCH_MISMATCH:
return "an absent device architectural feature is required";
case CUSPARSE_STATUS_MAPPING_ERROR:
return "an access to GPU memory space failed";
case CUSPARSE_STATUS_EXECUTION_FAILED:
return "the GPU program failed to execute";
case CUSPARSE_STATUS_INTERNAL_ERROR:
return "an internal operation failed";
case CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "the matrix type is not supported by this function";
case CUSPARSE_STATUS_ZERO_PIVOT:
return "an entry of the matrix is either structural zero or numerical zero (singular block)";
default:
return "unknown error";
}
}
#endif
namespace at { namespace native { namespace sparse { namespace cuda {
void Xcoo2csr(const int *coorowind, int64_t nnz, int64_t m, int *csrrowptr) {
TORCH_CHECK((m <= INT_MAX) && (nnz <= INT_MAX),
"cusparseXcoo2csr only supports m, nnz with the bound [val] <= ",
INT_MAX);
int i_nnz = (int)nnz;
int i_m = (int)m;
auto handle = at::cuda::getCurrentCUDASparseHandle();
TORCH_CUDASPARSE_CHECK(cusparseXcoo2csr(handle, coorowind, i_nnz, i_m, csrrowptr, CUSPARSE_INDEX_BASE_ZERO));
}
cusparseOperation_t convertTransToCusparseOperation(char trans) {
if (trans == 't') return CUSPARSE_OPERATION_TRANSPOSE;
else if (trans == 'n') return CUSPARSE_OPERATION_NON_TRANSPOSE;
else if (trans == 'c') return CUSPARSE_OPERATION_CONJUGATE_TRANSPOSE;
else {
AT_ERROR("trans must be one of: t, n, c");
}
}
#if IS_SPMM_AVAILABLE()
template<typename T>
void csrmm2(
char transa, char transb,
int64_t m, int64_t n, int64_t k, int64_t nnz,
T alpha, T *csrvala, int *csrrowptra, int *csrcolinda,
T *b, int64_t ldb, T beta, T *c, int64_t ldc)
{
static_assert(std::is_same<float, T>::value || std::is_same<double, T>::value, "csrmm2 only supports float and double value types");
constexpr auto cusparse_value_type = std::is_same<float, T>::value ? CUDA_R_32F : CUDA_R_64F;
if (csrvala == nullptr || b == nullptr || c == nullptr) return;
cusparseOperation_t opa = convertTransToCusparseOperation(transa);
cusparseOperation_t opb = convertTransToCusparseOperation(transb);
// cusparseSpMM actually supports int64_t.
// In order to support int64 here, index pointers csrrowptra, csrcolinda have to be passed as int64_t.
TORCH_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (k <= INT_MAX) && (nnz <= INT_MAX) && (ldb <= INT_MAX) && (ldc <= INT_MAX),
"At the moment, cusparseSpMM only supports m, n, k, nnz, ldb, ldc with the bound [val] <= ", INT_MAX, ".",
"If you need this, please file an issue on GitHub."
);
int64_t ma = m, ka = k;
if (transa != 'n') std::swap(ma, ka);
cusparseSpMatDescr_t descA;
TORCH_CUDASPARSE_CHECK(cusparseCreateCsr(
&descA, /* output */
ma, ka, nnz, /* rows, cols, number of non zero elements */
csrrowptra, /* row offsets of the sparse matrix, size = rows +1 */
csrcolinda, /* column indices of the sparse matrix, size = nnz */
csrvala, /* values of the sparse matrix, size = nnz */
CUSPARSE_INDEX_32I, /* data type of row offsets index */
CUSPARSE_INDEX_32I, /* data type of col indices */
CUSPARSE_INDEX_BASE_ZERO, /* base index of row offsets and col indices */
cusparse_value_type /* data type of values */
));
int64_t kb = k, nb = n;
if (transb != 'n') std::swap(kb, nb);
cusparseDnMatDescr_t descB;
TORCH_CUDASPARSE_CHECK(cusparseCreateDnMat(
&descB, /* output */
kb, nb, ldb, /* rows, cols, leading dimension */
b, /* values */
cusparse_value_type, /* data type of values */
CUSPARSE_ORDER_COL /* memory layout, ONLY column-major is supported now */
));
cusparseDnMatDescr_t descC;
TORCH_CUDASPARSE_CHECK(cusparseCreateDnMat(
&descC, /* output */
m, n, ldc, /* rows, cols, leading dimension */
c, /* values */
cusparse_value_type, /* data type of values */
CUSPARSE_ORDER_COL /* memory layout, ONLY column-major is supported now */
));
auto handle = at::cuda::getCurrentCUDASparseHandle();
// cusparseSpMM_bufferSize returns the bufferSize that can be used by cusparseSpMM
size_t bufferSize;
TORCH_CUDASPARSE_CHECK(cusparseSpMM_bufferSize(
handle, opa, opb,
&alpha,
descA, descB,
&beta,
descC,
cusparse_value_type, /* data type in which the computation is executed */
CUSPARSE_CSRMM_ALG1, /* default computing algorithm for CSR sparse matrix format */
&bufferSize /* output */
));
auto& allocator = *c10::cuda::CUDACachingAllocator::get();
auto dataPtr = allocator.allocate(bufferSize);
TORCH_CUDASPARSE_CHECK(cusparseSpMM(
handle, opa, opb,
&alpha,
descA, descB,
&beta,
descC,
cusparse_value_type, /* data type in which the computation is executed */
CUSPARSE_CSRMM_ALG1, /* default computing algorithm for CSR sparse matrix format */
dataPtr.get() /* external buffer */
));
TORCH_CUDASPARSE_CHECK(cusparseDestroySpMat(descA));
TORCH_CUDASPARSE_CHECK(cusparseDestroyDnMat(descB));
TORCH_CUDASPARSE_CHECK(cusparseDestroyDnMat(descC));
// TODO: Proper fix is to create real descriptor classes
}
template void csrmm2<float>(
char transa, char transb,
int64_t m, int64_t n, int64_t k, int64_t nnz,
float alpha, float *csrvala, int *csrrowptra, int *csrcolinda,
float *b, int64_t ldb, float beta, float *c, int64_t ldc);
template void csrmm2<double>(
char transa, char transb,
int64_t m, int64_t n, int64_t k, int64_t nnz,
double alpha, double *csrvala, int *csrrowptra, int *csrcolinda,
double *b, int64_t ldb, double beta, double *c, int64_t ldc);
#else
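// adjustLd: helper for the legacy cusparse csrmm2 path below. When one of the dense
// operands degenerates to a vector (n == 1, or k == 1 for a transposed B), the leading
// dimensions ldb/ldc are clamped to the smallest values consistent with that shape.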
void adjustLd(char transb, int64_t m, int64_t n, int64_t k, int64_t *ldb, int64_t *ldc)
{
int transb_ = ((transb == 't') || (transb == 'T'));
if(n == 1)
*ldc = m;
if(transb_)
{
if(k == 1)
*ldb = n;
}
else
{
if(n == 1)
*ldb = k;
}
}
void Scsrmm2(char transa, char transb, int64_t m, int64_t n, int64_t k, int64_t nnz, float alpha, float *csrvala, int *csrrowptra, int *csrcolinda, float *b, int64_t ldb, float beta, float *c, int64_t ldc)
{
adjustLd(transb, m, n, k, &ldb, &ldc);
cusparseOperation_t opa = convertTransToCusparseOperation(transa);
cusparseOperation_t opb = convertTransToCusparseOperation(transb);
TORCH_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (k <= INT_MAX) && (nnz <= INT_MAX) && (ldb <= INT_MAX) && (ldc <= INT_MAX),
"cusparseScsrmm2 only supports m, n, k, nnz, ldb, ldc with the bound [val] <= ", INT_MAX);
int i_m = (int)m;
int i_n = (int)n;
int i_k = (int)k;
int i_nnz = (int)nnz;
int i_ldb = (int)ldb;
int i_ldc = (int)ldc;
auto handle = at::cuda::getCurrentCUDASparseHandle();
cusparseMatDescr_t desc;
cusparseCreateMatDescr(&desc);
TORCH_CUDASPARSE_CHECK(cusparseScsrmm2(handle, opa, opb, i_m, i_n, i_k, i_nnz, &alpha, desc, csrvala, csrrowptra, csrcolinda, b, i_ldb, &beta, c, i_ldc));
TORCH_CUDASPARSE_CHECK(cusparseDestroyMatDescr(desc));
}
void Dcsrmm2(char transa, char transb, int64_t m, int64_t n, int64_t k, int64_t nnz, double alpha, double *csrvala, int *csrrowptra, int *csrcolinda, double *b, int64_t ldb, double beta, double *c, int64_t ldc)
{
adjustLd(transb, m, n, k, &ldb, &ldc);
cusparseOperation_t opa = convertTransToCusparseOperation(transa);
cusparseOperation_t opb = convertTransToCusparseOperation(transb);
TORCH_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (k <= INT_MAX) && (nnz <= INT_MAX) && (ldb <= INT_MAX) && (ldc <= INT_MAX),
"cusparseDcsrmm2 only supports m, n, k, nnz, ldb, ldc with the bound [val] <= ", INT_MAX);
int i_m = (int)m;
int i_n = (int)n;
int i_k = (int)k;
int i_nnz = (int)nnz;
int i_ldb = (int)ldb;
int i_ldc = (int)ldc;
auto handle = at::cuda::getCurrentCUDASparseHandle();
cusparseMatDescr_t desc;
cusparseCreateMatDescr(&desc);
TORCH_CUDASPARSE_CHECK(cusparseDcsrmm2(handle, opa, opb, i_m, i_n, i_k, i_nnz, &alpha, desc, csrvala, csrrowptra, csrcolinda, b, i_ldb, &beta, c, i_ldc));
TORCH_CUDASPARSE_CHECK(cusparseDestroyMatDescr(desc));
// TODO: Proper fix is to create real descriptor classes
}
// T can only be float or double
template<typename T>
void csrmm2(
char transa, char transb,
int64_t m, int64_t n, int64_t k, int64_t nnz,
T alpha, T *csrvala, int *csrrowptra, int *csrcolinda,
T *b, int64_t ldb, T beta, T *c, int64_t ldc)
{
TORCH_INTERNAL_ASSERT(false, "cusparse csr MM only supports data type of float and double.");
}
template<> void csrmm2<float>(
char transa, char transb,
int64_t m, int64_t n, int64_t k, int64_t nnz,
float alpha, float *csrvala, int *csrrowptra, int *csrcolinda,
float *b, int64_t ldb, float beta, float *c, int64_t ldc)
{
Scsrmm2(transa, transb, m, n, k, nnz, alpha, csrvala, csrrowptra, csrcolinda, b, ldb, beta, c, ldc);
}
template<> void csrmm2<double>(
char transa, char transb,
int64_t m, int64_t n, int64_t k, int64_t nnz,
double alpha, double *csrvala, int *csrrowptra, int *csrcolinda,
double *b, int64_t ldb, double beta, double *c, int64_t ldc)
{
Dcsrmm2(transa, transb, m, n, k, nnz, alpha, csrvala, csrrowptra, csrcolinda, b, ldb, beta, c, ldc);
}
#endif
/* format conversion */
void CreateIdentityPermutation(int64_t nnz, int *P) {
TORCH_CHECK((nnz <= INT_MAX),
"Xcsrsort_bufferSizeExt only supports m, n, nnz with the bound [val] <= ",
INT_MAX);
int i_nnz = (int)nnz;
auto handle = at::cuda::getCurrentCUDASparseHandle();
cusparseCreateIdentityPermutation(handle, i_nnz, P);
}
void Xcsrsort_bufferSizeExt(int64_t m, int64_t n, int64_t nnz, const int *csrRowPtr, const int *csrColInd, size_t *pBufferSizeInBytes)
{
TORCH_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (nnz <= INT_MAX),
"Xcsrsort_bufferSizeExt only supports m, n, nnz with the bound [val] <=",
INT_MAX);
int i_m = (int)m;
int i_n = (int)n;
int i_nnz = (int)nnz;
auto handle = at::cuda::getCurrentCUDASparseHandle();
TORCH_CUDASPARSE_CHECK(cusparseXcsrsort_bufferSizeExt(handle, i_m, i_n, i_nnz, csrRowPtr, csrColInd, pBufferSizeInBytes));
}
void Xcsrsort(int64_t m, int64_t n, int64_t nnz, const int *csrRowPtr, int *csrColInd, int *P, void *pBuffer)
{
TORCH_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (nnz <= INT_MAX),
"Xcsrsort only supports m, n, nnz with the bound [val] <= ",
INT_MAX);
int i_m = (int)m;
int i_n = (int)n;
int i_nnz = (int)nnz;
auto handle = at::cuda::getCurrentCUDASparseHandle();
cusparseMatDescr_t desc;
cusparseCreateMatDescr(&desc);
TORCH_CUDASPARSE_CHECK(cusparseXcsrsort(handle, i_m, i_n, i_nnz, desc, csrRowPtr, csrColInd, P, pBuffer));
TORCH_CUDASPARSE_CHECK(cusparseDestroyMatDescr(desc));
}
void Xcoosort_bufferSizeExt(int64_t m, int64_t n, int64_t nnz, const int *cooRows, const int *cooCols, size_t *pBufferSizeInBytes)
{
TORCH_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (nnz <= INT_MAX),
"Xcoosort_bufferSizeExt only supports m, n, nnz with the bound [val] <= ",
INT_MAX);
int i_m = (int)m;
int i_n = (int)n;
int i_nnz = (int)nnz;
auto handle = at::cuda::getCurrentCUDASparseHandle();
TORCH_CUDASPARSE_CHECK(cusparseXcoosort_bufferSizeExt(handle, i_m, i_n, i_nnz, cooRows, cooCols, pBufferSizeInBytes));
}
void XcoosortByRow(int64_t m, int64_t n, int64_t nnz, int *cooRows, int *cooCols, int *P, void *pBuffer)
{
TORCH_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (nnz <= INT_MAX),
"XcoosortByRow only supports m, n, nnz with the bound [val] <= ",
INT_MAX);
int i_m = (int)m;
int i_n = (int)n;
int i_nnz = (int)nnz;
auto handle = at::cuda::getCurrentCUDASparseHandle();
TORCH_CUDASPARSE_CHECK(cusparseXcoosortByRow(handle, i_m, i_n, i_nnz, cooRows, cooCols, P, pBuffer));
}
}}}} // namespace at::native::sparse::cuda
|
16b3228985977fdb8331a4052f784cedc4e04819.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file writer_impl.cu
* @brief cuDF-IO parquet writer class implementation
*/
#include "writer_impl.hpp"
#include <cudf/null_mask.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <algorithm>
#include <cstring>
#include <utility>
#include <rmm/thrust_rmm_allocator.h>
#include <rmm/device_buffer.hpp>
namespace cudf {
namespace io {
namespace detail {
namespace parquet {
using namespace cudf::io::parquet;
using namespace cudf::io;
namespace {
/**
* @brief Helper for pinned host memory
**/
template <typename T>
using pinned_buffer = std::unique_ptr<T, decltype(&hipHostFree)>;
/**
* @brief Function that translates GDF compression to parquet compression
**/
parquet::Compression to_parquet_compression(compression_type compression)
{
switch (compression) {
case compression_type::AUTO:
case compression_type::SNAPPY: return parquet::Compression::SNAPPY;
case compression_type::NONE: return parquet::Compression::UNCOMPRESSED;
default:
CUDF_EXPECTS(false, "Unsupported compression type");
return parquet::Compression::UNCOMPRESSED;
}
}
} // namespace
/**
* @brief Helper kernel for converting string data/offsets into nvstrdesc
* REMOVEME: Once we eliminate the legacy readers/writers, the kernels could be
* made to use the native offset+data layout.
**/
__global__ void stringdata_to_nvstrdesc(gpu::nvstrdesc_s *dst,
const size_type *offsets,
const char *strdata,
const uint32_t *nulls,
size_type column_size)
{
size_type row = blockIdx.x * blockDim.x + threadIdx.x;
if (row < column_size) {
uint32_t is_valid = (nulls) ? (nulls[row >> 5] >> (row & 0x1f)) & 1 : 1;
size_t count;
const char *ptr;
if (is_valid) {
size_type cur = offsets[row];
size_type next = offsets[row + 1];
ptr = strdata + cur;
count = (next > cur) ? next - cur : 0;
} else {
ptr = nullptr;
count = 0;
}
dst[row].ptr = ptr;
dst[row].count = count;
}
}
/**
* @brief Helper class that adds parquet-specific column info
**/
class parquet_column_view {
public:
/**
* @brief Constructor that extracts out the string position + length pairs
* for building dictionaries for string columns
**/
explicit parquet_column_view(size_t id,
column_view const &col,
const table_metadata *metadata,
hipStream_t stream)
: _id(id),
_string_type(col.type().id() == type_id::STRING),
_type_width(_string_type ? 0 : cudf::size_of(col.type())),
_converted_type(ConvertedType::UNKNOWN),
_ts_scale(0),
_data_count(col.size()),
_null_count(col.null_count()),
_data(col.head<uint8_t>() + col.offset() * _type_width),
_nulls(col.nullable() ? col.null_mask() : nullptr)
{
switch (col.type().id()) {
case cudf::type_id::INT8:
_physical_type = Type::INT32;
_converted_type = ConvertedType::INT_8;
_stats_dtype = statistics_dtype::dtype_int8;
break;
case cudf::type_id::INT16:
_physical_type = Type::INT32;
_converted_type = ConvertedType::INT_16;
_stats_dtype = statistics_dtype::dtype_int16;
break;
case cudf::type_id::INT32:
_physical_type = Type::INT32;
_stats_dtype = statistics_dtype::dtype_int32;
break;
case cudf::type_id::INT64:
_physical_type = Type::INT64;
_stats_dtype = statistics_dtype::dtype_int64;
break;
case cudf::type_id::FLOAT32:
_physical_type = Type::FLOAT;
_stats_dtype = statistics_dtype::dtype_float32;
break;
case cudf::type_id::FLOAT64:
_physical_type = Type::DOUBLE;
_stats_dtype = statistics_dtype::dtype_float64;
break;
case cudf::type_id::BOOL8:
_physical_type = Type::BOOLEAN;
_stats_dtype = statistics_dtype::dtype_bool;
break;
case cudf::type_id::TIMESTAMP_DAYS:
_physical_type = Type::INT32;
_converted_type = ConvertedType::DATE;
_stats_dtype = statistics_dtype::dtype_int32;
break;
case cudf::type_id::TIMESTAMP_SECONDS:
_physical_type = Type::INT64;
_converted_type = ConvertedType::TIMESTAMP_MILLIS;
_stats_dtype = statistics_dtype::dtype_timestamp64;
_ts_scale = 1000;
break;
case cudf::type_id::TIMESTAMP_MILLISECONDS:
_physical_type = Type::INT64;
_converted_type = ConvertedType::TIMESTAMP_MILLIS;
_stats_dtype = statistics_dtype::dtype_timestamp64;
break;
case cudf::type_id::TIMESTAMP_MICROSECONDS:
_physical_type = Type::INT64;
_converted_type = ConvertedType::TIMESTAMP_MICROS;
_stats_dtype = statistics_dtype::dtype_timestamp64;
break;
case cudf::type_id::TIMESTAMP_NANOSECONDS:
_physical_type = Type::INT64;
_converted_type = ConvertedType::TIMESTAMP_MICROS;
_stats_dtype = statistics_dtype::dtype_timestamp64;
_ts_scale = -1000;
break;
case cudf::type_id::STRING:
_physical_type = Type::BYTE_ARRAY;
//_converted_type = ConvertedType::UTF8; // TBD
_stats_dtype = statistics_dtype::dtype_string;
break;
default:
_physical_type = UNDEFINED_TYPE;
_stats_dtype = dtype_none;
break;
}
if (_string_type && _data_count > 0) {
strings_column_view view{col};
_indexes = rmm::device_buffer(_data_count * sizeof(gpu::nvstrdesc_s), stream);
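// launch one thread per string row: 256 threads per block, ((_data_count - 1) >> 8) + 1 blocks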
hipLaunchKernelGGL(( stringdata_to_nvstrdesc), dim3(((_data_count - 1) >> 8) + 1), dim3(256), 0, stream,
reinterpret_cast<gpu::nvstrdesc_s *>(_indexes.data()),
view.offsets().data<size_type>(),
view.chars().data<char>(),
_nulls,
_data_count);
_data = _indexes.data();
CUDA_TRY(hipStreamSynchronize(stream));
}
// Generating default name if name isn't present in metadata
if (metadata && _id < metadata->column_names.size()) {
_name = metadata->column_names[_id];
} else {
_name = "_col" + std::to_string(_id);
}
}
auto is_string() const noexcept { return _string_type; }
size_t type_width() const noexcept { return _type_width; }
size_t data_count() const noexcept { return _data_count; }
size_t null_count() const noexcept { return _null_count; }
bool nullable() const noexcept { return (_nulls != nullptr); }
void const *data() const noexcept { return _data; }
uint32_t const *nulls() const noexcept { return _nulls; }
auto name() const noexcept { return _name; }
auto physical_type() const noexcept { return _physical_type; }
auto converted_type() const noexcept { return _converted_type; }
auto stats_type() const noexcept { return _stats_dtype; }
int32_t ts_scale() const noexcept { return _ts_scale; }
// Dictionary management
uint32_t *get_dict_data() { return (_dict_data.size()) ? _dict_data.data().get() : nullptr; }
uint32_t *get_dict_index() { return (_dict_index.size()) ? _dict_index.data().get() : nullptr; }
void use_dictionary(bool use_dict) { _dictionary_used = use_dict; }
void alloc_dictionary(size_t max_num_rows)
{
_dict_data.resize(max_num_rows);
_dict_index.resize(max_num_rows);
}
bool check_dictionary_used()
{
if (!_dictionary_used) {
_dict_data.resize(0);
_dict_data.shrink_to_fit();
_dict_index.resize(0);
_dict_index.shrink_to_fit();
}
return _dictionary_used;
}
private:
// Identifier within set of columns
size_t _id = 0;
bool _string_type = false;
size_t _type_width = 0;
size_t _data_count = 0;
size_t _null_count = 0;
void const *_data = nullptr;
uint32_t const *_nulls = nullptr;
// parquet-related members
std::string _name{};
Type _physical_type;
ConvertedType _converted_type;
statistics_dtype _stats_dtype;
int32_t _ts_scale;
// Dictionary-related members
bool _dictionary_used = false;
rmm::device_vector<uint32_t> _dict_data;
rmm::device_vector<uint32_t> _dict_index;
// String-related members
rmm::device_buffer _indexes;
};
void writer::impl::init_page_fragments(hostdevice_vector<gpu::PageFragment> &frag,
hostdevice_vector<gpu::EncColumnDesc> &col_desc,
uint32_t num_columns,
uint32_t num_fragments,
uint32_t num_rows,
uint32_t fragment_size,
hipStream_t stream)
{
CUDA_TRY(hipMemcpyAsync(col_desc.device_ptr(),
col_desc.host_ptr(),
col_desc.memory_size(),
hipMemcpyHostToDevice,
stream));
CUDA_TRY(gpu::InitPageFragments(frag.device_ptr(),
col_desc.device_ptr(),
num_fragments,
num_columns,
fragment_size,
num_rows,
stream));
CUDA_TRY(hipMemcpyAsync(
frag.host_ptr(), frag.device_ptr(), frag.memory_size(), hipMemcpyDeviceToHost, stream));
CUDA_TRY(hipStreamSynchronize(stream));
}
void writer::impl::gather_fragment_statistics(statistics_chunk *frag_stats_chunk,
hostdevice_vector<gpu::PageFragment> &frag,
hostdevice_vector<gpu::EncColumnDesc> &col_desc,
uint32_t num_columns,
uint32_t num_fragments,
uint32_t fragment_size,
hipStream_t stream)
{
rmm::device_vector<statistics_group> frag_stats_group(num_fragments * num_columns);
CUDA_TRY(gpu::InitFragmentStatistics(frag_stats_group.data().get(),
frag.device_ptr(),
col_desc.device_ptr(),
num_fragments,
num_columns,
fragment_size,
stream));
CUDA_TRY(GatherColumnStatistics(
frag_stats_chunk, frag_stats_group.data().get(), num_fragments * num_columns, stream));
CUDA_TRY(hipStreamSynchronize(stream));
}
void writer::impl::build_chunk_dictionaries(hostdevice_vector<gpu::EncColumnChunk> &chunks,
hostdevice_vector<gpu::EncColumnDesc> &col_desc,
uint32_t num_rowgroups,
uint32_t num_columns,
uint32_t num_dictionaries,
hipStream_t stream)
{
size_t dict_scratch_size = (size_t)num_dictionaries * gpu::kDictScratchSize;
rmm::device_vector<uint32_t> dict_scratch(dict_scratch_size / sizeof(uint32_t));
CUDA_TRY(hipMemcpyAsync(
chunks.device_ptr(), chunks.host_ptr(), chunks.memory_size(), hipMemcpyHostToDevice, stream));
CUDA_TRY(gpu::BuildChunkDictionaries(chunks.device_ptr(),
dict_scratch.data().get(),
dict_scratch_size,
num_rowgroups * num_columns,
stream));
CUDA_TRY(gpu::InitEncoderPages(chunks.device_ptr(),
nullptr,
col_desc.device_ptr(),
num_rowgroups,
num_columns,
nullptr,
nullptr,
stream));
CUDA_TRY(hipMemcpyAsync(
chunks.host_ptr(), chunks.device_ptr(), chunks.memory_size(), hipMemcpyDeviceToHost, stream));
CUDA_TRY(hipStreamSynchronize(stream));
}
void writer::impl::init_encoder_pages(hostdevice_vector<gpu::EncColumnChunk> &chunks,
hostdevice_vector<gpu::EncColumnDesc> &col_desc,
gpu::EncPage *pages,
statistics_chunk *page_stats,
statistics_chunk *frag_stats,
uint32_t num_rowgroups,
uint32_t num_columns,
uint32_t num_pages,
uint32_t num_stats_bfr,
hipStream_t stream)
{
rmm::device_vector<statistics_merge_group> page_stats_mrg(num_stats_bfr);
CUDA_TRY(hipMemcpyAsync(
chunks.device_ptr(), chunks.host_ptr(), chunks.memory_size(), hipMemcpyHostToDevice, stream));
CUDA_TRY(InitEncoderPages(
chunks.device_ptr(),
pages,
col_desc.device_ptr(),
num_rowgroups,
num_columns,
(num_stats_bfr) ? page_stats_mrg.data().get() : nullptr,
(num_stats_bfr > num_pages) ? page_stats_mrg.data().get() + num_pages : nullptr,
stream));
if (num_stats_bfr > 0) {
CUDA_TRY(MergeColumnStatistics(
page_stats, frag_stats, page_stats_mrg.data().get(), num_pages, stream));
if (num_stats_bfr > num_pages) {
CUDA_TRY(MergeColumnStatistics(page_stats + num_pages,
page_stats,
page_stats_mrg.data().get() + num_pages,
num_stats_bfr - num_pages,
stream));
}
}
CUDA_TRY(hipStreamSynchronize(stream));
}
void writer::impl::encode_pages(hostdevice_vector<gpu::EncColumnChunk> &chunks,
gpu::EncPage *pages,
uint32_t num_columns,
uint32_t pages_in_batch,
uint32_t first_page_in_batch,
uint32_t rowgroups_in_batch,
uint32_t first_rowgroup,
gpu_inflate_input_s *comp_in,
gpu_inflate_status_s *comp_out,
const statistics_chunk *page_stats,
const statistics_chunk *chunk_stats,
hipStream_t stream)
{
CUDA_TRY(gpu::EncodePages(
pages, chunks.device_ptr(), pages_in_batch, first_page_in_batch, comp_in, comp_out, stream));
switch (compression_) {
case parquet::Compression::SNAPPY:
CUDA_TRY(gpu_snap(comp_in, comp_out, pages_in_batch, stream));
break;
default: break;
}
// TBD: Not clear if the official spec actually allows dynamically turning off compression at the
// chunk-level
CUDA_TRY(DecideCompression(chunks.device_ptr() + first_rowgroup * num_columns,
pages,
rowgroups_in_batch * num_columns,
first_page_in_batch,
comp_out,
stream));
CUDA_TRY(EncodePageHeaders(pages,
chunks.device_ptr(),
pages_in_batch,
first_page_in_batch,
comp_out,
page_stats,
chunk_stats,
stream));
CUDA_TRY(GatherPages(chunks.device_ptr() + first_rowgroup * num_columns,
pages,
rowgroups_in_batch * num_columns,
stream));
CUDA_TRY(hipMemcpyAsync(&chunks[first_rowgroup * num_columns],
chunks.device_ptr() + first_rowgroup * num_columns,
rowgroups_in_batch * num_columns * sizeof(gpu::EncColumnChunk),
hipMemcpyDeviceToHost,
stream));
CUDA_TRY(hipStreamSynchronize(stream));
}
writer::impl::impl(std::unique_ptr<data_sink> sink,
writer_options const &options,
rmm::mr::device_memory_resource *mr)
: _mr(mr),
compression_(to_parquet_compression(options.compression)),
stats_granularity_(options.stats_granularity),
out_sink_(std::move(sink))
{
}
std::unique_ptr<std::vector<uint8_t>> writer::impl::write(table_view const &table,
const table_metadata *metadata,
bool return_filemetadata,
const std::string &metadata_out_file_path,
hipStream_t stream)
{
pq_chunked_state state{metadata, SingleWriteMode::YES, stream};
write_chunked_begin(state);
write_chunked(table, state);
return write_chunked_end(state, return_filemetadata, metadata_out_file_path);
}
void writer::impl::write_chunked_begin(pq_chunked_state &state)
{
// Write file header
file_header_s fhdr;
fhdr.magic = PARQUET_MAGIC;
out_sink_->host_write(&fhdr, sizeof(fhdr));
state.current_chunk_offset = sizeof(file_header_s);
}
void writer::impl::write_chunked(table_view const &table, pq_chunked_state &state)
{
size_type num_columns = table.num_columns();
size_type num_rows = 0;
// Wrapper around cudf columns to attach parquet-specific type info.
// Note : I wish we could do this in the begin() function but since the
// metadata is optional we would have no way of knowing how many columns
// we actually have.
std::vector<parquet_column_view> parquet_columns;
parquet_columns.reserve(num_columns); // Avoids unnecessary re-allocation
for (auto it = table.begin(); it < table.end(); ++it) {
const auto col = *it;
const auto current_id = parquet_columns.size();
num_rows = std::max<uint32_t>(num_rows, col.size());
parquet_columns.emplace_back(current_id, col, state.user_metadata, state.stream);
}
if (state.user_metadata_with_nullability.column_nullable.size() > 0) {
CUDF_EXPECTS(state.user_metadata_with_nullability.column_nullable.size() ==
static_cast<size_t>(num_columns),
"When passing values in user_metadata_with_nullability, data for all columns must "
"be specified");
}
// first call. setup metadata. num_rows will get incremented as write_chunked is
// called multiple times.
if (state.md.version == 0) {
state.md.version = 1;
state.md.num_rows = num_rows;
state.md.schema.resize(1 + num_columns);
state.md.schema[0].type = UNDEFINED_TYPE;
state.md.schema[0].repetition_type = NO_REPETITION_TYPE;
state.md.schema[0].name = "schema";
state.md.schema[0].num_children = num_columns;
state.md.column_order_listsize =
(stats_granularity_ != statistics_freq::STATISTICS_NONE) ? num_columns : 0;
if (state.user_metadata != nullptr) {
for (auto it = state.user_metadata->user_data.begin();
it != state.user_metadata->user_data.end();
it++) {
state.md.key_value_metadata.push_back({it->first, it->second});
}
}
for (auto i = 0; i < num_columns; i++) {
auto &col = parquet_columns[i];
// Column metadata
state.md.schema[1 + i].type = col.physical_type();
state.md.schema[1 + i].converted_type = col.converted_type();
// because the repetition type is global (in the sense of, not per-rowgroup or per
// write_chunked() call) we cannot know up front if the user is going to end up passing tables
// with nulls/no nulls in the multiple write_chunked() case. so we'll do some special
// handling.
//
// if the user is explicitly saying "I am only calling this once", fall back to the original
// behavior and assume the columns in this one table tell us everything we need to know.
if (state.single_write_mode) {
state.md.schema[1 + i].repetition_type =
(col.nullable() || col.data_count() < (size_t)num_rows) ? OPTIONAL : REQUIRED;
}
// otherwise, if the user is explicitly telling us global information about all the tables
// that will ever get passed in
else if (state.user_metadata_with_nullability.column_nullable.size() > 0) {
state.md.schema[1 + i].repetition_type =
state.user_metadata_with_nullability.column_nullable[i] ? OPTIONAL : REQUIRED;
}
// otherwise assume the worst case.
else {
state.md.schema[1 + i].repetition_type = OPTIONAL;
}
state.md.schema[1 + i].name = col.name();
state.md.schema[1 + i].num_children = 0; // Leaf node
}
} else {
// verify the user isn't passing mismatched tables
CUDF_EXPECTS(state.md.schema[0].num_children == num_columns,
"Mismatch in table structure between multiple calls to write_chunked");
for (auto i = 0; i < num_columns; i++) {
auto &col = parquet_columns[i];
CUDF_EXPECTS(state.md.schema[1 + i].type == col.physical_type(),
"Mismatch in column types between multiple calls to write_chunked");
}
// increment num rows
state.md.num_rows += num_rows;
}
// Initialize column description
hostdevice_vector<gpu::EncColumnDesc> col_desc(num_columns);
// setup gpu column description.
// applicable to only this _write_chunked() call
for (auto i = 0; i < num_columns; i++) {
auto &col = parquet_columns[i];
// GPU column description
auto *desc = &col_desc[i];
desc->column_data_base = col.data();
desc->valid_map_base = col.nulls();
desc->stats_dtype = col.stats_type();
desc->ts_scale = col.ts_scale();
if (state.md.schema[1 + i].type != BOOLEAN && state.md.schema[1 + i].type != UNDEFINED_TYPE) {
col.alloc_dictionary(num_rows);
desc->dict_index = col.get_dict_index();
desc->dict_data = col.get_dict_data();
} else {
desc->dict_data = nullptr;
desc->dict_index = nullptr;
}
desc->num_rows = col.data_count();
desc->physical_type = static_cast<uint8_t>(state.md.schema[1 + i].type);
desc->converted_type = static_cast<uint8_t>(state.md.schema[1 + i].converted_type);
desc->level_bits = (state.md.schema[1 + i].repetition_type == OPTIONAL) ? 1 : 0;
}
// Init page fragments
// 5000 is good enough for up to ~200-character strings. Longer strings will start producing
// fragments larger than the desired page size -> TODO: keep track of the max fragment size, and
// iteratively reduce this value if the largest fragment exceeds the max page size limit (we
// ideally want the page size to be below 1MB so as to have enough pages to get good
// compression/decompression performance).
uint32_t fragment_size = 5000;
uint32_t num_fragments = (uint32_t)((num_rows + fragment_size - 1) / fragment_size);
hostdevice_vector<gpu::PageFragment> fragments(num_columns * num_fragments);
if (fragments.size() != 0) {
init_page_fragments(
fragments, col_desc, num_columns, num_fragments, num_rows, fragment_size, state.stream);
}
size_t global_rowgroup_base = state.md.row_groups.size();
// Decide row group boundaries based on uncompressed data size
size_t rowgroup_size = 0;
uint32_t num_rowgroups = 0;
for (uint32_t f = 0, global_r = global_rowgroup_base, rowgroup_start = 0; f < num_fragments;
f++) {
size_t fragment_data_size = 0;
for (auto i = 0; i < num_columns; i++) {
fragment_data_size += fragments[i * num_fragments + f].fragment_data_size;
}
if (f > rowgroup_start && (rowgroup_size + fragment_data_size > max_rowgroup_size_ ||
(f + 1 - rowgroup_start) * fragment_size > max_rowgroup_rows_)) {
// update schema
state.md.row_groups.resize(state.md.row_groups.size() + 1);
state.md.row_groups[global_r++].num_rows = (f - rowgroup_start) * fragment_size;
num_rowgroups++;
rowgroup_start = f;
rowgroup_size = 0;
}
rowgroup_size += fragment_data_size;
if (f + 1 == num_fragments) {
// update schema
state.md.row_groups.resize(state.md.row_groups.size() + 1);
state.md.row_groups[global_r++].num_rows = num_rows - rowgroup_start * fragment_size;
num_rowgroups++;
}
}
// Allocate column chunks and gather fragment statistics
rmm::device_vector<statistics_chunk> frag_stats;
if (stats_granularity_ != statistics_freq::STATISTICS_NONE) {
frag_stats.resize(num_fragments * num_columns);
if (frag_stats.size() != 0) {
gather_fragment_statistics(frag_stats.data().get(),
fragments,
col_desc,
num_columns,
num_fragments,
fragment_size,
state.stream);
}
}
// Initialize row groups and column chunks
uint32_t num_chunks = num_rowgroups * num_columns;
hostdevice_vector<gpu::EncColumnChunk> chunks(num_chunks);
uint32_t num_dictionaries = 0;
for (uint32_t r = 0, global_r = global_rowgroup_base, f = 0, start_row = 0; r < num_rowgroups;
r++, global_r++) {
uint32_t fragments_in_chunk =
(uint32_t)((state.md.row_groups[global_r].num_rows + fragment_size - 1) / fragment_size);
state.md.row_groups[global_r].total_byte_size = 0;
state.md.row_groups[global_r].columns.resize(num_columns);
for (int i = 0; i < num_columns; i++) {
gpu::EncColumnChunk *ck = &chunks[r * num_columns + i];
bool dict_enable = false;
ck->col_desc = col_desc.device_ptr() + i;
ck->uncompressed_bfr = nullptr;
ck->compressed_bfr = nullptr;
ck->bfr_size = 0;
ck->compressed_size = 0;
ck->fragments = fragments.device_ptr() + i * num_fragments + f;
ck->stats =
(frag_stats.size() != 0) ? frag_stats.data().get() + i * num_fragments + f : nullptr;
ck->start_row = start_row;
ck->num_rows = (uint32_t)state.md.row_groups[global_r].num_rows;
ck->first_fragment = i * num_fragments + f;
ck->first_page = 0;
ck->num_pages = 0;
ck->is_compressed = 0;
ck->dictionary_id = num_dictionaries;
ck->ck_stat_size = 0;
if (col_desc[i].dict_data) {
const gpu::PageFragment *ck_frag = &fragments[i * num_fragments + f];
size_t plain_size = 0;
size_t dict_size = 1;
uint32_t num_dict_vals = 0;
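// rough size comparison between plain and dictionary encoding: dictionary indices are
// counted as 1 byte per non-null value while at most 256 dictionary entries have been
// seen so far, and 2 bytes per value beyond that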
for (uint32_t j = 0; j < fragments_in_chunk && num_dict_vals < 65536; j++) {
plain_size += ck_frag[j].fragment_data_size;
dict_size +=
ck_frag[j].dict_data_size + ((num_dict_vals > 256) ? 2 : 1) * ck_frag[j].non_nulls;
num_dict_vals += ck_frag[j].num_dict_vals;
}
if (dict_size < plain_size) {
parquet_columns[i].use_dictionary(true);
dict_enable = true;
num_dictionaries++;
}
}
ck->has_dictionary = dict_enable;
state.md.row_groups[global_r].columns[i].meta_data.type = state.md.schema[1 + i].type;
state.md.row_groups[global_r].columns[i].meta_data.encodings = {PLAIN, RLE};
if (dict_enable) {
state.md.row_groups[global_r].columns[i].meta_data.encodings.push_back(PLAIN_DICTIONARY);
}
state.md.row_groups[global_r].columns[i].meta_data.path_in_schema = {
state.md.schema[1 + i].name};
state.md.row_groups[global_r].columns[i].meta_data.codec = UNCOMPRESSED;
state.md.row_groups[global_r].columns[i].meta_data.num_values =
state.md.row_groups[global_r].num_rows;
}
f += fragments_in_chunk;
start_row += (uint32_t)state.md.row_groups[global_r].num_rows;
}
// Free unused dictionaries
for (auto &col : parquet_columns) { col.check_dictionary_used(); }
// Build chunk dictionaries and count pages
if (num_chunks != 0) {
build_chunk_dictionaries(
chunks, col_desc, num_rowgroups, num_columns, num_dictionaries, state.stream);
}
// Initialize batches of rowgroups to encode (mainly to limit peak memory usage)
std::vector<uint32_t> batch_list;
uint32_t num_pages = 0;
size_t max_bytes_in_batch = 1024 * 1024 * 1024; // 1GB - TBD: Tune this
size_t max_uncomp_bfr_size = 0;
size_t max_chunk_bfr_size = 0;
uint32_t max_pages_in_batch = 0;
size_t bytes_in_batch = 0;
for (uint32_t r = 0, groups_in_batch = 0, pages_in_batch = 0; r <= num_rowgroups; r++) {
size_t rowgroup_size = 0;
if (r < num_rowgroups) {
for (int i = 0; i < num_columns; i++) {
gpu::EncColumnChunk *ck = &chunks[r * num_columns + i];
ck->first_page = num_pages;
num_pages += ck->num_pages;
pages_in_batch += ck->num_pages;
rowgroup_size += ck->bfr_size;
max_chunk_bfr_size =
::max(max_chunk_bfr_size, (size_t)::max(ck->bfr_size, ck->compressed_size));
}
}
// TBD: We may want to also shorten the batch if we have enough pages (not just based on size)
if ((r == num_rowgroups) ||
(groups_in_batch != 0 && bytes_in_batch + rowgroup_size > max_bytes_in_batch)) {
max_uncomp_bfr_size = ::max(max_uncomp_bfr_size, bytes_in_batch);
max_pages_in_batch = ::max(max_pages_in_batch, pages_in_batch);
if (groups_in_batch != 0) {
batch_list.push_back(groups_in_batch);
groups_in_batch = 0;
}
bytes_in_batch = 0;
pages_in_batch = 0;
}
bytes_in_batch += rowgroup_size;
groups_in_batch++;
}
// Initialize data pointers in batch
size_t max_comp_bfr_size =
(compression_ != parquet::Compression::UNCOMPRESSED)
? gpu::GetMaxCompressedBfrSize(max_uncomp_bfr_size, max_pages_in_batch)
: 0;
uint32_t max_comp_pages =
(compression_ != parquet::Compression::UNCOMPRESSED) ? max_pages_in_batch : 0;
uint32_t num_stats_bfr =
(stats_granularity_ != statistics_freq::STATISTICS_NONE) ? num_pages + num_chunks : 0;
rmm::device_buffer uncomp_bfr(max_uncomp_bfr_size, state.stream);
rmm::device_buffer comp_bfr(max_comp_bfr_size, state.stream);
rmm::device_vector<gpu_inflate_input_s> comp_in(max_comp_pages);
rmm::device_vector<gpu_inflate_status_s> comp_out(max_comp_pages);
rmm::device_vector<gpu::EncPage> pages(num_pages);
rmm::device_vector<statistics_chunk> page_stats(num_stats_bfr);
for (uint32_t b = 0, r = 0; b < (uint32_t)batch_list.size(); b++) {
uint8_t *bfr = reinterpret_cast<uint8_t *>(uncomp_bfr.data());
uint8_t *bfr_c = reinterpret_cast<uint8_t *>(comp_bfr.data());
for (uint32_t j = 0; j < batch_list[b]; j++, r++) {
for (int i = 0; i < num_columns; i++) {
gpu::EncColumnChunk *ck = &chunks[r * num_columns + i];
ck->uncompressed_bfr = bfr;
ck->compressed_bfr = bfr_c;
bfr += ck->bfr_size;
bfr_c += ck->compressed_size;
}
}
}
if (num_pages != 0) {
init_encoder_pages(chunks,
col_desc,
pages.data().get(),
(num_stats_bfr) ? page_stats.data().get() : nullptr,
(num_stats_bfr) ? frag_stats.data().get() : nullptr,
num_rowgroups,
num_columns,
num_pages,
num_stats_bfr,
state.stream);
}
auto host_bfr = [&]() {
// if the writer supports device_write(), we don't need this scratch space
if (out_sink_->supports_device_write()) {
return pinned_buffer<uint8_t>{nullptr, hipHostFree};
} else {
return pinned_buffer<uint8_t>{[](size_t size) {
uint8_t *ptr = nullptr;
CUDA_TRY(hipHostMalloc(&ptr, size));
return ptr;
}(max_chunk_bfr_size),
hipHostFree};
}
}();
// Encode row groups in batches
for (uint32_t b = 0, r = 0, global_r = global_rowgroup_base; b < (uint32_t)batch_list.size();
b++) {
// Count pages in this batch
uint32_t rnext = r + batch_list[b];
uint32_t first_page_in_batch = chunks[r * num_columns].first_page;
uint32_t first_page_in_next_batch =
(rnext < num_rowgroups) ? chunks[rnext * num_columns].first_page : num_pages;
uint32_t pages_in_batch = first_page_in_next_batch - first_page_in_batch;
encode_pages(
chunks,
pages.data().get(),
num_columns,
pages_in_batch,
first_page_in_batch,
batch_list[b],
r,
comp_in.data().get(),
comp_out.data().get(),
(stats_granularity_ == statistics_freq::STATISTICS_PAGE) ? page_stats.data().get() : nullptr,
(stats_granularity_ != statistics_freq::STATISTICS_NONE) ? page_stats.data().get() + num_pages
: nullptr,
state.stream);
for (; r < rnext; r++, global_r++) {
for (auto i = 0; i < num_columns; i++) {
gpu::EncColumnChunk *ck = &chunks[r * num_columns + i];
uint8_t *dev_bfr;
if (ck->is_compressed) {
state.md.row_groups[global_r].columns[i].meta_data.codec = compression_;
dev_bfr = ck->compressed_bfr;
} else {
dev_bfr = ck->uncompressed_bfr;
}
if (out_sink_->supports_device_write()) {
// let the writer do what it wants to retrieve the data from the gpu.
out_sink_->device_write(dev_bfr + ck->ck_stat_size, ck->compressed_size, state.stream);
// we still need to do a (much smaller) memcpy for the statistics.
if (ck->ck_stat_size != 0) {
state.md.row_groups[global_r].columns[i].meta_data.statistics_blob.resize(
ck->ck_stat_size);
CUDA_TRY(hipMemcpyAsync(
state.md.row_groups[global_r].columns[i].meta_data.statistics_blob.data(),
dev_bfr,
ck->ck_stat_size,
hipMemcpyDeviceToHost,
state.stream));
CUDA_TRY(hipStreamSynchronize(state.stream));
}
} else {
// copy the full data
CUDA_TRY(hipMemcpyAsync(host_bfr.get(),
dev_bfr,
ck->ck_stat_size + ck->compressed_size,
hipMemcpyDeviceToHost,
state.stream));
CUDA_TRY(hipStreamSynchronize(state.stream));
out_sink_->host_write(host_bfr.get() + ck->ck_stat_size, ck->compressed_size);
if (ck->ck_stat_size != 0) {
state.md.row_groups[global_r].columns[i].meta_data.statistics_blob.resize(
ck->ck_stat_size);
memcpy(state.md.row_groups[global_r].columns[i].meta_data.statistics_blob.data(),
host_bfr.get(),
ck->ck_stat_size);
}
}
state.md.row_groups[global_r].total_byte_size += ck->compressed_size;
state.md.row_groups[global_r].columns[i].meta_data.data_page_offset =
state.current_chunk_offset + ((ck->has_dictionary) ? ck->dictionary_size : 0);
state.md.row_groups[global_r].columns[i].meta_data.dictionary_page_offset =
(ck->has_dictionary) ? state.current_chunk_offset : 0;
state.md.row_groups[global_r].columns[i].meta_data.total_uncompressed_size = ck->bfr_size;
state.md.row_groups[global_r].columns[i].meta_data.total_compressed_size =
ck->compressed_size;
state.current_chunk_offset += ck->compressed_size;
}
}
}
}
std::unique_ptr<std::vector<uint8_t>> writer::impl::write_chunked_end(
pq_chunked_state &state, bool return_filemetadata, const std::string &metadata_out_file_path)
{
CompactProtocolWriter cpw(&buffer_);
file_ender_s fendr;
buffer_.resize(0);
fendr.footer_len = static_cast<uint32_t>(cpw.write(&state.md));
fendr.magic = PARQUET_MAGIC;
out_sink_->host_write(buffer_.data(), buffer_.size());
out_sink_->host_write(&fendr, sizeof(fendr));
out_sink_->flush();
// Optionally output raw file metadata with the specified column chunk file path
if (return_filemetadata) {
file_header_s fhdr = {PARQUET_MAGIC};
buffer_.resize(0);
buffer_.insert(buffer_.end(),
reinterpret_cast<const uint8_t *>(&fhdr),
reinterpret_cast<const uint8_t *>(&fhdr) + sizeof(fhdr));
for (auto &rowgroup : state.md.row_groups) {
for (auto &col : rowgroup.columns) { col.file_path = metadata_out_file_path; }
}
fendr.footer_len = static_cast<uint32_t>(cpw.write(&state.md));
buffer_.insert(buffer_.end(),
reinterpret_cast<const uint8_t *>(&fendr),
reinterpret_cast<const uint8_t *>(&fendr) + sizeof(fendr));
return std::make_unique<std::vector<uint8_t>>(std::move(buffer_));
} else {
return {nullptr};
}
}
// Forward to implementation
writer::writer(std::unique_ptr<data_sink> sink,
writer_options const &options,
rmm::mr::device_memory_resource *mr)
: _impl(std::make_unique<impl>(std::move(sink), options, mr))
{
}
// Destructor within this translation unit
writer::~writer() = default;
// Forward to implementation
std::unique_ptr<std::vector<uint8_t>> writer::write_all(table_view const &table,
const table_metadata *metadata,
bool return_filemetadata,
const std::string metadata_out_file_path,
hipStream_t stream)
{
return _impl->write(table, metadata, return_filemetadata, metadata_out_file_path, stream);
}
// Forward to implementation
void writer::write_chunked_begin(pq_chunked_state &state)
{
return _impl->write_chunked_begin(state);
}
// Forward to implementation
void writer::write_chunked(table_view const &table, pq_chunked_state &state)
{
_impl->write_chunked(table, state);
}
// Forward to implementation
void writer::write_chunked_end(pq_chunked_state &state) { _impl->write_chunked_end(state); }
std::unique_ptr<std::vector<uint8_t>> writer::merge_rowgroup_metadata(
const std::vector<std::unique_ptr<std::vector<uint8_t>>> &metadata_list)
{
std::vector<uint8_t> output;
CompactProtocolWriter cpw(&output);
FileMetaData md;
md.row_groups.reserve(metadata_list.size());
for (const auto &blob : metadata_list) {
CompactProtocolReader cpreader(
blob.get()->data(),
std::max<size_t>(blob.get()->size(), sizeof(file_ender_s)) - sizeof(file_ender_s));
cpreader.skip_bytes(sizeof(file_header_s)); // Skip over file header
if (md.num_rows == 0) {
cpreader.read(&md);
} else {
FileMetaData tmp;
cpreader.read(&tmp);
md.row_groups.insert(md.row_groups.end(),
std::make_move_iterator(tmp.row_groups.begin()),
std::make_move_iterator(tmp.row_groups.end()));
md.num_rows += tmp.num_rows;
}
}
// Reader doesn't currently populate column_order, so infer it here
if (md.row_groups.size() != 0) {
uint32_t num_columns = static_cast<uint32_t>(md.row_groups[0].columns.size());
md.column_order_listsize =
(num_columns > 0 && md.row_groups[0].columns[0].meta_data.statistics_blob.size())
? num_columns
: 0;
}
// Thrift-encode the resulting output
file_header_s fhdr;
file_ender_s fendr;
fhdr.magic = PARQUET_MAGIC;
output.insert(output.end(),
reinterpret_cast<const uint8_t *>(&fhdr),
reinterpret_cast<const uint8_t *>(&fhdr) + sizeof(fhdr));
fendr.footer_len = static_cast<uint32_t>(cpw.write(&md));
fendr.magic = PARQUET_MAGIC;
output.insert(output.end(),
reinterpret_cast<const uint8_t *>(&fendr),
reinterpret_cast<const uint8_t *>(&fendr) + sizeof(fendr));
return std::make_unique<std::vector<uint8_t>>(std::move(output));
}
} // namespace parquet
} // namespace detail
} // namespace io
} // namespace cudf
|
16b3228985977fdb8331a4052f784cedc4e04819.cu
|
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file writer_impl.cu
* @brief cuDF-IO parquet writer class implementation
*/
#include "writer_impl.hpp"
#include <cudf/null_mask.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <algorithm>
#include <cstring>
#include <utility>
#include <rmm/thrust_rmm_allocator.h>
#include <rmm/device_buffer.hpp>
namespace cudf {
namespace io {
namespace detail {
namespace parquet {
using namespace cudf::io::parquet;
using namespace cudf::io;
namespace {
/**
* @brief Helper for pinned host memory
**/
template <typename T>
using pinned_buffer = std::unique_ptr<T, decltype(&cudaFreeHost)>;
/**
* @brief Function that translates GDF compression to parquet compression
**/
parquet::Compression to_parquet_compression(compression_type compression)
{
switch (compression) {
case compression_type::AUTO:
case compression_type::SNAPPY: return parquet::Compression::SNAPPY;
case compression_type::NONE: return parquet::Compression::UNCOMPRESSED;
default:
CUDF_EXPECTS(false, "Unsupported compression type");
return parquet::Compression::UNCOMPRESSED;
}
}
} // namespace
/**
* @brief Helper kernel for converting string data/offsets into nvstrdesc
* REMOVEME: Once we eliminate the legacy readers/writers, the kernels could be
* made to use the native offset+data layout.
**/
__global__ void stringdata_to_nvstrdesc(gpu::nvstrdesc_s *dst,
const size_type *offsets,
const char *strdata,
const uint32_t *nulls,
size_type column_size)
{
size_type row = blockIdx.x * blockDim.x + threadIdx.x;
if (row < column_size) {
    uint32_t is_valid = (nulls) ? (nulls[row >> 5] >> (row & 0x1f)) & 1 : 1;  // bit (row % 32) of word (row / 32) in the validity mask; no mask means all rows valid
size_t count;
const char *ptr;
if (is_valid) {
size_type cur = offsets[row];
size_type next = offsets[row + 1];
ptr = strdata + cur;
count = (next > cur) ? next - cur : 0;
} else {
ptr = nullptr;
count = 0;
}
dst[row].ptr = ptr;
dst[row].count = count;
}
}
/**
* @brief Helper class that adds parquet-specific column info
**/
class parquet_column_view {
public:
/**
* @brief Constructor that extracts out the string position + length pairs
* for building dictionaries for string columns
**/
explicit parquet_column_view(size_t id,
column_view const &col,
const table_metadata *metadata,
cudaStream_t stream)
: _id(id),
_string_type(col.type().id() == type_id::STRING),
_type_width(_string_type ? 0 : cudf::size_of(col.type())),
_converted_type(ConvertedType::UNKNOWN),
_ts_scale(0),
_data_count(col.size()),
_null_count(col.null_count()),
_data(col.head<uint8_t>() + col.offset() * _type_width),
_nulls(col.nullable() ? col.null_mask() : nullptr)
{
switch (col.type().id()) {
case cudf::type_id::INT8:
_physical_type = Type::INT32;
_converted_type = ConvertedType::INT_8;
_stats_dtype = statistics_dtype::dtype_int8;
break;
case cudf::type_id::INT16:
_physical_type = Type::INT32;
_converted_type = ConvertedType::INT_16;
_stats_dtype = statistics_dtype::dtype_int16;
break;
case cudf::type_id::INT32:
_physical_type = Type::INT32;
_stats_dtype = statistics_dtype::dtype_int32;
break;
case cudf::type_id::INT64:
_physical_type = Type::INT64;
_stats_dtype = statistics_dtype::dtype_int64;
break;
case cudf::type_id::FLOAT32:
_physical_type = Type::FLOAT;
_stats_dtype = statistics_dtype::dtype_float32;
break;
case cudf::type_id::FLOAT64:
_physical_type = Type::DOUBLE;
_stats_dtype = statistics_dtype::dtype_float64;
break;
case cudf::type_id::BOOL8:
_physical_type = Type::BOOLEAN;
_stats_dtype = statistics_dtype::dtype_bool;
break;
case cudf::type_id::TIMESTAMP_DAYS:
_physical_type = Type::INT32;
_converted_type = ConvertedType::DATE;
_stats_dtype = statistics_dtype::dtype_int32;
break;
case cudf::type_id::TIMESTAMP_SECONDS:
_physical_type = Type::INT64;
_converted_type = ConvertedType::TIMESTAMP_MILLIS;
_stats_dtype = statistics_dtype::dtype_timestamp64;
        _ts_scale = 1000;  // seconds are widened to TIMESTAMP_MILLIS (positive scale: multiply by 1000)
break;
case cudf::type_id::TIMESTAMP_MILLISECONDS:
_physical_type = Type::INT64;
_converted_type = ConvertedType::TIMESTAMP_MILLIS;
_stats_dtype = statistics_dtype::dtype_timestamp64;
break;
case cudf::type_id::TIMESTAMP_MICROSECONDS:
_physical_type = Type::INT64;
_converted_type = ConvertedType::TIMESTAMP_MICROS;
_stats_dtype = statistics_dtype::dtype_timestamp64;
break;
case cudf::type_id::TIMESTAMP_NANOSECONDS:
_physical_type = Type::INT64;
_converted_type = ConvertedType::TIMESTAMP_MICROS;
_stats_dtype = statistics_dtype::dtype_timestamp64;
        _ts_scale = -1000;  // nanoseconds are narrowed to TIMESTAMP_MICROS (negative scale: divide by 1000)
break;
case cudf::type_id::STRING:
_physical_type = Type::BYTE_ARRAY;
//_converted_type = ConvertedType::UTF8; // TBD
_stats_dtype = statistics_dtype::dtype_string;
break;
default:
_physical_type = UNDEFINED_TYPE;
_stats_dtype = dtype_none;
break;
}
if (_string_type && _data_count > 0) {
strings_column_view view{col};
_indexes = rmm::device_buffer(_data_count * sizeof(gpu::nvstrdesc_s), stream);
stringdata_to_nvstrdesc<<<((_data_count - 1) >> 8) + 1, 256, 0, stream>>>(
reinterpret_cast<gpu::nvstrdesc_s *>(_indexes.data()),
view.offsets().data<size_type>(),
view.chars().data<char>(),
_nulls,
_data_count);
_data = _indexes.data();
CUDA_TRY(cudaStreamSynchronize(stream));
}
// Generating default name if name isn't present in metadata
if (metadata && _id < metadata->column_names.size()) {
_name = metadata->column_names[_id];
} else {
_name = "_col" + std::to_string(_id);
}
}
auto is_string() const noexcept { return _string_type; }
size_t type_width() const noexcept { return _type_width; }
size_t data_count() const noexcept { return _data_count; }
size_t null_count() const noexcept { return _null_count; }
bool nullable() const noexcept { return (_nulls != nullptr); }
void const *data() const noexcept { return _data; }
uint32_t const *nulls() const noexcept { return _nulls; }
auto name() const noexcept { return _name; }
auto physical_type() const noexcept { return _physical_type; }
auto converted_type() const noexcept { return _converted_type; }
auto stats_type() const noexcept { return _stats_dtype; }
int32_t ts_scale() const noexcept { return _ts_scale; }
// Dictionary management
uint32_t *get_dict_data() { return (_dict_data.size()) ? _dict_data.data().get() : nullptr; }
uint32_t *get_dict_index() { return (_dict_index.size()) ? _dict_index.data().get() : nullptr; }
void use_dictionary(bool use_dict) { _dictionary_used = use_dict; }
void alloc_dictionary(size_t max_num_rows)
{
_dict_data.resize(max_num_rows);
_dict_index.resize(max_num_rows);
}
bool check_dictionary_used()
{
if (!_dictionary_used) {
_dict_data.resize(0);
_dict_data.shrink_to_fit();
_dict_index.resize(0);
_dict_index.shrink_to_fit();
}
return _dictionary_used;
}
private:
// Identifier within set of columns
size_t _id = 0;
bool _string_type = false;
size_t _type_width = 0;
size_t _data_count = 0;
size_t _null_count = 0;
void const *_data = nullptr;
uint32_t const *_nulls = nullptr;
// parquet-related members
std::string _name{};
Type _physical_type;
ConvertedType _converted_type;
statistics_dtype _stats_dtype;
int32_t _ts_scale;
// Dictionary-related members
bool _dictionary_used = false;
rmm::device_vector<uint32_t> _dict_data;
rmm::device_vector<uint32_t> _dict_index;
// String-related members
rmm::device_buffer _indexes;
};
void writer::impl::init_page_fragments(hostdevice_vector<gpu::PageFragment> &frag,
hostdevice_vector<gpu::EncColumnDesc> &col_desc,
uint32_t num_columns,
uint32_t num_fragments,
uint32_t num_rows,
uint32_t fragment_size,
cudaStream_t stream)
{
CUDA_TRY(cudaMemcpyAsync(col_desc.device_ptr(),
col_desc.host_ptr(),
col_desc.memory_size(),
cudaMemcpyHostToDevice,
stream));
CUDA_TRY(gpu::InitPageFragments(frag.device_ptr(),
col_desc.device_ptr(),
num_fragments,
num_columns,
fragment_size,
num_rows,
stream));
CUDA_TRY(cudaMemcpyAsync(
frag.host_ptr(), frag.device_ptr(), frag.memory_size(), cudaMemcpyDeviceToHost, stream));
CUDA_TRY(cudaStreamSynchronize(stream));
}
void writer::impl::gather_fragment_statistics(statistics_chunk *frag_stats_chunk,
hostdevice_vector<gpu::PageFragment> &frag,
hostdevice_vector<gpu::EncColumnDesc> &col_desc,
uint32_t num_columns,
uint32_t num_fragments,
uint32_t fragment_size,
cudaStream_t stream)
{
rmm::device_vector<statistics_group> frag_stats_group(num_fragments * num_columns);
CUDA_TRY(gpu::InitFragmentStatistics(frag_stats_group.data().get(),
frag.device_ptr(),
col_desc.device_ptr(),
num_fragments,
num_columns,
fragment_size,
stream));
CUDA_TRY(GatherColumnStatistics(
frag_stats_chunk, frag_stats_group.data().get(), num_fragments * num_columns, stream));
CUDA_TRY(cudaStreamSynchronize(stream));
}
void writer::impl::build_chunk_dictionaries(hostdevice_vector<gpu::EncColumnChunk> &chunks,
hostdevice_vector<gpu::EncColumnDesc> &col_desc,
uint32_t num_rowgroups,
uint32_t num_columns,
uint32_t num_dictionaries,
cudaStream_t stream)
{
size_t dict_scratch_size = (size_t)num_dictionaries * gpu::kDictScratchSize;
rmm::device_vector<uint32_t> dict_scratch(dict_scratch_size / sizeof(uint32_t));
CUDA_TRY(cudaMemcpyAsync(
chunks.device_ptr(), chunks.host_ptr(), chunks.memory_size(), cudaMemcpyHostToDevice, stream));
CUDA_TRY(gpu::BuildChunkDictionaries(chunks.device_ptr(),
dict_scratch.data().get(),
dict_scratch_size,
num_rowgroups * num_columns,
stream));
CUDA_TRY(gpu::InitEncoderPages(chunks.device_ptr(),
nullptr,
col_desc.device_ptr(),
num_rowgroups,
num_columns,
nullptr,
nullptr,
stream));
CUDA_TRY(cudaMemcpyAsync(
chunks.host_ptr(), chunks.device_ptr(), chunks.memory_size(), cudaMemcpyDeviceToHost, stream));
CUDA_TRY(cudaStreamSynchronize(stream));
}
void writer::impl::init_encoder_pages(hostdevice_vector<gpu::EncColumnChunk> &chunks,
hostdevice_vector<gpu::EncColumnDesc> &col_desc,
gpu::EncPage *pages,
statistics_chunk *page_stats,
statistics_chunk *frag_stats,
uint32_t num_rowgroups,
uint32_t num_columns,
uint32_t num_pages,
uint32_t num_stats_bfr,
cudaStream_t stream)
{
rmm::device_vector<statistics_merge_group> page_stats_mrg(num_stats_bfr);
CUDA_TRY(cudaMemcpyAsync(
chunks.device_ptr(), chunks.host_ptr(), chunks.memory_size(), cudaMemcpyHostToDevice, stream));
CUDA_TRY(InitEncoderPages(
chunks.device_ptr(),
pages,
col_desc.device_ptr(),
num_rowgroups,
num_columns,
(num_stats_bfr) ? page_stats_mrg.data().get() : nullptr,
(num_stats_bfr > num_pages) ? page_stats_mrg.data().get() + num_pages : nullptr,
stream));
if (num_stats_bfr > 0) {
CUDA_TRY(MergeColumnStatistics(
page_stats, frag_stats, page_stats_mrg.data().get(), num_pages, stream));
if (num_stats_bfr > num_pages) {
CUDA_TRY(MergeColumnStatistics(page_stats + num_pages,
page_stats,
page_stats_mrg.data().get() + num_pages,
num_stats_bfr - num_pages,
stream));
}
}
CUDA_TRY(cudaStreamSynchronize(stream));
}
void writer::impl::encode_pages(hostdevice_vector<gpu::EncColumnChunk> &chunks,
gpu::EncPage *pages,
uint32_t num_columns,
uint32_t pages_in_batch,
uint32_t first_page_in_batch,
uint32_t rowgroups_in_batch,
uint32_t first_rowgroup,
gpu_inflate_input_s *comp_in,
gpu_inflate_status_s *comp_out,
const statistics_chunk *page_stats,
const statistics_chunk *chunk_stats,
cudaStream_t stream)
{
CUDA_TRY(gpu::EncodePages(
pages, chunks.device_ptr(), pages_in_batch, first_page_in_batch, comp_in, comp_out, stream));
switch (compression_) {
case parquet::Compression::SNAPPY:
CUDA_TRY(gpu_snap(comp_in, comp_out, pages_in_batch, stream));
break;
default: break;
}
// TBD: Not clear if the official spec actually allows dynamically turning off compression at the
// chunk-level
CUDA_TRY(DecideCompression(chunks.device_ptr() + first_rowgroup * num_columns,
pages,
rowgroups_in_batch * num_columns,
first_page_in_batch,
comp_out,
stream));
CUDA_TRY(EncodePageHeaders(pages,
chunks.device_ptr(),
pages_in_batch,
first_page_in_batch,
comp_out,
page_stats,
chunk_stats,
stream));
CUDA_TRY(GatherPages(chunks.device_ptr() + first_rowgroup * num_columns,
pages,
rowgroups_in_batch * num_columns,
stream));
CUDA_TRY(cudaMemcpyAsync(&chunks[first_rowgroup * num_columns],
chunks.device_ptr() + first_rowgroup * num_columns,
rowgroups_in_batch * num_columns * sizeof(gpu::EncColumnChunk),
cudaMemcpyDeviceToHost,
stream));
CUDA_TRY(cudaStreamSynchronize(stream));
}
writer::impl::impl(std::unique_ptr<data_sink> sink,
writer_options const &options,
rmm::mr::device_memory_resource *mr)
: _mr(mr),
compression_(to_parquet_compression(options.compression)),
stats_granularity_(options.stats_granularity),
out_sink_(std::move(sink))
{
}
std::unique_ptr<std::vector<uint8_t>> writer::impl::write(table_view const &table,
const table_metadata *metadata,
bool return_filemetadata,
const std::string &metadata_out_file_path,
cudaStream_t stream)
{
pq_chunked_state state{metadata, SingleWriteMode::YES, stream};
write_chunked_begin(state);
write_chunked(table, state);
return write_chunked_end(state, return_filemetadata, metadata_out_file_path);
}
void writer::impl::write_chunked_begin(pq_chunked_state &state)
{
// Write file header
file_header_s fhdr;
fhdr.magic = PARQUET_MAGIC;
out_sink_->host_write(&fhdr, sizeof(fhdr));
state.current_chunk_offset = sizeof(file_header_s);
}
void writer::impl::write_chunked(table_view const &table, pq_chunked_state &state)
{
size_type num_columns = table.num_columns();
size_type num_rows = 0;
// Wrapper around cudf columns to attach parquet-specific type info.
// Note : I wish we could do this in the begin() function but since the
// metadata is optional we would have no way of knowing how many columns
// we actually have.
std::vector<parquet_column_view> parquet_columns;
parquet_columns.reserve(num_columns); // Avoids unnecessary re-allocation
for (auto it = table.begin(); it < table.end(); ++it) {
const auto col = *it;
const auto current_id = parquet_columns.size();
num_rows = std::max<uint32_t>(num_rows, col.size());
parquet_columns.emplace_back(current_id, col, state.user_metadata, state.stream);
}
if (state.user_metadata_with_nullability.column_nullable.size() > 0) {
CUDF_EXPECTS(state.user_metadata_with_nullability.column_nullable.size() ==
static_cast<size_t>(num_columns),
"When passing values in user_metadata_with_nullability, data for all columns must "
"be specified");
}
// first call. setup metadata. num_rows will get incremented as write_chunked is
// called multiple times.
if (state.md.version == 0) {
state.md.version = 1;
state.md.num_rows = num_rows;
state.md.schema.resize(1 + num_columns);
state.md.schema[0].type = UNDEFINED_TYPE;
state.md.schema[0].repetition_type = NO_REPETITION_TYPE;
state.md.schema[0].name = "schema";
state.md.schema[0].num_children = num_columns;
state.md.column_order_listsize =
(stats_granularity_ != statistics_freq::STATISTICS_NONE) ? num_columns : 0;
if (state.user_metadata != nullptr) {
for (auto it = state.user_metadata->user_data.begin();
it != state.user_metadata->user_data.end();
it++) {
state.md.key_value_metadata.push_back({it->first, it->second});
}
}
for (auto i = 0; i < num_columns; i++) {
auto &col = parquet_columns[i];
// Column metadata
state.md.schema[1 + i].type = col.physical_type();
state.md.schema[1 + i].converted_type = col.converted_type();
// because the repetition type is global (in the sense of, not per-rowgroup or per
// write_chunked() call) we cannot know up front if the user is going to end up passing tables
// with nulls/no nulls in the multiple write_chunked() case. so we'll do some special
// handling.
//
// if the user is explicitly saying "I am only calling this once", fall back to the original
// behavior and assume the columns in this one table tell us everything we need to know.
if (state.single_write_mode) {
state.md.schema[1 + i].repetition_type =
(col.nullable() || col.data_count() < (size_t)num_rows) ? OPTIONAL : REQUIRED;
}
// otherwise, if the user is explicitly telling us global information about all the tables
// that will ever get passed in
else if (state.user_metadata_with_nullability.column_nullable.size() > 0) {
state.md.schema[1 + i].repetition_type =
state.user_metadata_with_nullability.column_nullable[i] ? OPTIONAL : REQUIRED;
}
// otherwise assume the worst case.
else {
state.md.schema[1 + i].repetition_type = OPTIONAL;
}
state.md.schema[1 + i].name = col.name();
state.md.schema[1 + i].num_children = 0; // Leaf node
}
} else {
// verify the user isn't passing mismatched tables
CUDF_EXPECTS(state.md.schema[0].num_children == num_columns,
"Mismatch in table structure between multiple calls to write_chunked");
for (auto i = 0; i < num_columns; i++) {
auto &col = parquet_columns[i];
CUDF_EXPECTS(state.md.schema[1 + i].type == col.physical_type(),
"Mismatch in column types between multiple calls to write_chunked");
}
// increment num rows
state.md.num_rows += num_rows;
}
// Initialize column description
hostdevice_vector<gpu::EncColumnDesc> col_desc(num_columns);
// setup gpu column description.
// applicable to only this _write_chunked() call
for (auto i = 0; i < num_columns; i++) {
auto &col = parquet_columns[i];
// GPU column description
auto *desc = &col_desc[i];
desc->column_data_base = col.data();
desc->valid_map_base = col.nulls();
desc->stats_dtype = col.stats_type();
desc->ts_scale = col.ts_scale();
if (state.md.schema[1 + i].type != BOOLEAN && state.md.schema[1 + i].type != UNDEFINED_TYPE) {
col.alloc_dictionary(num_rows);
desc->dict_index = col.get_dict_index();
desc->dict_data = col.get_dict_data();
} else {
desc->dict_data = nullptr;
desc->dict_index = nullptr;
}
desc->num_rows = col.data_count();
desc->physical_type = static_cast<uint8_t>(state.md.schema[1 + i].type);
desc->converted_type = static_cast<uint8_t>(state.md.schema[1 + i].converted_type);
desc->level_bits = (state.md.schema[1 + i].repetition_type == OPTIONAL) ? 1 : 0;
}
// Init page fragments
// 5000 is good enough for up to ~200-character strings. Longer strings will start producing
// fragments larger than the desired page size -> TODO: keep track of the max fragment size, and
// iteratively reduce this value if the largest fragment exceeds the max page size limit (we
// ideally want the page size to be below 1MB so as to have enough pages to get good
// compression/decompression performance).
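  // Worked sizing example: at fragment_size = 5000 rows, a column of ~200-byte
  // strings produces fragments of roughly 5000 * 200 B = 1,000,000 B, i.e. right
  // at the ~1MB page-size target described above; longer values push a single
  // fragment past that target, which is what the TODO above is about.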
uint32_t fragment_size = 5000;
uint32_t num_fragments = (uint32_t)((num_rows + fragment_size - 1) / fragment_size);
hostdevice_vector<gpu::PageFragment> fragments(num_columns * num_fragments);
if (fragments.size() != 0) {
init_page_fragments(
fragments, col_desc, num_columns, num_fragments, num_rows, fragment_size, state.stream);
}
size_t global_rowgroup_base = state.md.row_groups.size();
// Decide row group boundaries based on uncompressed data size
size_t rowgroup_size = 0;
uint32_t num_rowgroups = 0;
for (uint32_t f = 0, global_r = global_rowgroup_base, rowgroup_start = 0; f < num_fragments;
f++) {
size_t fragment_data_size = 0;
for (auto i = 0; i < num_columns; i++) {
fragment_data_size += fragments[i * num_fragments + f].fragment_data_size;
}
if (f > rowgroup_start && (rowgroup_size + fragment_data_size > max_rowgroup_size_ ||
(f + 1 - rowgroup_start) * fragment_size > max_rowgroup_rows_)) {
      // close out the current row group in the file metadata
state.md.row_groups.resize(state.md.row_groups.size() + 1);
state.md.row_groups[global_r++].num_rows = (f - rowgroup_start) * fragment_size;
num_rowgroups++;
rowgroup_start = f;
rowgroup_size = 0;
}
rowgroup_size += fragment_data_size;
if (f + 1 == num_fragments) {
      // close out the final row group in the file metadata
state.md.row_groups.resize(state.md.row_groups.size() + 1);
state.md.row_groups[global_r++].num_rows = num_rows - rowgroup_start * fragment_size;
num_rowgroups++;
}
}
// Allocate column chunks and gather fragment statistics
rmm::device_vector<statistics_chunk> frag_stats;
if (stats_granularity_ != statistics_freq::STATISTICS_NONE) {
frag_stats.resize(num_fragments * num_columns);
if (frag_stats.size() != 0) {
gather_fragment_statistics(frag_stats.data().get(),
fragments,
col_desc,
num_columns,
num_fragments,
fragment_size,
state.stream);
}
}
// Initialize row groups and column chunks
uint32_t num_chunks = num_rowgroups * num_columns;
hostdevice_vector<gpu::EncColumnChunk> chunks(num_chunks);
uint32_t num_dictionaries = 0;
for (uint32_t r = 0, global_r = global_rowgroup_base, f = 0, start_row = 0; r < num_rowgroups;
r++, global_r++) {
uint32_t fragments_in_chunk =
(uint32_t)((state.md.row_groups[global_r].num_rows + fragment_size - 1) / fragment_size);
state.md.row_groups[global_r].total_byte_size = 0;
state.md.row_groups[global_r].columns.resize(num_columns);
for (int i = 0; i < num_columns; i++) {
gpu::EncColumnChunk *ck = &chunks[r * num_columns + i];
bool dict_enable = false;
ck->col_desc = col_desc.device_ptr() + i;
ck->uncompressed_bfr = nullptr;
ck->compressed_bfr = nullptr;
ck->bfr_size = 0;
ck->compressed_size = 0;
ck->fragments = fragments.device_ptr() + i * num_fragments + f;
ck->stats =
(frag_stats.size() != 0) ? frag_stats.data().get() + i * num_fragments + f : nullptr;
ck->start_row = start_row;
ck->num_rows = (uint32_t)state.md.row_groups[global_r].num_rows;
ck->first_fragment = i * num_fragments + f;
ck->first_page = 0;
ck->num_pages = 0;
ck->is_compressed = 0;
ck->dictionary_id = num_dictionaries;
ck->ck_stat_size = 0;
if (col_desc[i].dict_data) {
const gpu::PageFragment *ck_frag = &fragments[i * num_fragments + f];
size_t plain_size = 0;
size_t dict_size = 1;
uint32_t num_dict_vals = 0;
for (uint32_t j = 0; j < fragments_in_chunk && num_dict_vals < 65536; j++) {
plain_size += ck_frag[j].fragment_data_size;
dict_size +=
ck_frag[j].dict_data_size + ((num_dict_vals > 256) ? 2 : 1) * ck_frag[j].non_nulls;
num_dict_vals += ck_frag[j].num_dict_vals;
}
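        // Rough cost model for the comparison below: plain encoding costs the raw
        // fragment data size, while dictionary encoding costs the dictionary page
        // (dict_data_size) plus one index byte per non-null value while no more
        // than 256 distinct values have been seen, two bytes after that, giving up
        // entirely at 64K distinct values. Illustrative (assumed) numbers: 5000
        // rows of ~20-byte strings with 100 distinct values -> plain ~100 KB vs.
        // dict ~2 KB of dictionary + ~5 KB of indices, so the dictionary wins.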
if (dict_size < plain_size) {
parquet_columns[i].use_dictionary(true);
dict_enable = true;
num_dictionaries++;
}
}
ck->has_dictionary = dict_enable;
state.md.row_groups[global_r].columns[i].meta_data.type = state.md.schema[1 + i].type;
state.md.row_groups[global_r].columns[i].meta_data.encodings = {PLAIN, RLE};
if (dict_enable) {
state.md.row_groups[global_r].columns[i].meta_data.encodings.push_back(PLAIN_DICTIONARY);
}
state.md.row_groups[global_r].columns[i].meta_data.path_in_schema = {
state.md.schema[1 + i].name};
state.md.row_groups[global_r].columns[i].meta_data.codec = UNCOMPRESSED;
state.md.row_groups[global_r].columns[i].meta_data.num_values =
state.md.row_groups[global_r].num_rows;
}
f += fragments_in_chunk;
start_row += (uint32_t)state.md.row_groups[global_r].num_rows;
}
// Free unused dictionaries
for (auto &col : parquet_columns) { col.check_dictionary_used(); }
// Build chunk dictionaries and count pages
if (num_chunks != 0) {
build_chunk_dictionaries(
chunks, col_desc, num_rowgroups, num_columns, num_dictionaries, state.stream);
}
// Initialize batches of rowgroups to encode (mainly to limit peak memory usage)
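  // Row groups are packed greedily into a batch until adding the next one would
  // push the batch past max_bytes_in_batch of uncompressed page data; the extra
  // iteration at r == num_rowgroups flushes whatever remains as the final batch.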
std::vector<uint32_t> batch_list;
uint32_t num_pages = 0;
size_t max_bytes_in_batch = 1024 * 1024 * 1024; // 1GB - TBD: Tune this
size_t max_uncomp_bfr_size = 0;
size_t max_chunk_bfr_size = 0;
uint32_t max_pages_in_batch = 0;
size_t bytes_in_batch = 0;
for (uint32_t r = 0, groups_in_batch = 0, pages_in_batch = 0; r <= num_rowgroups; r++) {
size_t rowgroup_size = 0;
if (r < num_rowgroups) {
for (int i = 0; i < num_columns; i++) {
gpu::EncColumnChunk *ck = &chunks[r * num_columns + i];
ck->first_page = num_pages;
num_pages += ck->num_pages;
pages_in_batch += ck->num_pages;
rowgroup_size += ck->bfr_size;
max_chunk_bfr_size =
std::max(max_chunk_bfr_size, (size_t)std::max(ck->bfr_size, ck->compressed_size));
}
}
// TBD: We may want to also shorten the batch if we have enough pages (not just based on size)
if ((r == num_rowgroups) ||
(groups_in_batch != 0 && bytes_in_batch + rowgroup_size > max_bytes_in_batch)) {
max_uncomp_bfr_size = std::max(max_uncomp_bfr_size, bytes_in_batch);
max_pages_in_batch = std::max(max_pages_in_batch, pages_in_batch);
if (groups_in_batch != 0) {
batch_list.push_back(groups_in_batch);
groups_in_batch = 0;
}
bytes_in_batch = 0;
pages_in_batch = 0;
}
bytes_in_batch += rowgroup_size;
groups_in_batch++;
}
// Initialize data pointers in batch
size_t max_comp_bfr_size =
(compression_ != parquet::Compression::UNCOMPRESSED)
? gpu::GetMaxCompressedBfrSize(max_uncomp_bfr_size, max_pages_in_batch)
: 0;
uint32_t max_comp_pages =
(compression_ != parquet::Compression::UNCOMPRESSED) ? max_pages_in_batch : 0;
uint32_t num_stats_bfr =
(stats_granularity_ != statistics_freq::STATISTICS_NONE) ? num_pages + num_chunks : 0;
rmm::device_buffer uncomp_bfr(max_uncomp_bfr_size, state.stream);
rmm::device_buffer comp_bfr(max_comp_bfr_size, state.stream);
rmm::device_vector<gpu_inflate_input_s> comp_in(max_comp_pages);
rmm::device_vector<gpu_inflate_status_s> comp_out(max_comp_pages);
rmm::device_vector<gpu::EncPage> pages(num_pages);
rmm::device_vector<statistics_chunk> page_stats(num_stats_bfr);
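  // Carve per-chunk regions out of the shared staging buffers. The pointers
  // restart from the buffer base for every batch, which is what lets a single
  // allocation of max_uncomp_bfr_size / max_comp_bfr_size serve all batches.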
for (uint32_t b = 0, r = 0; b < (uint32_t)batch_list.size(); b++) {
uint8_t *bfr = reinterpret_cast<uint8_t *>(uncomp_bfr.data());
uint8_t *bfr_c = reinterpret_cast<uint8_t *>(comp_bfr.data());
for (uint32_t j = 0; j < batch_list[b]; j++, r++) {
for (int i = 0; i < num_columns; i++) {
gpu::EncColumnChunk *ck = &chunks[r * num_columns + i];
ck->uncompressed_bfr = bfr;
ck->compressed_bfr = bfr_c;
bfr += ck->bfr_size;
bfr_c += ck->compressed_size;
}
}
}
if (num_pages != 0) {
init_encoder_pages(chunks,
col_desc,
pages.data().get(),
(num_stats_bfr) ? page_stats.data().get() : nullptr,
(num_stats_bfr) ? frag_stats.data().get() : nullptr,
num_rowgroups,
num_columns,
num_pages,
num_stats_bfr,
state.stream);
}
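  // Pinned staging buffer for the copy-back path: only allocated when the sink
  // cannot consume device memory directly, and sized for the largest single
  // chunk (max_chunk_bfr_size) because chunks are copied out one at a time.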
auto host_bfr = [&]() {
// if the writer supports device_write(), we don't need this scratch space
if (out_sink_->supports_device_write()) {
return pinned_buffer<uint8_t>{nullptr, cudaFreeHost};
} else {
return pinned_buffer<uint8_t>{[](size_t size) {
uint8_t *ptr = nullptr;
CUDA_TRY(cudaMallocHost(&ptr, size));
return ptr;
}(max_chunk_bfr_size),
cudaFreeHost};
}
}();
// Encode row groups in batches
for (uint32_t b = 0, r = 0, global_r = global_rowgroup_base; b < (uint32_t)batch_list.size();
b++) {
// Count pages in this batch
uint32_t rnext = r + batch_list[b];
uint32_t first_page_in_batch = chunks[r * num_columns].first_page;
uint32_t first_page_in_next_batch =
(rnext < num_rowgroups) ? chunks[rnext * num_columns].first_page : num_pages;
uint32_t pages_in_batch = first_page_in_next_batch - first_page_in_batch;
encode_pages(
chunks,
pages.data().get(),
num_columns,
pages_in_batch,
first_page_in_batch,
batch_list[b],
r,
comp_in.data().get(),
comp_out.data().get(),
(stats_granularity_ == statistics_freq::STATISTICS_PAGE) ? page_stats.data().get() : nullptr,
(stats_granularity_ != statistics_freq::STATISTICS_NONE) ? page_stats.data().get() + num_pages
: nullptr,
state.stream);
for (; r < rnext; r++, global_r++) {
for (auto i = 0; i < num_columns; i++) {
gpu::EncColumnChunk *ck = &chunks[r * num_columns + i];
uint8_t *dev_bfr;
if (ck->is_compressed) {
state.md.row_groups[global_r].columns[i].meta_data.codec = compression_;
dev_bfr = ck->compressed_bfr;
} else {
dev_bfr = ck->uncompressed_bfr;
}
if (out_sink_->supports_device_write()) {
// let the writer do what it wants to retrieve the data from the gpu.
out_sink_->device_write(dev_bfr + ck->ck_stat_size, ck->compressed_size, state.stream);
// we still need to do a (much smaller) memcpy for the statistics.
if (ck->ck_stat_size != 0) {
state.md.row_groups[global_r].columns[i].meta_data.statistics_blob.resize(
ck->ck_stat_size);
CUDA_TRY(cudaMemcpyAsync(
state.md.row_groups[global_r].columns[i].meta_data.statistics_blob.data(),
dev_bfr,
ck->ck_stat_size,
cudaMemcpyDeviceToHost,
state.stream));
CUDA_TRY(cudaStreamSynchronize(state.stream));
}
} else {
// copy the full data
CUDA_TRY(cudaMemcpyAsync(host_bfr.get(),
dev_bfr,
ck->ck_stat_size + ck->compressed_size,
cudaMemcpyDeviceToHost,
state.stream));
CUDA_TRY(cudaStreamSynchronize(state.stream));
out_sink_->host_write(host_bfr.get() + ck->ck_stat_size, ck->compressed_size);
if (ck->ck_stat_size != 0) {
state.md.row_groups[global_r].columns[i].meta_data.statistics_blob.resize(
ck->ck_stat_size);
memcpy(state.md.row_groups[global_r].columns[i].meta_data.statistics_blob.data(),
host_bfr.get(),
ck->ck_stat_size);
}
}
state.md.row_groups[global_r].total_byte_size += ck->compressed_size;
state.md.row_groups[global_r].columns[i].meta_data.data_page_offset =
state.current_chunk_offset + ((ck->has_dictionary) ? ck->dictionary_size : 0);
state.md.row_groups[global_r].columns[i].meta_data.dictionary_page_offset =
(ck->has_dictionary) ? state.current_chunk_offset : 0;
state.md.row_groups[global_r].columns[i].meta_data.total_uncompressed_size = ck->bfr_size;
state.md.row_groups[global_r].columns[i].meta_data.total_compressed_size =
ck->compressed_size;
state.current_chunk_offset += ck->compressed_size;
}
}
}
}
std::unique_ptr<std::vector<uint8_t>> writer::impl::write_chunked_end(
pq_chunked_state &state, bool return_filemetadata, const std::string &metadata_out_file_path)
{
CompactProtocolWriter cpw(&buffer_);
file_ender_s fendr;
buffer_.resize(0);
fendr.footer_len = static_cast<uint32_t>(cpw.write(&state.md));
fendr.magic = PARQUET_MAGIC;
out_sink_->host_write(buffer_.data(), buffer_.size());
out_sink_->host_write(&fendr, sizeof(fendr));
out_sink_->flush();
// Optionally output raw file metadata with the specified column chunk file path
if (return_filemetadata) {
file_header_s fhdr = {PARQUET_MAGIC};
buffer_.resize(0);
buffer_.insert(buffer_.end(),
reinterpret_cast<const uint8_t *>(&fhdr),
reinterpret_cast<const uint8_t *>(&fhdr) + sizeof(fhdr));
for (auto &rowgroup : state.md.row_groups) {
for (auto &col : rowgroup.columns) { col.file_path = metadata_out_file_path; }
}
fendr.footer_len = static_cast<uint32_t>(cpw.write(&state.md));
buffer_.insert(buffer_.end(),
reinterpret_cast<const uint8_t *>(&fendr),
reinterpret_cast<const uint8_t *>(&fendr) + sizeof(fendr));
return std::make_unique<std::vector<uint8_t>>(std::move(buffer_));
} else {
return {nullptr};
}
}
// Forward to implementation
writer::writer(std::unique_ptr<data_sink> sink,
writer_options const &options,
rmm::mr::device_memory_resource *mr)
: _impl(std::make_unique<impl>(std::move(sink), options, mr))
{
}
// Destructor within this translation unit
writer::~writer() = default;
// Forward to implementation
std::unique_ptr<std::vector<uint8_t>> writer::write_all(table_view const &table,
const table_metadata *metadata,
bool return_filemetadata,
const std::string metadata_out_file_path,
cudaStream_t stream)
{
return _impl->write(table, metadata, return_filemetadata, metadata_out_file_path, stream);
}
// Forward to implementation
void writer::write_chunked_begin(pq_chunked_state &state)
{
return _impl->write_chunked_begin(state);
}
// Forward to implementation
void writer::write_chunked(table_view const &table, pq_chunked_state &state)
{
_impl->write_chunked(table, state);
}
// Forward to implementation
void writer::write_chunked_end(pq_chunked_state &state) { _impl->write_chunked_end(state); }
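/**
 * @brief Combines the footers of separately written parquet files into one.
 *
 * Sketch of the intended flow (assumed from the API, not spelled out here):
 * each file is written with return_filemetadata = true and a per-file
 * metadata_out_file_path, the returned blobs are collected, and this function
 * merges their row groups into a single footer (e.g. a "_metadata" sidecar)
 * whose row groups reference the individual files via their recorded paths.
 **/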
std::unique_ptr<std::vector<uint8_t>> writer::merge_rowgroup_metadata(
const std::vector<std::unique_ptr<std::vector<uint8_t>>> &metadata_list)
{
std::vector<uint8_t> output;
CompactProtocolWriter cpw(&output);
FileMetaData md;
md.row_groups.reserve(metadata_list.size());
for (const auto &blob : metadata_list) {
CompactProtocolReader cpreader(
blob.get()->data(),
std::max<size_t>(blob.get()->size(), sizeof(file_ender_s)) - sizeof(file_ender_s));
cpreader.skip_bytes(sizeof(file_header_s)); // Skip over file header
if (md.num_rows == 0) {
cpreader.read(&md);
} else {
FileMetaData tmp;
cpreader.read(&tmp);
md.row_groups.insert(md.row_groups.end(),
std::make_move_iterator(tmp.row_groups.begin()),
std::make_move_iterator(tmp.row_groups.end()));
md.num_rows += tmp.num_rows;
}
}
// Reader doesn't currently populate column_order, so infer it here
if (md.row_groups.size() != 0) {
uint32_t num_columns = static_cast<uint32_t>(md.row_groups[0].columns.size());
md.column_order_listsize =
(num_columns > 0 && md.row_groups[0].columns[0].meta_data.statistics_blob.size())
? num_columns
: 0;
}
// Thrift-encode the resulting output
file_header_s fhdr;
file_ender_s fendr;
fhdr.magic = PARQUET_MAGIC;
output.insert(output.end(),
reinterpret_cast<const uint8_t *>(&fhdr),
reinterpret_cast<const uint8_t *>(&fhdr) + sizeof(fhdr));
fendr.footer_len = static_cast<uint32_t>(cpw.write(&md));
fendr.magic = PARQUET_MAGIC;
output.insert(output.end(),
reinterpret_cast<const uint8_t *>(&fendr),
reinterpret_cast<const uint8_t *>(&fendr) + sizeof(fendr));
return std::make_unique<std::vector<uint8_t>>(std::move(output));
}
} // namespace parquet
} // namespace detail
} // namespace io
} // namespace cudf
|
d044263f5393322710ce379723a30113018384ac.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "matgpuadd.h"
__global__ void matgpuadd(int *a, int *b, int *c, int N) {
// int columns = blockIdx.x * blockDim.x + threadIdx.x;
// int rows = blockIdx.y * blockDim.y + threadIdx.y;
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < N;
i += blockDim.x * gridDim.x
){
for(int j = blockIdx.y*blockDim.y + threadIdx.y;
j < N;
j += blockDim.y * gridDim.y
){
c[i*N + j] = a[i*N + j] + b[i*N + j];
}
}
}
|
d044263f5393322710ce379723a30113018384ac.cu
|
#include "matgpuadd.h"
__global__ void matgpuadd(int *a, int *b, int *c, int N) {
// int columns = blockIdx.x * blockDim.x + threadIdx.x;
// int rows = blockIdx.y * blockDim.y + threadIdx.y;
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < N;
i += blockDim.x * gridDim.x
){
for(int j = blockIdx.y*blockDim.y + threadIdx.y;
j < N;
j += blockDim.y * gridDim.y
){
c[i*N + j] = a[i*N + j] + b[i*N + j];
}
}
}
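// Hypothetical host-side launcher (not part of the original matgpuadd.h API),
// included only to illustrate how the grid-stride kernel above is driven: the
// grid may be smaller than N x N because every thread strides across the
// remaining i/j values. Error checking is omitted for brevity.
static void matgpuadd_launch(int *d_a, int *d_b, int *d_c, int N) {
    dim3 block(16, 16);                            // 256 threads per block
    dim3 grid((N + block.x - 1) / block.x,         // blocks in x cover i in [0, N)
              (N + block.y - 1) / block.y);        // blocks in y cover j in [0, N)
    matgpuadd<<<grid, block>>>(d_a, d_b, d_c, N);  // d_* are device pointers
    cudaDeviceSynchronize();                       // wait for the addition to finish
}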
|
d34088dd7a3837c42f443cf47cda8b2d21b9eead.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "fluids/marching_cubes.hpp"
#include "fluids/grid.hpp"
namespace Fluids {
__global__ void clearVoxelData(MarchingCubes::device_data& data) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i < data.point_volume / 32 + 1 )
MarchingCubes::clearVoxelData(data, i);
}
__global__ void computeVoxelData(grid::device_data& g, MarchingCubes::device_data& data) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i < data.point_volume )
MarchingCubes::computeVoxelData(g, data, i);
}
__global__ void computeSurfaceNodes(grid::device_data& g, MarchingCubes::device_data& data) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i < data.volume )
MarchingCubes::computeSurfaceNodes(g, data, i);
}
__global__ void computeIsoSurface(MarchingCubes::device_data& data) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i < 2 * data.volume + 2 )
MarchingCubes::computeIsoSurface(data, i);
}
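// Drives one full surface-extraction pass: clear the packed voxel bits, mark
// lattice points near particles (computeVoxelData), locate and relax vertices
// of cells that straddle the surface (computeSurfaceNodes), then emit triangle
// indices (computeIsoSurface). Each stage is synchronized before the next one
// consumes its output.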
void runMarchingCubes(grid::device_data& g, MarchingCubes& mc) {
float3* v = (float3*)mc.bindVertices();
float3* n = (float3*)mc.bindNormals();
int* indices = mc.bindIndices();
checkCUDAReturn( hipDeviceSynchronize() );
MarchingCubes::device_data data(mc, v, n, indices);
data.face_count = 0;
mc.uploadData(data);
int block_count, thread_count;
int needed = data.point_volume / 32 + 1;
block_count = needed/THREAD_COUNT + (((needed%THREAD_COUNT) > 0)?1:0);
thread_count = ::min(needed, THREAD_COUNT);
hipLaunchKernelGGL(( clearVoxelData), dim3(block_count),dim3(thread_count), 0, 0, mc.getUploadedData() );
checkCUDAResult();
checkCUDAReturn( hipDeviceSynchronize() );
block_count = data.point_volume/THREAD_COUNT + (((data.point_volume%THREAD_COUNT) > 0)?1:0);
thread_count = ::min(data.point_volume, THREAD_COUNT);
hipLaunchKernelGGL(( computeVoxelData), dim3(block_count),dim3(thread_count), 0, 0, g, mc.getUploadedData() );
checkCUDAResult();
checkCUDAReturn( hipDeviceSynchronize() );
block_count = data.volume/THREAD_COUNT + (((data.volume%THREAD_COUNT) > 0)?1:0);
thread_count = ::min(data.volume, THREAD_COUNT);
hipLaunchKernelGGL(( computeSurfaceNodes), dim3(block_count),dim3(thread_count), 0, 0, g, mc.getUploadedData() );
checkCUDAResult();
checkCUDAReturn( hipDeviceSynchronize() );
needed = 2 * data.volume + 2;
block_count = needed/THREAD_COUNT + (((needed%THREAD_COUNT) > 0)?1:0);
thread_count = ::min(needed, THREAD_COUNT);
hipLaunchKernelGGL(( computeIsoSurface), dim3(block_count),dim3(thread_count), 0, 0, mc.getUploadedData() );
checkCUDAResult();
checkCUDAReturn( hipDeviceSynchronize() );
mc.unbindVertices();
mc.unbindNormals();
mc.unbindIndices();
checkCUDAReturn( hipDeviceSynchronize() );
mc.synchronizeWithDevice();
}
MarchingCubes::device_data::device_data(MarchingCubes& mc, float3* v, float3* n, int* indbuf) :
face_count(0),
volume(mc._volume),
point_volume(mc._point_volume),
length(mc._length),
width(mc._width),
depth(mc._depth),
length_x_width(length * width),
cube_size(mc._cube_dimensions),
cube_values(thrust::raw_pointer_cast(mc._cube_values.data())),
neighbor_values(thrust::raw_pointer_cast(mc._neighbor_values.data())),
normal_values(thrust::raw_pointer_cast(mc._normal_values.data())),
vertex_buffer(v),
normal_buffer(n),
index_buffer(indbuf) { ; }
MarchingCubes::MarchingCubes(int L, int W, int D, const core::vec3i& dim) :
_device_data(1),
_length(L),
_width(W),
_depth(D),
_volume(L*W*D),
_point_volume((L+1)*(W+1)*(D+1)),
_cube_values(_point_volume/32 + 1),
_normal_values(_point_volume),
_neighbor_values(_volume/32 + 1) {
_cube_dimensions = make_float3( (float)dim.x / L,
(float)dim.y / W,
(float)dim.z / D );
{
glGenBuffers(1, &_vertex_buffer);
glGenBuffers(1, &_normal_buffer);
glBindBuffer(GL_ARRAY_BUFFER, _vertex_buffer);
core::vec3* temp = new core::vec3[_volume];
for ( int z=0;z<D;z++ ) for ( int y=0;y<W;y++ ) for ( int x=0;x<L;x++ )
temp[x + y*L + z*L*W] = core::vec3( _cube_dimensions.x * x / 2.0f + _cube_dimensions.x / 4.0f,
_cube_dimensions.y * y / 2.0f + _cube_dimensions.y / 4.0f,
_cube_dimensions.z * z / 2.0f + _cube_dimensions.z / 4.0f );
std::cerr << "create buffer of size " << _volume * sizeof(core::vec3) << std::endl;
glBufferData(GL_ARRAY_BUFFER, _volume * sizeof(core::vec3), temp, GL_DYNAMIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, _normal_buffer);
for ( int z=0;z<D;z++ ) for ( int y=0;y<W;y++ ) for ( int x=0;x<L;x++ )
temp[x + y*L + z*L*W] = core::vec3( 1,0,0 );
glBufferData(GL_ARRAY_BUFFER, _volume * sizeof(core::vec3), temp, GL_DYNAMIC_DRAW);
delete[] temp;
glBindBuffer(GL_ARRAY_BUFFER, 0);
}
{
int indices_x = (W+1) * (D+1) * 16;
int indices_y = (L+1) * (D+1) * 16;
int indices_z = (L+1) * (W+1) * 16;
glGenBuffers(1, &_index_buffer);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, _index_buffer);
std::cerr << "create buffer of size " << sizeof(int) * (indices_x + indices_y + indices_z) * 3 << std::endl;
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(int) * (indices_x + indices_y + indices_z) * 3, NULL, GL_DYNAMIC_DRAW);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
}
checkCUDAReturn( hipGraphicsGLRegisterBuffer(&_cuda_resource_ib, _index_buffer, hipGraphicsRegisterFlagsNone) );
checkCUDAReturn( hipGraphicsGLRegisterBuffer(&_cuda_resource_vb, _vertex_buffer, hipGraphicsRegisterFlagsNone) );
checkCUDAReturn( hipGraphicsGLRegisterBuffer(&_cuda_resource_nb, _normal_buffer, hipGraphicsRegisterFlagsNone) );
checkCUDAReturn( hipDeviceSynchronize() );
}
void MarchingCubes::bindGL() {
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, _vertex_buffer);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
glBindBuffer(GL_ARRAY_BUFFER, _normal_buffer);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, _index_buffer);
}
int* MarchingCubes::bindIndices() {
int* mapped_data;
std::size_t mapped_size;
checkCUDAReturn( hipGraphicsMapResources( 1, &_cuda_resource_ib, 0 ) );
checkCUDAReturn( hipGraphicsResourceGetMappedPointer( (void**)&mapped_data, &mapped_size, _cuda_resource_ib ) );
return mapped_data;
}
void MarchingCubes::unbindIndices() {
checkCUDAReturn( hipGraphicsUnmapResources( 1, &_cuda_resource_ib, 0 ) );
}
float* MarchingCubes::bindVertices() {
float* mapped_data;
std::size_t mapped_size;
checkCUDAReturn( hipGraphicsMapResources( 1, &_cuda_resource_vb, 0 ) );
checkCUDAReturn( hipGraphicsResourceGetMappedPointer( (void**)&mapped_data, &mapped_size, _cuda_resource_vb ) );
return mapped_data;
}
void MarchingCubes::unbindVertices() {
checkCUDAReturn( hipGraphicsUnmapResources( 1, &_cuda_resource_vb, 0 ) );
}
float* MarchingCubes::bindNormals() {
float* mapped_data;
std::size_t mapped_size;
checkCUDAReturn( hipGraphicsMapResources( 1, &_cuda_resource_nb, 0 ) );
checkCUDAReturn( hipGraphicsResourceGetMappedPointer( (void**)&mapped_data, &mapped_size, _cuda_resource_nb ) );
return mapped_data;
}
void MarchingCubes::unbindNormals() {
checkCUDAReturn( hipGraphicsUnmapResources( 1, &_cuda_resource_nb, 0 ) );
}
CUDA_DEVICE_FUNCTION void MarchingCubes::clearVoxelData(device_data& data, int index) {
data.cube_values[index] = 0;
if ( index < data.volume / 32 + 1)
data.neighbor_values[index] = 0;
//printf("clear %d(%d)\r\n", index, index * 32);
//printf("%d - N/A = %x\r\n", index, data.cube_values[index]);
}
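// Occupancy is stored bit-packed: cube_values keeps one bit per lattice point
// (32 per Uint, hence the index/32 word and 1<<(index%32) mask used below) and
// neighbor_values keeps one bit per cell, marking cells that straddle the
// surface.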
CUDA_DEVICE_FUNCTION void MarchingCubes::computeVoxelData(grid::device_data& g, device_data& data, int index) {
int4 cell;
getXYZFromIndex(index, data.length + 1, data.width + 1, &cell.x, &cell.y, &cell.z);
float4 position = make_float4(data.cube_size.x*cell.x, data.cube_size.y*cell.y, data.cube_size.z*cell.z, 0.0f);
//float density = 0.0f;
//iterate over all neighbors
int counted = 0;
/*for (int x = -1; x <= 1; x++) {
if (cell.x + x < 0) continue;
else if (cell.x + x >= g.dimensions.x) break;
for (int y = -1; y <= 1; y++) {
if (cell.y + y < 0) continue;
else if (cell.y + y >= g.dimensions.y ) break;
for (int z = -1; z <= 1; z++) {
if (cell.z + z < 0) continue;
else if (cell.z + z >= g.dimensions.z ) break;
int neighbor_id = 0;
getIndexFromXYZ(cell.x+x, cell.y+y, cell.z+z, g.dimensions.x, g.dimensions.y, &neighbor_id);
int offset = g.cell_offset[neighbor_id];
int count = g.cell_count[neighbor_id];
//printf("source %d to index %d : offset = %d, count = %d\r\n", id, neighbor_id, offset, count);
for (int k = 0; k < count; k++) {
int j = g.sorted_particle_id[k + offset];
float4 dist = position - g.positions[j];
if ( std::sqrt(cuDot4(dist, dist)) <= CONST_H ) {
atomicOr(&data.cube_values[index/32], (1<<(index%32)));
data.normal_values[index] = data.normal_values[index] + g.color_field[j];
counted ++ ;
//data.index_buffer[atomicAdd(&data.face_count, 1)] = index;
}
}
}
}
}*/
data.normal_values[index] = make_float3(0,0,0);
for (int i=0;i<g.particle_count;i++) {
float4 dist = position - g.positions[i];
if ( std::sqrt(cuDot4(dist, dist)) <= CONST_H ) {
atomicOr(&data.cube_values[index/32], (1<<(index%32)));
data.normal_values[index] = data.normal_values[index] + g.color_field[i];
counted ++ ;
}
}
if ( counted != 0 )
data.normal_values[index] = data.normal_values[index] / (float)counted;
}
CUDA_DEVICE_FUNCTION inline bool check_node(Uint* data, int index, int offset) {
return data[index] & (Uint)(1<<offset);
}
CUDA_DEVICE_FUNCTION void MarchingCubes::computeSurfaceNodes(grid::device_data& g, device_data& data, int index) {
int index_div = index / 32;
int index_mod = index % 32;
int x, y, z;
getXYZFromIndex(index, data.length, data.width, &x, &y, &z);
data.vertex_buffer[index] = make_float3(data.cube_size.x * x / 2.0f + data.cube_size.x / 4.0f,
data.cube_size.y * y / 2.0f + data.cube_size.y / 4.0f,
data.cube_size.z * z / 2.0f + data.cube_size.z / 4.0f);
int i[8];
getIndexFromXYZ(x , y, z , data.length + 1, data.width + 1, i + 0);
getIndexFromXYZ(x+1, y, z , data.length + 1, data.width + 1, i + 1);
getIndexFromXYZ(x , y+1, z , data.length + 1, data.width + 1, i + 2);
getIndexFromXYZ(x+1, y+1, z , data.length + 1, data.width + 1, i + 3);
getIndexFromXYZ(x , y , z+1, data.length + 1, data.width + 1, i + 4);
getIndexFromXYZ(x+1, y , z+1, data.length + 1, data.width + 1, i + 5);
getIndexFromXYZ(x , y+1, z+1, data.length + 1, data.width + 1, i + 6);
getIndexFromXYZ(x+1, y+1, z+1, data.length + 1, data.width + 1, i + 7);
int count = 0;
for ( int j=0;j<8;j++ )
count += ((data.cube_values[i[j]/32] & (1<<(i[j]%32))) > 0);
atomicOr(&data.neighbor_values[index_div], ((count!=0&&count!=8)<<(index_mod)));
__syncthreads();
getIndexFromXYZ(max(0, x-1), y, z, data.length, data.width, i + 0);
getIndexFromXYZ(min(x+1, data.length-1), y, z, data.length, data.width, i + 1);
getIndexFromXYZ(x, max(0, y-1), z, data.length, data.width, i + 2);
getIndexFromXYZ(x, min(y+1, data.width-1), z, data.length, data.width, i + 3);
getIndexFromXYZ(x, y, max(0, z-1), data.length, data.width, i + 4);
getIndexFromXYZ(x, y, min(z+1, data.depth-1), data.length, data.width, i + 5);
bool left = check_node(data.neighbor_values, i[0]/32, i[0]%32);
bool right = check_node(data.neighbor_values, i[1]/32, i[1]%32);
bool front = check_node(data.neighbor_values, i[2]/32, i[2]%32);
bool back = check_node(data.neighbor_values, i[3]/32, i[3]%32);
bool up = check_node(data.neighbor_values, i[4]/32, i[4]%32);
bool down = check_node(data.neighbor_values, i[5]/32, i[5]%32);
float total = left + right + front + back + up + down;
if ( total != 0 ) {
float3 highest = make_float3(data.cube_size.x * (x+1) / 2.0f,data.cube_size.y * (y+1) / 2.0f,data.cube_size.z * (z+1) / 2.0f);
float3 lowest = make_float3(data.cube_size.x * x / 2.0f,data.cube_size.y * y / 2.0f,data.cube_size.z * z / 2.0f);
    for ( int k=0;k<10;k++ ) {  // repeatedly average the active neighbours' vertices (10 fixed sweeps), clamped to this cell below
data.vertex_buffer[index] = ( left * data.vertex_buffer[i[0]] +
right * data.vertex_buffer[i[1]] +
front * data.vertex_buffer[i[2]] +
back * data.vertex_buffer[i[3]] +
up * data.vertex_buffer[i[4]] +
down * data.vertex_buffer[i[5]] ) / total;
data.vertex_buffer[index].x = max(min(data.vertex_buffer[index].x, highest.x), lowest.x);
data.vertex_buffer[index].y = max(min(data.vertex_buffer[index].y, highest.y), lowest.y);
data.vertex_buffer[index].z = max(min(data.vertex_buffer[index].z, highest.z), lowest.z);
}
}
/*if ( self && left && front ) {
int face_index = atomicAdd(&data.face_count, 3);
printf("%d = %d\r\n", index, face_index);
data.index_buffer[face_index+0] = index;
data.index_buffer[face_index+1] = i[0];
data.index_buffer[face_index+2] = i[2];
}*/
//int count = 8 * ((data.cube_values[i[0]/32] & (1<<(i[0]%32)))>0);
/*printf("--------------------\r\n%4d %4d %4d %4d\r\n%4d %4d %4d %4d\r\n",
i[0], i[1], i[2], i[3],
i[4], i[5], i[6], i[7]);
printf("%d => %d%d%d%d%d%d%d%d\r\n",
index,
((data.cube_values[i[0]/32] & (1<<(i[0]%32))) > 0),
((data.cube_values[i[1]/32] & (1<<(i[1]%32))) > 0),
((data.cube_values[i[2]/32] & (1<<(i[2]%32))) > 0),
((data.cube_values[i[3]/32] & (1<<(i[3]%32))) > 0),
((data.cube_values[i[4]/32] & (1<<(i[4]%32))) > 0),
((data.cube_values[i[5]/32] & (1<<(i[5]%32))) > 0),
((data.cube_values[i[6]/32] & (1<<(i[6]%32))) > 0),
((data.cube_values[i[7]/32] & (1<<(i[7]%32))) > 0)
);*/
//printf("%d %d %d = %d\r\n", x,y,z, count);
//printf("-- %d = %x (%u)\r\n", index/32, data.neighbor_values[index/32], ((data.neighbor_values[(index)/32] & ((count!=0&&count!=8)<<((index)%32)))>0) );
if ( data.neighbor_values[(index)/32] & ((count!=0&&count!=8)<<((index)%32)) ) {
//int new_index = atomicAdd(&data.face_count, 1);
//data.index_buffer[new_index] = index;
data.normal_buffer[index]= (data.normal_values[i[0]] +
data.normal_values[i[1]] +
data.normal_values[i[2]] +
data.normal_values[i[3]] +
data.normal_values[i[4]] +
data.normal_values[i[5]] +
data.normal_values[i[6]] +
data.normal_values[i[7]])/8.0f;
}
}
CUDA_DEVICE_FUNCTION void MarchingCubes::computeIsoSurface(device_data& data, int index) {
int real_index = index / 2;
int index_div = real_index / 32;
int index_mod = real_index % 32;
int x, y, z;
getXYZFromIndex(real_index, data.length, data.width, &x, &y, &z);
//printf("%d = %d %d %d\r\n", index, x, y, z);
bool self = check_node(data.neighbor_values, index_div, index_mod);
int i[6];
int x2 = ((x+1) % data.length) ? x+1 : -1,
y2 = ((y+1) % data.width) ? y+1 : -1,
z2 = ((z+1) % data.depth) ? z+1 : -1;
getIndexFromXYZ(x2, y, z , data.length, data.width, i + 0);
getIndexFromXYZ(x2, y2, z , data.length, data.width, i + 1);
getIndexFromXYZ(x , y2, z , data.length, data.width, i + 2);
getIndexFromXYZ(x , y2, z2, data.length, data.width, i + 3);
getIndexFromXYZ(x , y , z2, data.length, data.width, i + 4);
getIndexFromXYZ(x2, y , z2, data.length, data.width, i + 5);
bool left = x2 != -1 && check_node(data.neighbor_values, i[0]/32, i[0]%32);
bool right = x2 != -1 && check_node(data.neighbor_values, i[1]/32, i[1]%32);
bool front = y2 != -1 && check_node(data.neighbor_values, i[2]/32, i[2]%32);
bool back = y2 != -1 && check_node(data.neighbor_values, i[3]/32, i[3]%32);
bool up = z2 != -1 && check_node(data.neighbor_values, i[4]/32, i[4]%32);
bool down = z2 != -1 && check_node(data.neighbor_values, i[5]/32, i[5]%32);
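// Two threads per node: the even-indexed thread emits the triangles that include this node,
// the odd-indexed thread emits the other halves of those quads using the diagonal neighbors.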
if ( !(index % 2) ) {
if ( self && left && front ) {
int face_index = atomicAdd(&data.face_count, 3);
//printf("%d = %d\r\n", index, face_index);
data.index_buffer[face_index+0] = real_index;
data.index_buffer[face_index+1] = i[0];
data.index_buffer[face_index+2] = i[2];
}
if ( self && left && up ) {
int face_index = atomicAdd(&data.face_count, 3);
//printf("%d = %d\r\n", index, face_index);
data.index_buffer[face_index+0] = real_index;
data.index_buffer[face_index+1] = i[0];
data.index_buffer[face_index+2] = i[4];
}
if ( self && front && up ) {
int face_index = atomicAdd(&data.face_count, 3);
//printf("%d = %d\r\n", index, face_index);
data.index_buffer[face_index+0] = real_index;
data.index_buffer[face_index+1] = i[2];
data.index_buffer[face_index+2] = i[4];
}
} else {
if ( right && left && front ) {
int face_index = atomicAdd(&data.face_count, 3);
//printf("%d = %d\r\n", index, face_index);
data.index_buffer[face_index+0] = i[0];
data.index_buffer[face_index+1] = i[1];
data.index_buffer[face_index+2] = i[2];
}
if ( down && left && up ) {
int face_index = atomicAdd(&data.face_count, 3);
//printf("%d = %d\r\n", index, face_index);
data.index_buffer[face_index+0] = i[0];
data.index_buffer[face_index+1] = i[4];
data.index_buffer[face_index+2] = i[5];
}
if ( back && front && up ) {
int face_index = atomicAdd(&data.face_count, 3);
//printf("%d = %d\r\n", index, face_index);
data.index_buffer[face_index+0] = i[2];
data.index_buffer[face_index+1] = i[3];
data.index_buffer[face_index+2] = i[4];
}
}
//printf("++ %d = %x (%u)\r\n", index/32, data.neighbor_values[index/32], self);
/*if ( self ) {
int x, y, z;
getXYZFromIndex(index, data.length, data.width, &x, &y, &z);
int i[6];
int x2 = ((x+1) % data.length) ? x+1 : -1,
y2 = ((y+1) % data.width) ? y+1 : -1,
z2 = ((z+1) % data.depth) ? z+1 : -1;
getIndexFromXYZ(x2, y, z , data.length, data.width, i + 0);
getIndexFromXYZ(x2, y2, z , data.length, data.width, i + 1);
getIndexFromXYZ(x , y2, z , data.length, data.width, i + 2);
getIndexFromXYZ(x , y2, z2, data.length, data.width, i + 3);
getIndexFromXYZ(x , y , z2, data.length, data.width, i + 4);
getIndexFromXYZ(x2, y , z2, data.length, data.width, i + 5);
bool left = x2 != -1 && check_node(data.neighbor_values, i[0]/32, i[0]%32);
bool right = x2 != -1 && check_node(data.neighbor_values, i[1]/32, i[1]%32);
bool front = y2 != -1 && check_node(data.neighbor_values, i[2]/32, i[2]%32);
bool back = y2 != -1 && check_node(data.neighbor_values, i[3]/32, i[3]%32);
bool up = z2 != -1 && check_node(data.neighbor_values, i[4]/32, i[4]%32);
bool down = z2 != -1 && check_node(data.neighbor_values, i[5]/32, i[5]%32);
if ( left && front) {
int face_index = atomicAdd(&data.face_count, 3);
//printf("%d = %d\r\n", index, face_index);
data.index_buffer[face_index+0] = index;
data.index_buffer[face_index+1] = i[0];
data.index_buffer[face_index+2] = i[2];
}
if ( left && up ) {
int face_index = atomicAdd(&data.face_count, 3);
//printf("%d = %d\r\n", index, face_index);
data.index_buffer[face_index+0] = index;
data.index_buffer[face_index+1] = i[0];
data.index_buffer[face_index+2] = i[4];
}
if ( front && up ) {
int face_index = atomicAdd(&data.face_count, 3);
//printf("%d = %d\r\n", index, face_index);
data.index_buffer[face_index+0] = index;
data.index_buffer[face_index+1] = i[2];
data.index_buffer[face_index+2] = i[4];
}
}*/
}
}
|
d34088dd7a3837c42f443cf47cda8b2d21b9eead.cu
|
#include "fluids/marching_cubes.hpp"
#include "fluids/grid.hpp"
namespace Fluids {
__global__ void clearVoxelData(MarchingCubes::device_data& data) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i < data.point_volume / 32 + 1 )
MarchingCubes::clearVoxelData(data, i);
}
__global__ void computeVoxelData(grid::device_data& g, MarchingCubes::device_data& data) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i < data.point_volume )
MarchingCubes::computeVoxelData(g, data, i);
}
__global__ void computeSurfaceNodes(grid::device_data& g, MarchingCubes::device_data& data) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i < data.volume )
MarchingCubes::computeSurfaceNodes(g, data, i);
}
__global__ void computeIsoSurface(MarchingCubes::device_data& data) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i < 2 * data.volume + 2 )
MarchingCubes::computeIsoSurface(data, i);
}
void runMarchingCubes(grid::device_data& g, MarchingCubes& mc) {
float3* v = (float3*)mc.bindVertices();
float3* n = (float3*)mc.bindNormals();
int* indices = mc.bindIndices();
checkCUDAReturn( cudaDeviceSynchronize() );
MarchingCubes::device_data data(mc, v, n, indices);
data.face_count = 0;
mc.uploadData(data);
int block_count, thread_count;
int needed = data.point_volume / 32 + 1;
block_count = needed/THREAD_COUNT + (((needed%THREAD_COUNT) > 0)?1:0);
thread_count = std::min(needed, THREAD_COUNT);
clearVoxelData<<<block_count,thread_count>>>( mc.getUploadedData() );
checkCUDAResult();
checkCUDAReturn( cudaDeviceSynchronize() );
block_count = data.point_volume/THREAD_COUNT + (((data.point_volume%THREAD_COUNT) > 0)?1:0);
thread_count = std::min(data.point_volume, THREAD_COUNT);
computeVoxelData<<<block_count,thread_count>>>( g, mc.getUploadedData() );
checkCUDAResult();
checkCUDAReturn( cudaDeviceSynchronize() );
block_count = data.volume/THREAD_COUNT + (((data.volume%THREAD_COUNT) > 0)?1:0);
thread_count = std::min(data.volume, THREAD_COUNT);
computeSurfaceNodes<<<block_count,thread_count>>>( g, mc.getUploadedData() );
checkCUDAResult();
checkCUDAReturn( cudaDeviceSynchronize() );
needed = 2 * data.volume + 2;
block_count = needed/THREAD_COUNT + (((needed%THREAD_COUNT) > 0)?1:0);
thread_count = std::min(needed, THREAD_COUNT);
computeIsoSurface<<<block_count,thread_count>>>( mc.getUploadedData() );
checkCUDAResult();
checkCUDAReturn( cudaDeviceSynchronize() );
mc.unbindVertices();
mc.unbindNormals();
mc.unbindIndices();
checkCUDAReturn( cudaDeviceSynchronize() );
mc.synchronizeWithDevice();
}
MarchingCubes::device_data::device_data(MarchingCubes& mc, float3* v, float3* n, int* indbuf) :
face_count(0),
volume(mc._volume),
point_volume(mc._point_volume),
length(mc._length),
width(mc._width),
depth(mc._depth),
length_x_width(length * width),
cube_size(mc._cube_dimensions),
cube_values(thrust::raw_pointer_cast(mc._cube_values.data())),
neighbor_values(thrust::raw_pointer_cast(mc._neighbor_values.data())),
normal_values(thrust::raw_pointer_cast(mc._normal_values.data())),
vertex_buffer(v),
normal_buffer(n),
index_buffer(indbuf) { ; }
MarchingCubes::MarchingCubes(int L, int W, int D, const core::vec3i& dim) :
_device_data(1),
_length(L),
_width(W),
_depth(D),
_volume(L*W*D),
_point_volume((L+1)*(W+1)*(D+1)),
_cube_values(_point_volume/32 + 1),
_normal_values(_point_volume),
_neighbor_values(_volume/32 + 1) {
_cube_dimensions = make_float3( (float)dim.x / L,
(float)dim.y / W,
(float)dim.z / D );
{
glGenBuffers(1, &_vertex_buffer);
glGenBuffers(1, &_normal_buffer);
glBindBuffer(GL_ARRAY_BUFFER, _vertex_buffer);
core::vec3* temp = new core::vec3[_volume];
for ( int z=0;z<D;z++ ) for ( int y=0;y<W;y++ ) for ( int x=0;x<L;x++ )
temp[x + y*L + z*L*W] = core::vec3( _cube_dimensions.x * x / 2.0f + _cube_dimensions.x / 4.0f,
_cube_dimensions.y * y / 2.0f + _cube_dimensions.y / 4.0f,
_cube_dimensions.z * z / 2.0f + _cube_dimensions.z / 4.0f );
std::cerr << "create buffer of size " << _volume * sizeof(core::vec3) << std::endl;
glBufferData(GL_ARRAY_BUFFER, _volume * sizeof(core::vec3), temp, GL_DYNAMIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, _normal_buffer);
for ( int z=0;z<D;z++ ) for ( int y=0;y<W;y++ ) for ( int x=0;x<L;x++ )
temp[x + y*L + z*L*W] = core::vec3( 1,0,0 );
glBufferData(GL_ARRAY_BUFFER, _volume * sizeof(core::vec3), temp, GL_DYNAMIC_DRAW);
delete[] temp;
glBindBuffer(GL_ARRAY_BUFFER, 0);
}
{
int indices_x = (W+1) * (D+1) * 16;
int indices_y = (L+1) * (D+1) * 16;
int indices_z = (L+1) * (W+1) * 16;
glGenBuffers(1, &_index_buffer);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, _index_buffer);
std::cerr << "create buffer of size " << sizeof(int) * (indices_x + indices_y + indices_z) * 3 << std::endl;
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(int) * (indices_x + indices_y + indices_z) * 3, NULL, GL_DYNAMIC_DRAW);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
}
checkCUDAReturn( cudaGraphicsGLRegisterBuffer(&_cuda_resource_ib, _index_buffer, cudaGraphicsRegisterFlagsNone) );
checkCUDAReturn( cudaGraphicsGLRegisterBuffer(&_cuda_resource_vb, _vertex_buffer, cudaGraphicsRegisterFlagsNone) );
checkCUDAReturn( cudaGraphicsGLRegisterBuffer(&_cuda_resource_nb, _normal_buffer, cudaGraphicsRegisterFlagsNone) );
checkCUDAReturn( cudaDeviceSynchronize() );
}
void MarchingCubes::bindGL() {
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, _vertex_buffer);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
glBindBuffer(GL_ARRAY_BUFFER, _normal_buffer);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, _index_buffer);
}
int* MarchingCubes::bindIndices() {
int* mapped_data;
std::size_t mapped_size;
checkCUDAReturn( cudaGraphicsMapResources( 1, &_cuda_resource_ib, 0 ) );
checkCUDAReturn( cudaGraphicsResourceGetMappedPointer( (void**)&mapped_data, &mapped_size, _cuda_resource_ib ) );
return mapped_data;
}
void MarchingCubes::unbindIndices() {
checkCUDAReturn( cudaGraphicsUnmapResources( 1, &_cuda_resource_ib, 0 ) );
}
float* MarchingCubes::bindVertices() {
float* mapped_data;
std::size_t mapped_size;
checkCUDAReturn( cudaGraphicsMapResources( 1, &_cuda_resource_vb, 0 ) );
checkCUDAReturn( cudaGraphicsResourceGetMappedPointer( (void**)&mapped_data, &mapped_size, _cuda_resource_vb ) );
return mapped_data;
}
void MarchingCubes::unbindVertices() {
checkCUDAReturn( cudaGraphicsUnmapResources( 1, &_cuda_resource_vb, 0 ) );
}
float* MarchingCubes::bindNormals() {
float* mapped_data;
std::size_t mapped_size;
checkCUDAReturn( cudaGraphicsMapResources( 1, &_cuda_resource_nb, 0 ) );
checkCUDAReturn( cudaGraphicsResourceGetMappedPointer( (void**)&mapped_data, &mapped_size, _cuda_resource_nb ) );
return mapped_data;
}
void MarchingCubes::unbindNormals() {
checkCUDAReturn( cudaGraphicsUnmapResources( 1, &_cuda_resource_nb, 0 ) );
}
CUDA_DEVICE_FUNCTION void MarchingCubes::clearVoxelData(device_data& data, int index) {
data.cube_values[index] = 0;
if ( index < data.volume / 32 + 1)
data.neighbor_values[index] = 0;
//printf("clear %d(%d)\r\n", index, index * 32);
//printf("%d - N/A = %x\r\n", index, data.cube_values[index]);
}
CUDA_DEVICE_FUNCTION void MarchingCubes::computeVoxelData(grid::device_data& g, device_data& data, int index) {
int4 cell;
getXYZFromIndex(index, data.length + 1, data.width + 1, &cell.x, &cell.y, &cell.z);
float4 position = make_float4(data.cube_size.x*cell.x, data.cube_size.y*cell.y, data.cube_size.z*cell.z, 0.0f);
//float density = 0.0f;
//iterate over all neighbors
int counted = 0;
/*for (int x = -1; x <= 1; x++) {
if (cell.x + x < 0) continue;
else if (cell.x + x >= g.dimensions.x) break;
for (int y = -1; y <= 1; y++) {
if (cell.y + y < 0) continue;
else if (cell.y + y >= g.dimensions.y ) break;
for (int z = -1; z <= 1; z++) {
if (cell.z + z < 0) continue;
else if (cell.z + z >= g.dimensions.z ) break;
int neighbor_id = 0;
getIndexFromXYZ(cell.x+x, cell.y+y, cell.z+z, g.dimensions.x, g.dimensions.y, &neighbor_id);
int offset = g.cell_offset[neighbor_id];
int count = g.cell_count[neighbor_id];
//printf("source %d to index %d : offset = %d, count = %d\r\n", id, neighbor_id, offset, count);
for (int k = 0; k < count; k++) {
int j = g.sorted_particle_id[k + offset];
float4 dist = position - g.positions[j];
if ( std::sqrt(cuDot4(dist, dist)) <= CONST_H ) {
atomicOr(&data.cube_values[index/32], (1<<(index%32)));
data.normal_values[index] = data.normal_values[index] + g.color_field[j];
counted ++ ;
//data.index_buffer[atomicAdd(&data.face_count, 1)] = index;
}
}
}
}
}*/
data.normal_values[index] = make_float3(0,0,0);
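// Brute-force pass over every particle: mark this grid corner occupied and accumulate its color-field
// normal whenever a particle lies within CONST_H of it (the disabled block above is the cell-neighborhood variant).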
for (int i=0;i<g.particle_count;i++) {
float4 dist = position - g.positions[i];
if ( std::sqrt(cuDot4(dist, dist)) <= CONST_H ) {
atomicOr(&data.cube_values[index/32], (1<<(index%32)));
data.normal_values[index] = data.normal_values[index] + g.color_field[i];
counted ++ ;
}
}
if ( counted != 0 )
data.normal_values[index] = data.normal_values[index] / (float)counted;
}
CUDA_DEVICE_FUNCTION inline bool check_node(Uint* data, int index, int offset) {
return data[index] & (Uint)(1<<offset);
}
CUDA_DEVICE_FUNCTION void MarchingCubes::computeSurfaceNodes(grid::device_data& g, device_data& data, int index) {
int index_div = index / 32;
int index_mod = index % 32;
int x, y, z;
getXYZFromIndex(index, data.length, data.width, &x, &y, &z);
data.vertex_buffer[index] = make_float3(data.cube_size.x * x / 2.0f + data.cube_size.x / 4.0f,
data.cube_size.y * y / 2.0f + data.cube_size.y / 4.0f,
data.cube_size.z * z / 2.0f + data.cube_size.z / 4.0f);
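// Gather the occupancy bits of this cell's 8 corners; the cell is flagged as a surface cell
// (bit in neighbor_values) when its corners are neither all empty nor all full.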
int i[8];
getIndexFromXYZ(x , y, z , data.length + 1, data.width + 1, i + 0);
getIndexFromXYZ(x+1, y, z , data.length + 1, data.width + 1, i + 1);
getIndexFromXYZ(x , y+1, z , data.length + 1, data.width + 1, i + 2);
getIndexFromXYZ(x+1, y+1, z , data.length + 1, data.width + 1, i + 3);
getIndexFromXYZ(x , y , z+1, data.length + 1, data.width + 1, i + 4);
getIndexFromXYZ(x+1, y , z+1, data.length + 1, data.width + 1, i + 5);
getIndexFromXYZ(x , y+1, z+1, data.length + 1, data.width + 1, i + 6);
getIndexFromXYZ(x+1, y+1, z+1, data.length + 1, data.width + 1, i + 7);
int count = 0;
for ( int j=0;j<8;j++ )
count += ((data.cube_values[i[j]/32] & (1<<(i[j]%32))) > 0);
atomicOr(&data.neighbor_values[index_div], ((count!=0&&count!=8)<<(index_mod)));
__syncthreads();
getIndexFromXYZ(max(0, x-1), y, z, data.length, data.width, i + 0);
getIndexFromXYZ(min(x+1, data.length-1), y, z, data.length, data.width, i + 1);
getIndexFromXYZ(x, max(0, y-1), z, data.length, data.width, i + 2);
getIndexFromXYZ(x, min(y+1, data.width-1), z, data.length, data.width, i + 3);
getIndexFromXYZ(x, y, max(0, z-1), data.length, data.width, i + 4);
getIndexFromXYZ(x, y, min(z+1, data.depth-1), data.length, data.width, i + 5);
bool left = check_node(data.neighbor_values, i[0]/32, i[0]%32);
bool right = check_node(data.neighbor_values, i[1]/32, i[1]%32);
bool front = check_node(data.neighbor_values, i[2]/32, i[2]%32);
bool back = check_node(data.neighbor_values, i[3]/32, i[3]%32);
bool up = check_node(data.neighbor_values, i[4]/32, i[4]%32);
bool down = check_node(data.neighbor_values, i[5]/32, i[5]%32);
float total = left + right + front + back + up + down;
if ( total != 0 ) {
float3 highest = make_float3(data.cube_size.x * (x+1) / 2.0f,data.cube_size.y * (y+1) / 2.0f,data.cube_size.z * (z+1) / 2.0f);
float3 lowest = make_float3(data.cube_size.x * x / 2.0f,data.cube_size.y * y / 2.0f,data.cube_size.z * z / 2.0f);
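// Relax the node 10 times toward the average position of its neighboring surface nodes,
// clamping the result to this cell's bounds on every iteration (a simple smoothing pass).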
for ( int k=0;k<10;k++ ) {
data.vertex_buffer[index] = ( left * data.vertex_buffer[i[0]] +
right * data.vertex_buffer[i[1]] +
front * data.vertex_buffer[i[2]] +
back * data.vertex_buffer[i[3]] +
up * data.vertex_buffer[i[4]] +
down * data.vertex_buffer[i[5]] ) / total;
data.vertex_buffer[index].x = max(min(data.vertex_buffer[index].x, highest.x), lowest.x);
data.vertex_buffer[index].y = max(min(data.vertex_buffer[index].y, highest.y), lowest.y);
data.vertex_buffer[index].z = max(min(data.vertex_buffer[index].z, highest.z), lowest.z);
}
}
/*if ( self && left && front ) {
int face_index = atomicAdd(&data.face_count, 3);
printf("%d = %d\r\n", index, face_index);
data.index_buffer[face_index+0] = index;
data.index_buffer[face_index+1] = i[0];
data.index_buffer[face_index+2] = i[2];
}*/
//int count = 8 * ((data.cube_values[i[0]/32] & (1<<(i[0]%32)))>0);
/*printf("--------------------\r\n%4d %4d %4d %4d\r\n%4d %4d %4d %4d\r\n",
i[0], i[1], i[2], i[3],
i[4], i[5], i[6], i[7]);
printf("%d => %d%d%d%d%d%d%d%d\r\n",
index,
((data.cube_values[i[0]/32] & (1<<(i[0]%32))) > 0),
((data.cube_values[i[1]/32] & (1<<(i[1]%32))) > 0),
((data.cube_values[i[2]/32] & (1<<(i[2]%32))) > 0),
((data.cube_values[i[3]/32] & (1<<(i[3]%32))) > 0),
((data.cube_values[i[4]/32] & (1<<(i[4]%32))) > 0),
((data.cube_values[i[5]/32] & (1<<(i[5]%32))) > 0),
((data.cube_values[i[6]/32] & (1<<(i[6]%32))) > 0),
((data.cube_values[i[7]/32] & (1<<(i[7]%32))) > 0)
);*/
//printf("%d %d %d = %d\r\n", x,y,z, count);
//printf("-- %d = %x (%u)\r\n", index/32, data.neighbor_values[index/32], ((data.neighbor_values[(index)/32] & ((count!=0&&count!=8)<<((index)%32)))>0) );
if ( data.neighbor_values[(index)/32] & ((count!=0&&count!=8)<<((index)%32)) ) {
//int new_index = atomicAdd(&data.face_count, 1);
//data.index_buffer[new_index] = index;
data.normal_buffer[index]= (data.normal_values[i[0]] +
data.normal_values[i[1]] +
data.normal_values[i[2]] +
data.normal_values[i[3]] +
data.normal_values[i[4]] +
data.normal_values[i[5]] +
data.normal_values[i[6]] +
data.normal_values[i[7]])/8.0f;
}
}
CUDA_DEVICE_FUNCTION void MarchingCubes::computeIsoSurface(device_data& data, int index) {
int real_index = index / 2;
int index_div = real_index / 32;
int index_mod = real_index % 32;
int x, y, z;
getXYZFromIndex(real_index, data.length, data.width, &x, &y, &z);
//printf("%d = %d %d %d\r\n", index, x, y, z);
bool self = check_node(data.neighbor_values, index_div, index_mod);
int i[6];
int x2 = ((x+1) % data.length) ? x+1 : -1,
y2 = ((y+1) % data.width) ? y+1 : -1,
z2 = ((z+1) % data.depth) ? z+1 : -1;
getIndexFromXYZ(x2, y, z , data.length, data.width, i + 0);
getIndexFromXYZ(x2, y2, z , data.length, data.width, i + 1);
getIndexFromXYZ(x , y2, z , data.length, data.width, i + 2);
getIndexFromXYZ(x , y2, z2, data.length, data.width, i + 3);
getIndexFromXYZ(x , y , z2, data.length, data.width, i + 4);
getIndexFromXYZ(x2, y , z2, data.length, data.width, i + 5);
bool left = x2 != -1 && check_node(data.neighbor_values, i[0]/32, i[0]%32);
bool right = x2 != -1 && check_node(data.neighbor_values, i[1]/32, i[1]%32);
bool front = y2 != -1 && check_node(data.neighbor_values, i[2]/32, i[2]%32);
bool back = y2 != -1 && check_node(data.neighbor_values, i[3]/32, i[3]%32);
bool up = z2 != -1 && check_node(data.neighbor_values, i[4]/32, i[4]%32);
bool down = z2 != -1 && check_node(data.neighbor_values, i[5]/32, i[5]%32);
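// Two threads per node: the even-indexed thread emits the triangles that include this node,
// the odd-indexed thread emits the other halves of those quads using the diagonal neighbors.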
if ( !(index % 2) ) {
if ( self && left && front ) {
int face_index = atomicAdd(&data.face_count, 3);
//printf("%d = %d\r\n", index, face_index);
data.index_buffer[face_index+0] = real_index;
data.index_buffer[face_index+1] = i[0];
data.index_buffer[face_index+2] = i[2];
}
if ( self && left && up ) {
int face_index = atomicAdd(&data.face_count, 3);
//printf("%d = %d\r\n", index, face_index);
data.index_buffer[face_index+0] = real_index;
data.index_buffer[face_index+1] = i[0];
data.index_buffer[face_index+2] = i[4];
}
if ( self && front && up ) {
int face_index = atomicAdd(&data.face_count, 3);
//printf("%d = %d\r\n", index, face_index);
data.index_buffer[face_index+0] = real_index;
data.index_buffer[face_index+1] = i[2];
data.index_buffer[face_index+2] = i[4];
}
} else {
if ( right && left && front ) {
int face_index = atomicAdd(&data.face_count, 3);
//printf("%d = %d\r\n", index, face_index);
data.index_buffer[face_index+0] = i[0];
data.index_buffer[face_index+1] = i[1];
data.index_buffer[face_index+2] = i[2];
}
if ( down && left && up ) {
int face_index = atomicAdd(&data.face_count, 3);
//printf("%d = %d\r\n", index, face_index);
data.index_buffer[face_index+0] = i[0];
data.index_buffer[face_index+1] = i[4];
data.index_buffer[face_index+2] = i[5];
}
if ( back && front && up ) {
int face_index = atomicAdd(&data.face_count, 3);
//printf("%d = %d\r\n", index, face_index);
data.index_buffer[face_index+0] = i[2];
data.index_buffer[face_index+1] = i[3];
data.index_buffer[face_index+2] = i[4];
}
}
//printf("++ %d = %x (%u)\r\n", index/32, data.neighbor_values[index/32], self);
/*if ( self ) {
int x, y, z;
getXYZFromIndex(index, data.length, data.width, &x, &y, &z);
int i[6];
int x2 = ((x+1) % data.length) ? x+1 : -1,
y2 = ((y+1) % data.width) ? y+1 : -1,
z2 = ((z+1) % data.depth) ? z+1 : -1;
getIndexFromXYZ(x2, y, z , data.length, data.width, i + 0);
getIndexFromXYZ(x2, y2, z , data.length, data.width, i + 1);
getIndexFromXYZ(x , y2, z , data.length, data.width, i + 2);
getIndexFromXYZ(x , y2, z2, data.length, data.width, i + 3);
getIndexFromXYZ(x , y , z2, data.length, data.width, i + 4);
getIndexFromXYZ(x2, y , z2, data.length, data.width, i + 5);
bool left = x2 != -1 && check_node(data.neighbor_values, i[0]/32, i[0]%32);
bool right = x2 != -1 && check_node(data.neighbor_values, i[1]/32, i[1]%32);
bool front = y2 != -1 && check_node(data.neighbor_values, i[2]/32, i[2]%32);
bool back = y2 != -1 && check_node(data.neighbor_values, i[3]/32, i[3]%32);
bool up = z2 != -1 && check_node(data.neighbor_values, i[4]/32, i[4]%32);
bool down = z2 != -1 && check_node(data.neighbor_values, i[5]/32, i[5]%32);
if ( left && front) {
int face_index = atomicAdd(&data.face_count, 3);
//printf("%d = %d\r\n", index, face_index);
data.index_buffer[face_index+0] = index;
data.index_buffer[face_index+1] = i[0];
data.index_buffer[face_index+2] = i[2];
}
if ( left && up ) {
int face_index = atomicAdd(&data.face_count, 3);
//printf("%d = %d\r\n", index, face_index);
data.index_buffer[face_index+0] = index;
data.index_buffer[face_index+1] = i[0];
data.index_buffer[face_index+2] = i[4];
}
if ( front && up ) {
int face_index = atomicAdd(&data.face_count, 3);
//printf("%d = %d\r\n", index, face_index);
data.index_buffer[face_index+0] = index;
data.index_buffer[face_index+1] = i[2];
data.index_buffer[face_index+2] = i[4];
}
}*/
}
}
|
f06bfafe662a55cb9babc3892e109f825c73659a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <string>
#include <iostream>
#include <iomanip>
#include <fstream>
#include <vector>
#include <locale>
#include <algorithm>
#include <cstdio>
//-------------------------------------------------
//#ifndef __HIPCC__
//#define __HIPCC__
//#endif
//#include "hip/hip_runtime.h"
//#include "device_launch_parameters.h"
//#include <hip/hip_runtime.h>
//#include <hip/device_functions.h>
//#include <hip/hip_runtime_api.h>
const int maxWordLength = 10;
const int arrayCount = 5;
const int inArrayCount = 11;
//std::vector<std::vector<Stud>> Read(std::string fileName);
//__device__ void charCpy(int index, char *dataWord, char *name);
//__global__ void addKernel(Stud **Q, Stud *Ans);
class Stud {
public:
char name[maxWordLength * arrayCount] = {};
int grades;
double average;
Stud() {}
Stud(char name[], int grades, double average) {
strcpy(this->name, name);
this->grades = grades;
this->average = average;
}
};
__device__ void charCpy(int index, char *dataWord, char *name) {
int tempi = 0;
while (dataWord[tempi] != '\0')
{
name[index] = dataWord[tempi];
tempi++;
index++;
}
}
__global__ void addKernel(Stud **Q, Stud *Ans)
{
int i = threadIdx.x;
__shared__ char name[inArrayCount][arrayCount * maxWordLength];
__shared__ int nmb[inArrayCount];
__shared__ double average[inArrayCount];
nmb[i] = 0;
average[i] = 0;
for (int j = 0; j < arrayCount * maxWordLength; j++)
{
name[i][j] = '\0';
}
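// Thread i gathers the i-th student from each of the arrayCount input groups:
// the names are concatenated into the shared buffer, the grade counts and the averages are summed.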
for (int j = 0; j < arrayCount; j++)
{
for (int q = 0; q < maxWordLength * arrayCount; q++)
{
if (name[i][q] == NULL)
{
charCpy(q, Q[j][i].name, name[i]);
break;
}
}
nmb[i] += Q[j][i].grades;
average[i] = average[i] + Q[j][i].average;
}
charCpy(0, name[i], Ans[i].name);
Ans[i].grades = nmb[i];
Ans[i].average = average[i];
//printf("%d-%s", i, Ans[i].name);
}
std::vector<std::vector<Stud>> Read(std::string fileName/*, std::vector<std::vector<Stud>> A*/)
{
std::vector<std::vector<Stud>> A;
std::ifstream in;
in.open(fileName);
while (!in.eof())
{
int len;
in >> len;
std::vector<Stud> temp;
for (size_t i = 0; i < len; i++)
{
std::string name;
in >> name;
int nmb;
in >> nmb;
double average;
in >> average;
char tempWord[maxWordLength] = {};
std::transform(name.begin(), name.end(), name.begin(), ::tolower);// convert the name to lowercase
strcpy(tempWord, name.c_str());// copy the string into a fixed-size char buffer
temp.push_back(Stud(tempWord, nmb, average));
}
A.push_back(temp);
}
in.close();
return A;
}
void Write(std::string fileName, std::vector<std::vector<Stud>> dataArray, Stud *Ans)
{
std::ofstream out;
out.open(fileName);
int a = 0;
out << "Pradiniai duomenys" << std::endl;
for (std::vector<Stud> data : dataArray)
{
//out << data[a].getLessonName() << endl;
out << "Nr. Vardas Pazymiu_skaicius Vidurkis" << std::endl;
int s = 0;
for (Stud student : data)
{
s++;
out << std::left << std::setw(4) << s << std::setw(10) << student.name << std::setw(18) << student.grades << student.average << std::endl;
}
a++;
}
out << std::endl;
out << "Rezultatas" << std::endl;
out << std::left << std::setw(40) << "Vardai" << std::setw(10) << "Pazymiai" << "Vidurkiai" << std::endl;
for (int i = 0; i < inArrayCount; i++)
{
out << std::left << std::setw(40) << Ans[i].name << std::setw(5) << Ans[i].grades << Ans[i].average << std::endl;
}
out << std::endl;
out.close();
}
int main()
{
std::vector<std::vector<Stud>> A;//data
/*const int arrayCount = 5;
const int inArrayCount = 11;*/
A = Read("IFF68_LaurinaitisTadas_L4.txt");
std::vector<std::vector<Stud>> dataArray;//data
dataArray = A;
Stud arrayA[arrayCount][inArrayCount];
for (int i = 0; i < arrayCount; i++)
{
for (int j = 0; j < inArrayCount; j++)
{
arrayA[i][j] = A[i][j];
}
}
//dataArray = arrayA;
Stud** dev_Q;
hipMalloc((void**)&dev_Q, arrayCount * sizeof(Stud*));//for stud arrays 5
for (int i = 0; i < arrayCount; i++)
{
Stud* temp_Q = nullptr;
hipMalloc((void**)&temp_Q, inArrayCount * sizeof(Stud));
hipMemcpy(temp_Q, &arrayA[i], inArrayCount * sizeof(Stud), hipMemcpyHostToDevice);
hipMemcpy(&dev_Q[i], &temp_Q, sizeof(Stud*), hipMemcpyHostToDevice);
}
Stud Ans[inArrayCount] = {};
Stud *dev_Ans;
hipMalloc((void**)&dev_Ans, inArrayCount * sizeof(Stud));
hipMemcpy(dev_Ans, Ans, inArrayCount * sizeof(Stud), hipMemcpyHostToDevice);
//addKernel << < 1, arrayCount >> > (dev_Q, dev_Ans);
addKernel << < 1, inArrayCount >> > (dev_Q, dev_Ans);
hipMemcpy(Ans, dev_Ans, inArrayCount * sizeof(Stud), hipMemcpyDeviceToHost);
hipFree(dev_Q);
hipFree(dev_Ans);
for (int i = 0; i < inArrayCount; i++)
{
std::cout << Ans[i].name << " " << Ans[i].grades << " " << Ans[i].average << " ";
std::cout << std::endl;
}
Write("IFF68_LaurinaitisTadas_L4a_rez.txt", dataArray, Ans);
return 0;
}
|
f06bfafe662a55cb9babc3892e109f825c73659a.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <string>
#include <iostream>
#include <iomanip>
#include <fstream>
#include <vector>
#include <locale>
#include <algorithm>
#include <cstdio>
//-------------------------------------------------
//#ifndef __CUDACC__
//#define __CUDACC__
//#endif
//#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
//#include <cuda.h>
//#include <device_functions.h>
//#include <cuda_runtime_api.h>
const int maxWordLength = 10;
const int arrayCount = 5;
const int inArrayCount = 11;
//std::vector<std::vector<Stud>> Read(std::string fileName);
//__device__ void charCpy(int index, char *dataWord, char *name);
//__global__ void addKernel(Stud **Q, Stud *Ans);
class Stud {
public:
char name[maxWordLength * arrayCount] = {};
int grades;
double average;
Stud() {}
Stud(char name[], int grades, double average) {
strcpy(this->name, name);
this->grades = grades;
this->average = average;
}
};
__device__ void charCpy(int index, char *dataWord, char *name) {
int tempi = 0;
while (dataWord[tempi] != '\0')
{
name[index] = dataWord[tempi];
tempi++;
index++;
}
}
__global__ void addKernel(Stud **Q, Stud *Ans)
{
int i = threadIdx.x;
__shared__ char name[inArrayCount][arrayCount * maxWordLength];
__shared__ int nmb[inArrayCount];
__shared__ double average[inArrayCount];
nmb[i] = 0;
average[i] = 0;
for (int j = 0; j < arrayCount * maxWordLength; j++)
{
name[i][j] = '\0';
}
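// Thread i gathers the i-th student from each of the arrayCount input groups:
// the names are concatenated into the shared buffer, the grade counts and the averages are summed.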
for (int j = 0; j < arrayCount; j++)
{
for (int q = 0; q < maxWordLength * arrayCount; q++)
{
if (name[i][q] == NULL)
{
charCpy(q, Q[j][i].name, name[i]);
break;
}
}
nmb[i] += Q[j][i].grades;
average[i] = average[i] + Q[j][i].average;
}
charCpy(0, name[i], Ans[i].name);
Ans[i].grades = nmb[i];
Ans[i].average = average[i];
//printf("%d-%s", i, Ans[i].name);
}
std::vector<std::vector<Stud>> Read(std::string fileName/*, std::vector<std::vector<Stud>> A*/)
{
std::vector<std::vector<Stud>> A;
std::ifstream in;
in.open(fileName);
while (!in.eof())
{
int len;
in >> len;
std::vector<Stud> temp;
for (size_t i = 0; i < len; i++)
{
std::string name;
in >> name;
int nmb;
in >> nmb;
double average;
in >> average;
char tempWord[maxWordLength] = {};
std::transform(name.begin(), name.end(), name.begin(), ::tolower);// convert the name to lowercase
strcpy(tempWord, name.c_str());// copy the string into a fixed-size char buffer
temp.push_back(Stud(tempWord, nmb, average));
}
A.push_back(temp);
}
in.close();
return A;
}
void Write(std::string fileName, std::vector<std::vector<Stud>> dataArray, Stud *Ans)
{
std::ofstream out;
out.open(fileName);
int a = 0;
out << "Pradiniai duomenys" << std::endl;
for (std::vector<Stud> data : dataArray)
{
//out << data[a].getLessonName() << endl;
out << "Nr. Vardas Pazymiu_skaicius Vidurkis" << std::endl;
int s = 0;
for (Stud student : data)
{
s++;
out << std::left << std::setw(4) << s << std::setw(10) << student.name << std::setw(18) << student.grades << student.average << std::endl;
}
a++;
}
out << std::endl;
out << "Rezultatas" << std::endl;
out << std::left << std::setw(40) << "Vardai" << std::setw(10) << "Pazymiai" << "Vidurkiai" << std::endl;
for (int i = 0; i < inArrayCount; i++)
{
out << std::left << std::setw(40) << Ans[i].name << std::setw(5) << Ans[i].grades << Ans[i].average << std::endl;
}
out << std::endl;
out.close();
}
int main()
{
std::vector<std::vector<Stud>> A;//data
/*const int arrayCount = 5;
const int inArrayCount = 11;*/
A = Read("IFF68_LaurinaitisTadas_L4.txt");
std::vector<std::vector<Stud>> dataArray;//data
dataArray = A;
Stud arrayA[arrayCount][inArrayCount];
for (int i = 0; i < arrayCount; i++)
{
for (int j = 0; j < inArrayCount; j++)
{
arrayA[i][j] = A[i][j];
}
}
//dataArray = arrayA;
Stud** dev_Q;
cudaMalloc((void**)&dev_Q, arrayCount * sizeof(Stud*));//for stud arrays 5
for (int i = 0; i < arrayCount; i++)
{
Stud* temp_Q = nullptr;
cudaMalloc((void**)&temp_Q, inArrayCount * sizeof(Stud));
cudaMemcpy(temp_Q, &arrayA[i], inArrayCount * sizeof(Stud), cudaMemcpyHostToDevice);
cudaMemcpy(&dev_Q[i], &temp_Q, sizeof(Stud*), cudaMemcpyHostToDevice);
}
Stud Ans[inArrayCount] = {};
Stud *dev_Ans;
cudaMalloc((void**)&dev_Ans, inArrayCount * sizeof(Stud));
cudaMemcpy(dev_Ans, Ans, inArrayCount * sizeof(Stud), cudaMemcpyHostToDevice);
//addKernel << < 1, arrayCount >> > (dev_Q, dev_Ans);
addKernel << < 1, inArrayCount >> > (dev_Q, dev_Ans);
cudaMemcpy(Ans, dev_Ans, inArrayCount * sizeof(Stud), cudaMemcpyDeviceToHost);
cudaFree(dev_Q);
cudaFree(dev_Ans);
for (int i = 0; i < inArrayCount; i++)
{
std::cout << Ans[i].name << " " << Ans[i].grades << " " << Ans[i].average << " ";
std::cout << std::endl;
}
Write("IFF68_LaurinaitisTadas_L4a_rez.txt", dataArray, Ans);
return 0;
}
|
51a87d000224fe935a3cd8be26daf1b417da2de1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <primitiv/config.h>
#include <primitiv/devices/cuda16/device.h>
#include <primitiv/devices/cuda16/ops/common.h>
#include <primitiv/internal/cuda/utils.h>
namespace {
__global__ void transpose_fw_dev(
const half *px, std::uint32_t rows, std::uint32_t cols, half *py) {
const std::uint32_t i = IDX;
const std::uint32_t j = IDY;
std::uint32_t ofs = blockIdx.z * rows * cols;
if (i < rows && j < cols) py[ofs + j + i * cols] = px[ofs + i + j * rows];
}
__global__ void transpose_bw_dev(
const half *py, std::uint32_t rows, std::uint32_t cols, half *px) {
const std::uint32_t i = IDX;
const std::uint32_t j = IDY;
std::uint32_t ofs = blockIdx.z * rows * cols;
if (i < rows && j < cols) {
const std::size_t ox = ofs + i + j * rows;
const std::size_t oy = ofs + j + i * cols;
INPLACE_ADD(px + ox, ::__half2float(py[oy]));
}
}
} // namespace
namespace primitiv {
namespace devices {
void CUDA16::transpose_fw_impl(const Tensor &x, Tensor &y) {
const std::uint32_t rows = x.shape()[0];
const std::uint32_t cols = x.shape()[1];
const std::uint32_t bs = x.shape().batch();
const std::uint32_t g1 = GRID_SIZE(rows, dim2_x_);
const std::uint32_t g2 = GRID_SIZE(cols, dim2_y_);
CUDA_CALL(::hipSetDevice(dev_id_));
hipLaunchKernelGGL(( ::transpose_fw_dev), dim3(dim3(g1, g2, bs)), dim3(dim3(dim2_x_, dim2_y_, 1)), 0, 0,
CDATA(half, x), rows, cols, MDATA(half, y));
}
void CUDA16::transpose_bw_impl(
const Tensor &, const Tensor &, const Tensor &gy, Tensor &gx) {
const std::uint32_t rows = gx.shape()[0];
const std::uint32_t cols = gx.shape()[1];
const std::uint32_t bs = gx.shape().batch();
const std::uint32_t g1 = GRID_SIZE(rows, dim2_x_);
const std::uint32_t g2 = GRID_SIZE(cols, dim2_y_);
CUDA_CALL(::hipSetDevice(dev_id_));
hipLaunchKernelGGL(( ::transpose_bw_dev), dim3(dim3(g1, g2, bs)), dim3(dim3(dim2_x_, dim2_y_, 1)), 0, 0,
CDATA(half, gy), rows, cols, MDATA(half, gx));
}
} // namespace devices
} // namespace primitiv
|
51a87d000224fe935a3cd8be26daf1b417da2de1.cu
|
#include <primitiv/config.h>
#include <primitiv/devices/cuda16/device.h>
#include <primitiv/devices/cuda16/ops/common.h>
#include <primitiv/internal/cuda/utils.h>
namespace {
__global__ void transpose_fw_dev(
const half *px, std::uint32_t rows, std::uint32_t cols, half *py) {
const std::uint32_t i = IDX;
const std::uint32_t j = IDY;
std::uint32_t ofs = blockIdx.z * rows * cols;
if (i < rows && j < cols) py[ofs + j + i * cols] = px[ofs + i + j * rows];
}
__global__ void transpose_bw_dev(
const half *py, std::uint32_t rows, std::uint32_t cols, half *px) {
const std::uint32_t i = IDX;
const std::uint32_t j = IDY;
std::uint32_t ofs = blockIdx.z * rows * cols;
if (i < rows && j < cols) {
const std::size_t ox = ofs + i + j * rows;
const std::size_t oy = ofs + j + i * cols;
INPLACE_ADD(px + ox, ::__half2float(py[oy]));
}
}
} // namespace
namespace primitiv {
namespace devices {
void CUDA16::transpose_fw_impl(const Tensor &x, Tensor &y) {
const std::uint32_t rows = x.shape()[0];
const std::uint32_t cols = x.shape()[1];
const std::uint32_t bs = x.shape().batch();
const std::uint32_t g1 = GRID_SIZE(rows, dim2_x_);
const std::uint32_t g2 = GRID_SIZE(cols, dim2_y_);
CUDA_CALL(::cudaSetDevice(dev_id_));
::transpose_fw_dev<<<dim3(g1, g2, bs), dim3(dim2_x_, dim2_y_, 1)>>>(
CDATA(half, x), rows, cols, MDATA(half, y));
}
void CUDA16::transpose_bw_impl(
const Tensor &, const Tensor &, const Tensor &gy, Tensor &gx) {
const std::uint32_t rows = gx.shape()[0];
const std::uint32_t cols = gx.shape()[1];
const std::uint32_t bs = gx.shape().batch();
const std::uint32_t g1 = GRID_SIZE(rows, dim2_x_);
const std::uint32_t g2 = GRID_SIZE(cols, dim2_y_);
CUDA_CALL(::cudaSetDevice(dev_id_));
::transpose_bw_dev<<<dim3(g1, g2, bs), dim3(dim2_x_, dim2_y_, 1)>>>(
CDATA(half, gy), rows, cols, MDATA(half, gx));
}
} // namespace devices
} // namespace primitiv
|
d6307177183b101d9f76b4791912297d2eb5eb2d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*****************************************************************************
Example : cuda-matrix-matrix-multiplication.cu
Objective : Write a CUDA Program to perform Matrix Matrix multiplication.
Input : None
Output : Execution time in seconds , Gflops achieved
Created : Aug 2011
E-mail : RarchK
****************************************************************************/
#include<stdio.h>
#include<cuda.h>
#define BLOCKSIZE 16
#define SIZE 128
hipDeviceProp_t deviceProp;
hipEvent_t start,stop;
hipError_t ret;
double *host_MatA,*host_MatB,*host_MatC,*CPU_Result;
double *device_MatA,*device_MatB,*device_MatC;
int size = SIZE;
float elapsedTime;
double Tsec,gflops;
/*kernel function*/
__global__ void Muld(double* A, double* B, int wA, int wB, double* C)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int aBegin = wA * BLOCKSIZE * by;
int aEnd = aBegin + wA - 1;
int aStep = BLOCKSIZE;
int bBegin = BLOCKSIZE * bx;
int bStep = BLOCKSIZE * wB;
double Csub = 0;
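// Tiled multiplication: each step loads one BLOCKSIZE x BLOCKSIZE tile of A and B into shared memory,
// then every thread accumulates the partial dot product for its output element.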
for(int a = aBegin, b = bBegin; a <= aEnd ; a += aStep, b += bStep)
{
__shared__ double As[BLOCKSIZE][BLOCKSIZE];
__shared__ double Bs[BLOCKSIZE][BLOCKSIZE];
As[ty][tx] = A[a + wA * ty + tx];
Bs[ty][tx] = B[b+ wB * ty + tx];
__syncthreads();
for(int k= 0; k< BLOCKSIZE; ++k)
Csub += As[ty][k] * Bs[k][tx];
__syncthreads();
}
int c = wB * BLOCKSIZE * by + BLOCKSIZE * bx;
C[ c+ wB * ty + tx] = Csub;
}/* end of Muld device code */
/*mem error*/
void mem_error(char *arrayname, char *benchmark, int len, char *type)
{
printf("\nMemory not sufficient to allocate for array %s\n\tBenchmark : %s \n\tMemory requested = %d number of %s elements\n",arrayname, benchmark, len, type);
exit(-1);
}
/*cuda safe call*/
void CUDA_SAFE_CALL(hipError_t call)
{
hipError_t ret = call;
//printf("RETURN FROM THE CUDA CALL:%d\t:",ret);
switch(ret)
{
case hipSuccess:
// printf("Success\n");
break;
/* case hipErrorInvalidValue:
{
printf("ERROR: InvalidValue:%i.\n",__LINE__);
exit(-1);
break;
}
case hipErrorInvalidDevicePointer:
{
printf("ERROR:Invalid Device pointeri:%i.\n",__LINE__);
exit(-1);
break;
}
case hipErrorInvalidMemcpyDirection:
{
printf("ERROR:Invalid memcpy direction:%i.\n",__LINE__);
exit(-1);
break;
} */
default:
{
printf(" ERROR at line :%i.%d' ' %s\n",__LINE__,ret,hipGetErrorString(ret));
exit(-1);
break;
}
}
}
/* void SetUp_CUDA_Exe_Config() */
void check_block_grid_dim(hipDeviceProp_t devProp,dim3 blockDim,dim3 gridDim)
{
if( blockDim.x >= devProp.maxThreadsDim[0] || blockDim.y >= devProp.maxThreadsDim[1] || blockDim.z >= devProp.maxThreadsDim[2] )
{
printf("\nBlock Dimensions exceed the maximum limits:%d * %d * %d \n",devProp.maxThreadsDim[0],devProp.maxThreadsDim[1],devProp.maxThreadsDim[2]);
exit(-1);
}
if( gridDim.x >= devProp.maxGridSize[0] || gridDim.y >= devProp.maxGridSize[1] || gridDim.z >= devProp.maxGridSize[2] )
{
printf("\nGrid Dimensions exceed the maximum limits:%d * %d * %d \n",devProp.maxGridSize[0],devProp.maxGridSize[1],devProp.maxGridSize[2]);
exit(-1);
}
}
/*function to free memory*/
void dfree(double * arr[],int len)
{
for(int i=0;i<len;i++)
CUDA_SAFE_CALL(hipFree(arr[i]));
printf("mem freed\n");
}
/*calculate Gflops*/
double calculate_gflops(double &Tsec)
{
//printf("time taken is %.8lf\n",Tsec);
double gflops=(1.0e-9 * (( 1.0 * size*size*size )/Tsec));
//printf("Gflops is \t%f\n",gflops);
return gflops;
}
/*get device count*/
int get_DeviceCount()
{
int count;
hipGetDeviceCount(&count);
return count;
}
/*launch kernel function is called in main()*/
void launch_kernel_MatMatMult()
{
/* threads_per_block= BLOCKSIZE, blocks_per_grid=size/dimBlock */
printf("in launch kernel\n");
dim3 dimBlock(BLOCKSIZE,BLOCKSIZE);
dim3 dimGrid(size/dimBlock.x,size/dimBlock.y);
//checking the maximum limit of blocksize and gridsize-------------------
check_block_grid_dim(deviceProp,dimBlock,dimGrid);
printf("after check\n");
hipEventRecord(start,0);
hipLaunchKernelGGL(( Muld), dim3(dimGrid),dim3(dimBlock), 0, 0, device_MatA,device_MatB,size,size,device_MatC);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime,start,stop);
Tsec=elapsedTime*1.0e-3;
calculate_gflops(Tsec);
}
/* Fill in the vector with double precision values */
void fill_dp_vector(double* vec,int size)
{
int ind;
for(ind=0;ind<size;ind++)
vec[ind]=drand48();
}
/*function to print on the screen*/
void print_on_screen(char * program_name,float tsec,double gflops,int size,int flag)//flag=1 if gflops has been calculated else flag =0
{
printf("\n---------------%s----------------\n",program_name);
printf("\tSIZE\t TIME_SEC\t Gflops\n");
if(flag==1)
printf("\t%d\t%f\t%lf\t",size,tsec,gflops);
else
printf("\t%d\t%lf\t%lf\t",size,"---","---");
}
/*-----main()-----*/
int main()
{
int device_Count=get_DeviceCount();
printf("\n\nNUmber of Devices : %d\n\n", device_Count);
// Device Selection, Device 1: Tesla C1060
hipSetDevice(1);
int device;
// Current Device Detection
hipGetDevice(&device);
hipGetDeviceProperties(&deviceProp,device);
printf("Using device %d: %s \n", device, deviceProp.name);
/* allocate memory for GPU events
start = (hipEvent_t) malloc (sizeof(hipEvent_t));
stop = (hipEvent_t) malloc (sizeof(hipEvent_t));
if(start==NULL)
mem_error("start","MatMatMult",1,"hipEvent_t");
if(stop==NULL)
mem_error("stop","MatMatMult",1,"hipEvent_t");*/
//event creation...
CUDA_SAFE_CALL(hipEventCreate (&start));
CUDA_SAFE_CALL(hipEventCreate (&stop));
/*allocating the memory for each matrix */
host_MatA = new double[size*size];
host_MatB = new double[size*size];
host_MatC = new double[size*size];
CPU_Result= new double[size*size];
if(host_MatA==NULL)
mem_error("host_MatA","MatMatMult",size,"double");
if(host_MatB==NULL)
mem_error("host_MatB","MatMatMult",size,"double");
if(host_MatC==NULL)
mem_error("host_MatC","MatMatMult",size,"double");
if(CPU_Result==NULL)
mem_error("CPU_Result","MatMatMult",size,"double");
//--------filling the matrix with double precision-----------
fill_dp_vector(host_MatA,size*size);
fill_dp_vector(host_MatB,size*size);
//allocating memory on GPU
CUDA_SAFE_CALL(hipMalloc( (void**)&device_MatA,size*size*sizeof(double)));
CUDA_SAFE_CALL(hipMalloc( (void**)&device_MatB, size*size*sizeof(double)));
CUDA_SAFE_CALL(hipMalloc( (void**)&device_MatC,size*size*sizeof(double)));
// copying host matrix to device matrix
CUDA_SAFE_CALL(hipMemcpy((void*)device_MatA, (void*)host_MatA, size*size* sizeof(double) , hipMemcpyHostToDevice ));
CUDA_SAFE_CALL(hipMemcpy((void*)device_MatB, (void*)host_MatB, size*size*sizeof(double) , hipMemcpyHostToDevice ));
launch_kernel_MatMatMult(); //launching the kernel
//retrieving result from device
CUDA_SAFE_CALL(hipMemcpy((void*)host_MatC, (void*)device_MatC, size*size*sizeof(double) , hipMemcpyDeviceToHost ));
//comparing result of CPU-GPU
//relError(CPU_Result,host_MatC,size*size);
//printing the result on screen
print_on_screen("MAT MAT Mult",Tsec,calculate_gflops(Tsec),size,1);
//free the device memory----------
double *array[3];
array[0]=device_MatA;
array[1]=device_MatB;
array[2]=device_MatC;
dfree(array,3);
//free host memory----------
delete[] host_MatA;
delete[] host_MatB;
delete[] host_MatC;
delete[] CPU_Result;
}// end of main
|
d6307177183b101d9f76b4791912297d2eb5eb2d.cu
|
/*****************************************************************************
Example : cuda-matrix-matrix-multiplication.cu
Objective : Write a CUDA Program to perform Matrix Matrix multiplication.
Input : None
Output : Execution time in seconds , Gflops achieved
Created : Aug 2011
E-mail : RarchK
****************************************************************************/
#include<stdio.h>
#include<cuda.h>
#define BLOCKSIZE 16
#define SIZE 128
cudaDeviceProp deviceProp;
cudaEvent_t start,stop;
cudaError_t ret;
double *host_MatA,*host_MatB,*host_MatC,*CPU_Result;
double *device_MatA,*device_MatB,*device_MatC;
int size = SIZE;
float elapsedTime;
double Tsec,gflops;
/*kernel function*/
__global__ void Muld(double* A, double* B, int wA, int wB, double* C)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int aBegin = wA * BLOCKSIZE * by;
int aEnd = aBegin + wA - 1;
int aStep = BLOCKSIZE;
int bBegin = BLOCKSIZE * bx;
int bStep = BLOCKSIZE * wB;
double Csub = 0;
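// Tiled multiplication: each step loads one BLOCKSIZE x BLOCKSIZE tile of A and B into shared memory,
// then every thread accumulates the partial dot product for its output element.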
for(int a = aBegin, b = bBegin; a <= aEnd ; a += aStep, b += bStep)
{
__shared__ double As[BLOCKSIZE][BLOCKSIZE];
__shared__ double Bs[BLOCKSIZE][BLOCKSIZE];
As[ty][tx] = A[a + wA * ty + tx];
Bs[ty][tx] = B[b+ wB * ty + tx];
__syncthreads();
for(int k= 0; k< BLOCKSIZE; ++k)
Csub += As[ty][k] * Bs[k][tx];
__syncthreads();
}
int c = wB * BLOCKSIZE * by + BLOCKSIZE * bx;
C[ c+ wB * ty + tx] = Csub;
}/* end of Muld device code */
/*mem error*/
void mem_error(char *arrayname, char *benchmark, int len, char *type)
{
printf("\nMemory not sufficient to allocate for array %s\n\tBenchmark : %s \n\tMemory requested = %d number of %s elements\n",arrayname, benchmark, len, type);
exit(-1);
}
/*cuda safe call*/
void CUDA_SAFE_CALL(cudaError_t call)
{
cudaError_t ret = call;
//printf("RETURN FROM THE CUDA CALL:%d\t:",ret);
switch(ret)
{
case cudaSuccess:
// printf("Success\n");
break;
/* case cudaErrorInvalidValue:
{
printf("ERROR: InvalidValue:%i.\n",__LINE__);
exit(-1);
break;
}
case cudaErrorInvalidDevicePointer:
{
printf("ERROR:Invalid Device pointeri:%i.\n",__LINE__);
exit(-1);
break;
}
case cudaErrorInvalidMemcpyDirection:
{
printf("ERROR:Invalid memcpy direction:%i.\n",__LINE__);
exit(-1);
break;
} */
default:
{
printf(" ERROR at line :%i.%d' ' %s\n",__LINE__,ret,cudaGetErrorString(ret));
exit(-1);
break;
}
}
}
/* void SetUp_CUDA_Exe_Config() */
void check_block_grid_dim(cudaDeviceProp devProp,dim3 blockDim,dim3 gridDim)
{
if( blockDim.x >= devProp.maxThreadsDim[0] || blockDim.y >= devProp.maxThreadsDim[1] || blockDim.z >= devProp.maxThreadsDim[2] )
{
printf("\nBlock Dimensions exceed the maximum limits:%d * %d * %d \n",devProp.maxThreadsDim[0],devProp.maxThreadsDim[1],devProp.maxThreadsDim[2]);
exit(-1);
}
if( gridDim.x >= devProp.maxGridSize[0] || gridDim.y >= devProp.maxGridSize[1] || gridDim.z >= devProp.maxGridSize[2] )
{
printf("\nGrid Dimensions exceed the maximum limits:%d * %d * %d \n",devProp.maxGridSize[0],devProp.maxGridSize[1],devProp.maxGridSize[2]);
exit(-1);
}
}
/*function to free memory*/
void dfree(double * arr[],int len)
{
for(int i=0;i<len;i++)
CUDA_SAFE_CALL(cudaFree(arr[i]));
printf("mem freed\n");
}
/*calculate Gflops*/
double calculate_gflops(double &Tsec)
{
//printf("time taken is %.8lf\n",Tsec);
double gflops=(1.0e-9 * (( 1.0 * size*size*size )/Tsec));
//printf("Gflops is \t%f\n",gflops);
return gflops;
}
/*get device count*/
int get_DeviceCount()
{
int count;
cudaGetDeviceCount(&count);
return count;
}
/*launch kernel function is called in main()*/
void launch_kernel_MatMatMult()
{
/* threads_per_block= BLOCKSIZE, blocks_per_grid=size/dimBlock */
printf("in launch kernel\n");
dim3 dimBlock(BLOCKSIZE,BLOCKSIZE);
dim3 dimGrid(size/dimBlock.x,size/dimBlock.y);
//checking the maximum limit of blocksize and gridsize-------------------
check_block_grid_dim(deviceProp,dimBlock,dimGrid);
printf("after check\n");
cudaEventRecord(start,0);
Muld<<<dimGrid,dimBlock>>>(device_MatA,device_MatB,size,size,device_MatC);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime,start,stop);
Tsec=elapsedTime*1.0e-3;
calculate_gflops(Tsec);
}
/* Fill in the vector with double precision values */
void fill_dp_vector(double* vec,int size)
{
int ind;
for(ind=0;ind<size;ind++)
vec[ind]=drand48();
}
/*function to print on the screen*/
void print_on_screen(char * program_name,float tsec,double gflops,int size,int flag)//flag=1 if gflops has been calculated else flag =0
{
printf("\n---------------%s----------------\n",program_name);
printf("\tSIZE\t TIME_SEC\t Gflops\n");
if(flag==1)
printf("\t%d\t%f\t%lf\t",size,tsec,gflops);
else
printf("\t%d\t%lf\t%lf\t",size,"---","---");
}
/*-----main()-----*/
int main()
{
int device_Count=get_DeviceCount();
printf("\n\nNUmber of Devices : %d\n\n", device_Count);
// Device Selection, Device 1: Tesla C1060
cudaSetDevice(1);
int device;
// Current Device Detection
cudaGetDevice(&device);
cudaGetDeviceProperties(&deviceProp,device);
printf("Using device %d: %s \n", device, deviceProp.name);
/* allocate memory for GPU events
start = (cudaEvent_t) malloc (sizeof(cudaEvent_t));
stop = (cudaEvent_t) malloc (sizeof(cudaEvent_t));
if(start==NULL)
mem_error("start","MatMatMult",1,"cudaEvent_t");
if(stop==NULL)
mem_error("stop","MatMatMult",1,"cudaEvent_t");*/
//event creation...
CUDA_SAFE_CALL(cudaEventCreate (&start));
CUDA_SAFE_CALL(cudaEventCreate (&stop));
/*allocating the memory for each matrix */
host_MatA = new double[size*size];
host_MatB = new double[size*size];
host_MatC = new double[size*size];
CPU_Result= new double[size*size];
if(host_MatA==NULL)
mem_error("host_MatA","MatMatMult",size,"double");
if(host_MatB==NULL)
mem_error("host_MatB","MatMatMult",size,"double");
if(host_MatC==NULL)
mem_error("host_MatC","MatMatMult",size,"double");
if(CPU_Result==NULL)
mem_error("CPU_Result","MatMatMult",size,"double");
//--------filling the matrix with double precision-----------
fill_dp_vector(host_MatA,size*size);
fill_dp_vector(host_MatB,size*size);
//allocating memory on GPU
CUDA_SAFE_CALL(cudaMalloc( (void**)&device_MatA,size*size*sizeof(double)));
CUDA_SAFE_CALL(cudaMalloc( (void**)&device_MatB, size*size*sizeof(double)));
CUDA_SAFE_CALL(cudaMalloc( (void**)&device_MatC,size*size*sizeof(double)));
// copying host matrix to device matrix
CUDA_SAFE_CALL(cudaMemcpy((void*)device_MatA, (void*)host_MatA, size*size* sizeof(double) , cudaMemcpyHostToDevice ));
CUDA_SAFE_CALL(cudaMemcpy((void*)device_MatB, (void*)host_MatB, size*size*sizeof(double) , cudaMemcpyHostToDevice ));
launch_kernel_MatMatMult(); //launching the kernel
//retrieving result from device
CUDA_SAFE_CALL(cudaMemcpy((void*)host_MatC, (void*)device_MatC, size*size*sizeof(double) , cudaMemcpyDeviceToHost ));
//comparing result of CPU-GPU
//relError(CPU_Result,host_MatC,size*size);
//printing the result on screen
print_on_screen("MAT MAT Mult",Tsec,calculate_gflops(Tsec),size,1);
//free the device memory----------
double *array[3];
array[0]=device_MatA;
array[1]=device_MatB;
array[2]=device_MatC;
dfree(array,3);
//free host memory----------
delete[] host_MatA;
delete[] host_MatB;
delete[] host_MatC;
delete[] CPU_Result;
}// end of main
|
dcde4a381952bab5727776978888b099d1f32852.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "initializer/random_uniform.h"
#include "common.h"
#include "common_hip.cuh"
namespace SparseOperationKit {
RandomUniformInit::RandomUniformInit(const float a, const float b)
: a_(a), b_(b)
{
if (a_ >= b_) throw std::runtime_error(ErrorBase + "a must be smaller than b.");
}
std::shared_ptr<RandomUniformInit> RandomUniformInit::create(
const float a, const float b) {
return std::shared_ptr<RandomUniformInit>(new RandomUniformInit(a, b));
}
void RandomUniformInit::fill(std::shared_ptr<Tensor> tensor,
const size_t sm_count,
const hiprandGenerator_t& generator,
const hipStream_t& stream) {
CK_CURAND(hiprandGenerateUniform(generator,
tensor->GetPtrWithType<float>(),
tensor->get_num_elements()));
float a = a_, b = b_;
auto op = [a, b] __device__(float val) { return val * (b - a) + a; };
hipLaunchKernelGGL(( transform_array), dim3(sm_count * 2), dim3(1024), 0, stream, tensor->GetPtrWithType<float>(),
tensor->GetPtrWithType<float>(),
tensor->get_num_elements(), op);
}
} // namespace SparseOperationKit
|
dcde4a381952bab5727776978888b099d1f32852.cu
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "initializer/random_uniform.h"
#include "common.h"
#include "common.cuh"
namespace SparseOperationKit {
RandomUniformInit::RandomUniformInit(const float a, const float b)
: a_(a), b_(b)
{
if (a_ >= b_) throw std::runtime_error(ErrorBase + "a must be smaller than b.");
}
std::shared_ptr<RandomUniformInit> RandomUniformInit::create(
const float a, const float b) {
return std::shared_ptr<RandomUniformInit>(new RandomUniformInit(a, b));
}
void RandomUniformInit::fill(std::shared_ptr<Tensor> tensor,
const size_t sm_count,
const curandGenerator_t& generator,
const cudaStream_t& stream) {
CK_CURAND(curandGenerateUniform(generator,
tensor->GetPtrWithType<float>(),
tensor->get_num_elements()));
float a = a_, b = b_;
auto op = [a, b] __device__(float val) { return val * (b - a) + a; };
transform_array<<<sm_count * 2, 1024, 0, stream>>>(tensor->GetPtrWithType<float>(),
tensor->GetPtrWithType<float>(),
tensor->get_num_elements(), op);
}
} // namespace SparseOperationKit
|
022d39f86fe0187bb6a74322ebda4e2c46061b5b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <algorithm>
#include <tuple>
#include <fstream>
#include <iostream>
#include "minutia.cuh"
#include "constants.cuh"
#include "util.cuh"
#include "errors.h"
#include "debug.h"
#include <vector>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <device_launch_parameters.h>
#include <hip/device_functions.h>
using namespace std;
__host__
vector<Minutia> buildConvexHull(const vector<Minutia>& _minutiae) {
vector<Minutia> minutiae(_minutiae);
int min_y = 0;
for (int i = 1; i < minutiae.size(); ++i) {
if (minutiae[i] < minutiae[min_y])
min_y = i;
}
Minutia pivot(minutiae[min_y]);
swap(minutiae.front(), minutiae[min_y]);
sort(minutiae.begin()+1, minutiae.end(), [&]
(const Minutia &lhs, const Minutia &rhs) {
int turn = minutiaTurn(pivot, lhs, rhs);
if (turn == 0) {
auto ldist = sqrDistance(pivot.x, pivot.y, lhs.x, lhs.y);
auto rdist = sqrDistance(pivot.x, pivot.y, rhs.x, rhs.y);
return ldist < rdist;
}
return turn == 1;
});
vector<Minutia> hull;
for (int i = 0; i < 3; ++i)
hull.push_back(minutiae[i]);
for (int i = 3; i < minutiae.size(); ++i) {
Minutia top(hull.back());
while (!hull.empty() && minutiaTurn(hull.back(), top, minutiae[i]) != 1) {
top = hull.back();
hull.pop_back();
}
hull.push_back(top);
hull.push_back(minutiae[i]);
}
return hull;
}
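// One thread per pixel; the hull vertices are staged in dynamic shared memory.
// A pixel is marked valid (1) when it lies inside the hull, or outside but
// within OMEGA of a hull edge.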
__global__
void fillConvexHull(Minutia *hull, const int nHull,
const int width, const int height, char *area) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
  extern __shared__ int shared[];
  Minutia *sharedHull = (Minutia*)shared;
  int idx = threadIdx.y * blockDim.x + threadIdx.x;
  if (idx < nHull) sharedHull[idx] = hull[idx];
  // every thread in the block must reach the barrier, so the bounds check
  // is done only after __syncthreads()
  __syncthreads();
  if (x >= width || y >= height) return;
char ok = 1;
for (int i = 0; i < nHull; ++i) {
Minutia a(sharedHull[i]);
Minutia b(sharedHull[(i+1) % nHull]);
if (lineTurn(x, y, a.x, a.y, b.x, b.y) < 0) {
ok = 0;
if (sqrDistanceFromSegment(x, y, a.x, a.y, b.x, b.y) <= OMEGA*OMEGA) {
area[y*width + x] = 1;
return;
}
}
}
area[y*width + x] = ok;
}
__host__
void devBuildValidArea(
const vector<Minutia> &minutiae,
const int width, const int height,
char *devArea) {
auto hull = buildConvexHull(minutiae);
#ifdef DEBUG
ofstream hullStream("plot/hull.txt");
hullStream << width << endl << height << endl << hull.size() << endl;
for (int i = 0; i < hull.size(); ++i)
hullStream << hull[i].x << ' ' << hull[i].y << endl;
hullStream << endl;
hullStream.close();
#endif
Minutia *devHull;
size_t devHullSize = hull.size() * sizeof(Minutia);
handleError(
hipMalloc(&devHull, devHullSize));
handleError(
hipMemcpy(devHull, hull.data(), devHullSize, hipMemcpyHostToDevice));
int threadPerDim = 32;
dim3 blockCount(ceilMod(width, threadPerDim), ceilMod(height, threadPerDim));
dim3 threadCount(threadPerDim, threadPerDim);
hipLaunchKernelGGL(( fillConvexHull), dim3(blockCount), dim3(threadCount), devHullSize, 0,
devHull, hull.size(), width, height, devArea);
handleError(
hipPeekAtLastError());
hipFree(devHull);
}
__host__
vector<char> buildValidArea(
const vector<Minutia>& minutiae,
const int width, const int height) {
char *devArea;
size_t devAreaSize = width * height * sizeof(char);
handleError(
hipMalloc(&devArea, devAreaSize));
devBuildValidArea(minutiae, width, height, devArea);
vector<char> ret(width * height);
handleError(
hipMemcpy(ret.data(), devArea, devAreaSize, hipMemcpyDeviceToHost));
hipFree(devArea);
#ifdef DEBUG
ofstream areaStream("plot/area.txt");
for (int i = 0; i < height; ++i) {
for (int j = 0; j < width; ++j) {
areaStream << (ret[i*width + j] ? '1' : '0');
}
areaStream << endl;
}
areaStream.close();
#endif
return ret;
}
|
022d39f86fe0187bb6a74322ebda4e2c46061b5b.cu
|
#include <algorithm>
#include <tuple>
#include <fstream>
#include <iostream>
#include "minutia.cuh"
#include "constants.cuh"
#include "util.cuh"
#include "errors.h"
#include "debug.h"
#include <vector>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include <device_functions.h>
using namespace std;
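// Graham scan: the minutia that compares smallest (operator<) is used as the
// pivot, the rest are sorted by turn direction around it (ties broken by
// squared distance), and vertices that do not make a left turn are popped.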
__host__
vector<Minutia> buildConvexHull(const vector<Minutia>& _minutiae) {
vector<Minutia> minutiae(_minutiae);
int min_y = 0;
for (int i = 1; i < minutiae.size(); ++i) {
if (minutiae[i] < minutiae[min_y])
min_y = i;
}
Minutia pivot(minutiae[min_y]);
swap(minutiae.front(), minutiae[min_y]);
sort(minutiae.begin()+1, minutiae.end(), [&]
(const Minutia &lhs, const Minutia &rhs) {
int turn = minutiaTurn(pivot, lhs, rhs);
if (turn == 0) {
auto ldist = sqrDistance(pivot.x, pivot.y, lhs.x, lhs.y);
auto rdist = sqrDistance(pivot.x, pivot.y, rhs.x, rhs.y);
return ldist < rdist;
}
return turn == 1;
});
vector<Minutia> hull;
for (int i = 0; i < 3; ++i)
hull.push_back(minutiae[i]);
for (int i = 3; i < minutiae.size(); ++i) {
Minutia top(hull.back());
while (!hull.empty() && minutiaTurn(hull.back(), top, minutiae[i]) != 1) {
top = hull.back();
hull.pop_back();
}
hull.push_back(top);
hull.push_back(minutiae[i]);
}
return hull;
}
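// One thread per pixel; the hull vertices are staged in dynamic shared memory.
// A pixel is marked valid (1) when it lies inside the hull, or outside but
// within OMEGA of a hull edge.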
__global__
void fillConvexHull(Minutia *hull, const int nHull,
const int width, const int height, char *area) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
  extern __shared__ int shared[];
  Minutia *sharedHull = (Minutia*)shared;
  int idx = threadIdx.y * blockDim.x + threadIdx.x;
  if (idx < nHull) sharedHull[idx] = hull[idx];
  // every thread in the block must reach the barrier, so the bounds check
  // is done only after __syncthreads()
  __syncthreads();
  if (x >= width || y >= height) return;
char ok = 1;
for (int i = 0; i < nHull; ++i) {
Minutia a(sharedHull[i]);
Minutia b(sharedHull[(i+1) % nHull]);
if (lineTurn(x, y, a.x, a.y, b.x, b.y) < 0) {
ok = 0;
if (sqrDistanceFromSegment(x, y, a.x, a.y, b.x, b.y) <= OMEGA*OMEGA) {
area[y*width + x] = 1;
return;
}
}
}
area[y*width + x] = ok;
}
__host__
void devBuildValidArea(
const vector<Minutia> &minutiae,
const int width, const int height,
char *devArea) {
auto hull = buildConvexHull(minutiae);
#ifdef DEBUG
ofstream hullStream("plot/hull.txt");
hullStream << width << endl << height << endl << hull.size() << endl;
for (int i = 0; i < hull.size(); ++i)
hullStream << hull[i].x << ' ' << hull[i].y << endl;
hullStream << endl;
hullStream.close();
#endif
Minutia *devHull;
size_t devHullSize = hull.size() * sizeof(Minutia);
handleError(
cudaMalloc(&devHull, devHullSize));
handleError(
cudaMemcpy(devHull, hull.data(), devHullSize, cudaMemcpyHostToDevice));
int threadPerDim = 32;
dim3 blockCount(ceilMod(width, threadPerDim), ceilMod(height, threadPerDim));
dim3 threadCount(threadPerDim, threadPerDim);
fillConvexHull<<<blockCount, threadCount, devHullSize>>>(
devHull, hull.size(), width, height, devArea);
handleError(
cudaPeekAtLastError());
cudaFree(devHull);
}
__host__
vector<char> buildValidArea(
const vector<Minutia>& minutiae,
const int width, const int height) {
char *devArea;
size_t devAreaSize = width * height * sizeof(char);
handleError(
cudaMalloc(&devArea, devAreaSize));
devBuildValidArea(minutiae, width, height, devArea);
vector<char> ret(width * height);
handleError(
cudaMemcpy(ret.data(), devArea, devAreaSize, cudaMemcpyDeviceToHost));
cudaFree(devArea);
#ifdef DEBUG
ofstream areaStream("plot/area.txt");
for (int i = 0; i < height; ++i) {
for (int j = 0; j < width; ++j) {
areaStream << (ret[i*width + j] ? '1' : '0');
}
areaStream << endl;
}
areaStream.close();
#endif
return ret;
}
|
73962a6795a1d6fb48474ae5e93c972e8a6527e1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <cstdlib>
#include <math.h>
#include <chrono>
// matrix multiply on gpu (the kernel body below is currently a placeholder copy)
__global__
void dgem_gpu(int n, float *A, float *B, float *C)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
// demo filler
C[i+j*n] = B[i+j*n];
}
void square_dgemm_naive (int n, float* A, float* B, float* C)
{
for (int i = 0; i < n; ++i)
for (int j = 0; j < n; ++j)
{
float cij = C[i+j*n];
for( int k = 0; k < n; k++ )
cij += A[i+k*n] * B[k+j*n];
C[i+j*n] = cij;
}
}
int check(int n, float *A, float *B) {
for (int i = 0; i < n; ++i)
for (int j = 0; j < n; ++j) {
double diff = std::abs(A[i + j * n] - B[i + j * n]);
if (diff > 0.0003) {
printf("diff is %f\n", diff);
return 0;
}
}
return 1;
}
int main(void)
{
int N = 1000;
int size = N*N; // square matrix
float *A, *B, *C, *verify;
  // allocate unified (managed) memory so the same pointers work on CPU and GPU
hipMallocManaged(&A, size*sizeof(float));
hipMallocManaged(&B, size*sizeof(float));
hipMallocManaged(&C, size*sizeof(float));
hipMallocManaged(&verify, size*sizeof(float));
  // initialize A, B, C and verify on the host
for (int i = 0; i < size; i++) {
A[i] = i * 0.000000001;
B[i] = i * 0.000000001;
C[i] = 0.0f;
verify[i] = 0.0f;
}
  // generate the reference answer with the serial CPU implementation
auto serialStart = std::chrono::system_clock::now();
square_dgemm_naive(N, A, B, verify);
auto serialEnd = std::chrono::system_clock::now();
std::chrono::duration<double> serialElapsed = serialEnd - serialStart;
std::cout << serialElapsed.count() << "s\n";
// Run kernel on the GPU
// use this one for actual work
auto gpuStart = std::chrono::system_clock::now();
//hipLaunchKernelGGL(( dgem_gpu), dim3(N), dim3(N), 0, 0, N, A, B, C);
// comment this one out, just for testing
hipLaunchKernelGGL(( dgem_gpu), dim3(N), dim3(N), 0, 0, N, A, C, verify);
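  // Note: dim3(N) expands to (N, 1, 1), so j is always 0 here and the flat
  // index i covers all N*N elements -- fine for the element-wise copy above,
  // but a real matrix multiply would need a 2-D launch. A hypothetical sketch:
  //   dim3 block(16, 16);
  //   dim3 grid((N + block.x - 1) / block.x, (N + block.y - 1) / block.y);
  //   hipLaunchKernelGGL((dgem_gpu), grid, block, 0, 0, N, A, B, C);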
  // wait for the asynchronous kernel to finish before stopping the timer
  hipDeviceSynchronize();
  auto gpuEnd = std::chrono::system_clock::now();
  std::chrono::duration<double> gpuElapsed = gpuEnd - gpuStart;
  std::cout << gpuElapsed.count() << "s\n";
int correct = check(N, C, verify);
// Free memory
hipFree(A);
hipFree(B);
hipFree(C);
hipFree(verify);
if (correct == 0) {
printf("INVALID OUTPUT\n");
exit(1);
}
printf("Correct output!\n");
return 0;
}
|
73962a6795a1d6fb48474ae5e93c972e8a6527e1.cu
|
#include <iostream>
#include <cstdlib>
#include <math.h>
#include <chrono>
// matrix multiply on gpu (the kernel body below is currently a placeholder copy)
__global__
void dgem_gpu(int n, float *A, float *B, float *C)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
// demo filler
C[i+j*n] = B[i+j*n];
}
void square_dgemm_naive (int n, float* A, float* B, float* C)
{
for (int i = 0; i < n; ++i)
for (int j = 0; j < n; ++j)
{
float cij = C[i+j*n];
for( int k = 0; k < n; k++ )
cij += A[i+k*n] * B[k+j*n];
C[i+j*n] = cij;
}
}
int check(int n, float *A, float *B) {
for (int i = 0; i < n; ++i)
for (int j = 0; j < n; ++j) {
double diff = std::abs(A[i + j * n] - B[i + j * n]);
if (diff > 0.0003) {
printf("diff is %f\n", diff);
return 0;
}
}
return 1;
}
int main(void)
{
int N = 1000;
int size = N*N; // square matrix
float *A, *B, *C, *verify;
  // allocate unified (managed) memory so the same pointers work on CPU and GPU
cudaMallocManaged(&A, size*sizeof(float));
cudaMallocManaged(&B, size*sizeof(float));
cudaMallocManaged(&C, size*sizeof(float));
cudaMallocManaged(&verify, size*sizeof(float));
  // initialize A, B, C and verify on the host
for (int i = 0; i < size; i++) {
A[i] = i * 0.000000001;
B[i] = i * 0.000000001;
C[i] = 0.0f;
verify[i] = 0.0f;
}
  // generate the reference answer with the serial CPU implementation
auto serialStart = std::chrono::system_clock::now();
square_dgemm_naive(N, A, B, verify);
auto serialEnd = std::chrono::system_clock::now();
std::chrono::duration<double> serialElapsed = serialEnd - serialStart;
std::cout << serialElapsed.count() << "s\n";
// Run kernel on the GPU
// use this one for actual work
auto gpuStart = std::chrono::system_clock::now();
// dgem_gpu<<<N, N>>>(N, A, B, C);
// comment this one out, just for testing
dgem_gpu<<<N, N>>>(N, A, C, verify);
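  // Note: with <<<N, N>>> both grid and block are 1-D, so j is always 0 and
  // the flat index i covers all N*N elements -- fine for the element-wise copy
  // above, but a real matrix multiply would need a 2-D launch. A hypothetical sketch:
  //   dim3 block(16, 16);
  //   dim3 grid((N + block.x - 1) / block.x, (N + block.y - 1) / block.y);
  //   dgem_gpu<<<grid, block>>>(N, A, B, C);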
  // wait for the asynchronous kernel to finish before stopping the timer
  cudaDeviceSynchronize();
  auto gpuEnd = std::chrono::system_clock::now();
  std::chrono::duration<double> gpuElapsed = gpuEnd - gpuStart;
  std::cout << gpuElapsed.count() << "s\n";
int correct = check(N, C, verify);
// Free memory
cudaFree(A);
cudaFree(B);
cudaFree(C);
cudaFree(verify);
if (correct == 0) {
printf("INVALID OUTPUT\n");
exit(1);
}
printf("Correct output!\n");
return 0;
}
|