| hip_filename (string, 5–84 chars) | hip_content (string, 79–9.69M chars) | cuda_filename (string, 4–83 chars) | cuda_content (string, 19–9.69M chars) |
---|---|---|---|
f634a61c7aa0c2d4990c87ed06269f71536e8c75.hip | // !!! This is a file automatically generated by hipify!!!
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
// DEFINE / INCLUDE
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
//======================================================================================================================================================
// LIBRARIES
//======================================================================================================================================================
#include <helper_cuda.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <avilib.h>
#include <avimod.h>
#include <hip/hip_runtime.h>
//======================================================================================================================================================
// STRUCTURES, GLOBAL STRUCTURE VARIABLES
//======================================================================================================================================================
#include "define.c"
params_common_change common_change;
params_common_change *d_common_change;
params_common common;
params_common *d_common;
params_unique unique[ALL_POINTS]; // cannot determine size dynamically so choose
// more than usually needed
params_unique *d_unique;
//======================================================================================================================================================
// KERNEL CODE
//======================================================================================================================================================
#include "kernel.hip"
// WRITE DATA FUNCTION
//===============================================================================================================================================================================================================200
void write_data(char *filename, int frameNo, int frames_processed,
int endoPoints, int *input_a, int *input_b, int epiPoints,
int *input_2a, int *input_2b) {
//================================================================================80
// VARIABLES
//================================================================================80
FILE *fid;
int i, j;
char c;
//================================================================================80
// OPEN FILE FOR WRITING
//================================================================================80
fid = fopen(filename, "w+");
if (fid == NULL) {
printf("The file was not opened for writing\n");
return;
}
//================================================================================80
// WRITE VALUES TO THE FILE
//================================================================================80
fprintf(fid, "Total AVI Frames: %d\n", frameNo);
fprintf(fid, "Frames Processed: %d\n", frames_processed);
fprintf(fid, "endoPoints: %d\n", endoPoints);
fprintf(fid, "epiPoints: %d", epiPoints);
for (j = 0; j < frames_processed; j++) {
fprintf(fid, "\n---Frame %d---", j);
fprintf(fid, "\n--endo--\n", j);
for (i = 0; i < endoPoints; i++) {
fprintf(fid, "%d\t", input_a[j + i * frameNo]);
}
fprintf(fid, "\n");
for (i = 0; i < endoPoints; i++) {
// if(input_b[j*size+i] > 2000) input_b[j*size+i]=0;
fprintf(fid, "%d\t", input_b[j + i * frameNo]);
}
fprintf(fid, "\n--epi--\n", j);
for (i = 0; i < epiPoints; i++) {
// if(input_2a[j*size_2+i] > 2000) input_2a[j*size_2+i]=0;
fprintf(fid, "%d\t", input_2a[j + i * frameNo]);
}
fprintf(fid, "\n");
for (i = 0; i < epiPoints; i++) {
// if(input_2b[j*size_2+i] > 2000) input_2b[j*size_2+i]=0;
fprintf(fid, "%d\t", input_2b[j + i * frameNo]);
}
}
// ================================================================================80
// CLOSE FILE
// ================================================================================80
fclose(fid);
}
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
// MAIN FUNCTION
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
int main(int argc, char *argv[]) {
printf("WG size of kernel = %d \n", NUMBER_THREADS);
//======================================================================================================================================================
// VARIABLES
//======================================================================================================================================================
// CUDA kernel execution parameters
dim3 threads;
dim3 blocks;
// counter
int i;
int frames_processed;
// frames
char *video_file_name;
avi_t *frames;
fp *frame;
//======================================================================================================================================================
// FRAME
//======================================================================================================================================================
if (argc != 3) {
printf("ERROR: usage: heartwall <inputfile> <num of frames>\n");
exit(1);
}
// open movie file
video_file_name = argv[1];
frames = (avi_t *)AVI_open_input_file(video_file_name, 1); // added casting
if (frames == NULL) {
AVI_print_error((char *)"Error with AVI_open_input_file");
return -1;
}
// common
common.no_frames = AVI_video_frames(frames);
common.frame_rows = AVI_video_height(frames);
common.frame_cols = AVI_video_width(frames);
common.frame_elem = common.frame_rows * common.frame_cols;
common.frame_mem = sizeof(fp) * common.frame_elem;
// pointers
checkCudaErrors(
hipMalloc((void **)&common_change.d_frame, common.frame_mem));
//======================================================================================================================================================
// CHECK INPUT ARGUMENTS
//======================================================================================================================================================
frames_processed = atoi(argv[2]);
if (frames_processed < 0 || frames_processed > common.no_frames) {
printf("ERROR: %d is an incorrect number of frames specified, select "
"in the range of 0-%d\n",
frames_processed, common.no_frames);
return 0;
}
//======================================================================================================================================================
// HARDCODED INPUTS FROM MATLAB
//======================================================================================================================================================
//====================================================================================================
// CONSTANTS
//====================================================================================================
common.sSize = 40;
common.tSize = 25;
common.maxMove = 10;
common.alpha = 0.87;
//====================================================================================================
// ENDO POINTS
//====================================================================================================
common.endoPoints = ENDO_POINTS;
common.endo_mem = sizeof(int) * common.endoPoints;
common.endoRow = (int *)malloc(common.endo_mem);
common.endoRow[0] = 369;
common.endoRow[1] = 400;
common.endoRow[2] = 429;
common.endoRow[3] = 452;
common.endoRow[4] = 476;
common.endoRow[5] = 486;
common.endoRow[6] = 479;
common.endoRow[7] = 458;
common.endoRow[8] = 433;
common.endoRow[9] = 404;
common.endoRow[10] = 374;
common.endoRow[11] = 346;
common.endoRow[12] = 318;
common.endoRow[13] = 294;
common.endoRow[14] = 277;
common.endoRow[15] = 269;
common.endoRow[16] = 275;
common.endoRow[17] = 287;
common.endoRow[18] = 311;
common.endoRow[19] = 339;
checkCudaErrors(hipMalloc((void **)&common.d_endoRow, common.endo_mem));
checkCudaErrors(hipMemcpy(common.d_endoRow, common.endoRow,
common.endo_mem, hipMemcpyHostToDevice));
common.endoCol = (int *)malloc(common.endo_mem);
common.endoCol[0] = 408;
common.endoCol[1] = 406;
common.endoCol[2] = 397;
common.endoCol[3] = 383;
common.endoCol[4] = 354;
common.endoCol[5] = 322;
common.endoCol[6] = 294;
common.endoCol[7] = 270;
common.endoCol[8] = 250;
common.endoCol[9] = 237;
common.endoCol[10] = 235;
common.endoCol[11] = 241;
common.endoCol[12] = 254;
common.endoCol[13] = 273;
common.endoCol[14] = 300;
common.endoCol[15] = 328;
common.endoCol[16] = 356;
common.endoCol[17] = 383;
common.endoCol[18] = 401;
common.endoCol[19] = 411;
checkCudaErrors(hipMalloc((void **)&common.d_endoCol, common.endo_mem));
checkCudaErrors(hipMemcpy(common.d_endoCol, common.endoCol,
common.endo_mem, hipMemcpyHostToDevice));
common.tEndoRowLoc = (int *)malloc(common.endo_mem * common.no_frames);
checkCudaErrors(hipMalloc((void **)&common.d_tEndoRowLoc,
common.endo_mem * common.no_frames));
common.tEndoColLoc = (int *)malloc(common.endo_mem * common.no_frames);
checkCudaErrors(hipMalloc((void **)&common.d_tEndoColLoc,
common.endo_mem * common.no_frames));
//====================================================================================================
// EPI POINTS
//====================================================================================================
common.epiPoints = EPI_POINTS;
common.epi_mem = sizeof(int) * common.epiPoints;
common.epiRow = (int *)malloc(common.epi_mem);
common.epiRow[0] = 390;
common.epiRow[1] = 419;
common.epiRow[2] = 448;
common.epiRow[3] = 474;
common.epiRow[4] = 501;
common.epiRow[5] = 519;
common.epiRow[6] = 535;
common.epiRow[7] = 542;
common.epiRow[8] = 543;
common.epiRow[9] = 538;
common.epiRow[10] = 528;
common.epiRow[11] = 511;
common.epiRow[12] = 491;
common.epiRow[13] = 466;
common.epiRow[14] = 438;
common.epiRow[15] = 406;
common.epiRow[16] = 376;
common.epiRow[17] = 347;
common.epiRow[18] = 318;
common.epiRow[19] = 291;
common.epiRow[20] = 275;
common.epiRow[21] = 259;
common.epiRow[22] = 256;
common.epiRow[23] = 252;
common.epiRow[24] = 252;
common.epiRow[25] = 257;
common.epiRow[26] = 266;
common.epiRow[27] = 283;
common.epiRow[28] = 305;
common.epiRow[29] = 331;
common.epiRow[30] = 360;
checkCudaErrors(hipMalloc((void **)&common.d_epiRow, common.epi_mem));
checkCudaErrors(hipMemcpy(common.d_epiRow, common.epiRow, common.epi_mem,
hipMemcpyHostToDevice));
common.epiCol = (int *)malloc(common.epi_mem);
common.epiCol[0] = 457;
common.epiCol[1] = 454;
common.epiCol[2] = 446;
common.epiCol[3] = 431;
common.epiCol[4] = 411;
common.epiCol[5] = 388;
common.epiCol[6] = 361;
common.epiCol[7] = 331;
common.epiCol[8] = 301;
common.epiCol[9] = 273;
common.epiCol[10] = 243;
common.epiCol[11] = 218;
common.epiCol[12] = 196;
common.epiCol[13] = 178;
common.epiCol[14] = 166;
common.epiCol[15] = 157;
common.epiCol[16] = 155;
common.epiCol[17] = 165;
common.epiCol[18] = 177;
common.epiCol[19] = 197;
common.epiCol[20] = 218;
common.epiCol[21] = 248;
common.epiCol[22] = 276;
common.epiCol[23] = 304;
common.epiCol[24] = 333;
common.epiCol[25] = 361;
common.epiCol[26] = 391;
common.epiCol[27] = 415;
common.epiCol[28] = 434;
common.epiCol[29] = 448;
common.epiCol[30] = 455;
checkCudaErrors(hipMalloc((void **)&common.d_epiCol, common.epi_mem));
checkCudaErrors(hipMemcpy(common.d_epiCol, common.epiCol, common.epi_mem,
hipMemcpyHostToDevice));
common.tEpiRowLoc = (int *)malloc(common.epi_mem * common.no_frames);
checkCudaErrors(hipMalloc((void **)&common.d_tEpiRowLoc,
common.epi_mem * common.no_frames));
common.tEpiColLoc = (int *)malloc(common.epi_mem * common.no_frames);
checkCudaErrors(hipMalloc((void **)&common.d_tEpiColLoc,
common.epi_mem * common.no_frames));
//====================================================================================================
// ALL POINTS
//====================================================================================================
common.allPoints = ALL_POINTS;
//======================================================================================================================================================
// TEMPLATE SIZES
//======================================================================================================================================================
// common
common.in_rows = common.tSize + 1 + common.tSize;
common.in_cols = common.in_rows;
common.in_elem = common.in_rows * common.in_cols;
common.in_mem = sizeof(fp) * common.in_elem;
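// with tSize = 25, each point's template window is 51 x 51 pixels (tSize + 1 + tSize)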
//======================================================================================================================================================
// CREATE ARRAY OF TEMPLATES FOR ALL POINTS
//======================================================================================================================================================
// common
checkCudaErrors(hipMalloc((void **)&common.d_endoT,
common.in_mem * common.endoPoints));
checkCudaErrors(
hipMalloc((void **)&common.d_epiT, common.in_mem * common.epiPoints));
//======================================================================================================================================================
// SPECIFIC TO ENDO OR EPI TO BE SET HERE
//======================================================================================================================================================
for (i = 0; i < common.endoPoints; i++) {
unique[i].point_no = i;
unique[i].d_Row = common.d_endoRow;
unique[i].d_Col = common.d_endoCol;
unique[i].d_tRowLoc = common.d_tEndoRowLoc;
unique[i].d_tColLoc = common.d_tEndoColLoc;
unique[i].d_T = common.d_endoT;
}
for (i = common.endoPoints; i < common.allPoints; i++) {
unique[i].point_no = i - common.endoPoints;
unique[i].d_Row = common.d_epiRow;
unique[i].d_Col = common.d_epiCol;
unique[i].d_tRowLoc = common.d_tEpiRowLoc;
unique[i].d_tColLoc = common.d_tEpiColLoc;
unique[i].d_T = common.d_epiT;
}
//======================================================================================================================================================
// RIGHT TEMPLATE FROM TEMPLATE ARRAY
//======================================================================================================================================================
// pointers
for (i = 0; i < common.allPoints; i++) {
unique[i].in_pointer = unique[i].point_no * common.in_elem;
}
//======================================================================================================================================================
// AREA AROUND POINT FROM FRAME
//======================================================================================================================================================
// common
common.in2_rows = 2 * common.sSize + 1;
common.in2_cols = 2 * common.sSize + 1;
common.in2_elem = common.in2_rows * common.in2_cols;
common.in2_mem = sizeof(float) * common.in2_elem;
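// with sSize = 40, the search area around each point is 81 x 81 pixels (2 * sSize + 1)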
// pointers
for (i = 0; i < common.allPoints; i++) {
checkCudaErrors(hipMalloc((void **)&unique[i].d_in2, common.in2_mem));
}
//======================================================================================================================================================
// CONVOLUTION
//======================================================================================================================================================
// common
common.conv_rows =
common.in_rows + common.in2_rows - 1; // number of rows in I
common.conv_cols =
common.in_cols + common.in2_cols - 1; // number of columns in I
common.conv_elem =
common.conv_rows * common.conv_cols; // number of elements
common.conv_mem = sizeof(float) * common.conv_elem;
common.ioffset = 0;
common.joffset = 0;
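// full 2-D convolution of the 51 x 51 template with the 81 x 81 search area:
// (51 + 81 - 1) x (51 + 81 - 1) = 131 x 131 elements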
// pointers
for (i = 0; i < common.allPoints; i++) {
checkCudaErrors(
hipMalloc((void **)&unique[i].d_conv, common.conv_mem));
}
//======================================================================================================================================================
// CUMULATIVE SUM
//======================================================================================================================================================
//====================================================================================================
// PADDING OF ARRAY, VERTICAL CUMULATIVE SUM
//====================================================================================================
// common
common.in2_pad_add_rows = common.in_rows;
common.in2_pad_add_cols = common.in_cols;
common.in2_pad_cumv_rows = common.in2_rows + 2 * common.in2_pad_add_rows;
common.in2_pad_cumv_cols = common.in2_cols + 2 * common.in2_pad_add_cols;
common.in2_pad_cumv_elem =
common.in2_pad_cumv_rows * common.in2_pad_cumv_cols;
common.in2_pad_cumv_mem = sizeof(float) * common.in2_pad_cumv_elem;
// pointers
for (i = 0; i < common.allPoints; i++) {
checkCudaErrors(hipMalloc((void **)&unique[i].d_in2_pad_cumv,
common.in2_pad_cumv_mem));
}
//====================================================================================================
// SELECTION
//====================================================================================================
// common
common.in2_pad_cumv_sel_rowlow = 1 + common.in_rows; // (1 to n+1)
common.in2_pad_cumv_sel_rowhig = common.in2_pad_cumv_rows - 1;
common.in2_pad_cumv_sel_collow = 1;
common.in2_pad_cumv_sel_colhig = common.in2_pad_cumv_cols;
common.in2_pad_cumv_sel_rows =
common.in2_pad_cumv_sel_rowhig - common.in2_pad_cumv_sel_rowlow + 1;
common.in2_pad_cumv_sel_cols =
common.in2_pad_cumv_sel_colhig - common.in2_pad_cumv_sel_collow + 1;
common.in2_pad_cumv_sel_elem =
common.in2_pad_cumv_sel_rows * common.in2_pad_cumv_sel_cols;
common.in2_pad_cumv_sel_mem = sizeof(float) * common.in2_pad_cumv_sel_elem;
// pointers
for (i = 0; i < common.allPoints; i++) {
checkCudaErrors(hipMalloc((void **)&unique[i].d_in2_pad_cumv_sel,
common.in2_pad_cumv_sel_mem));
}
//====================================================================================================
// SELECTION 2, SUBTRACTION, HORIZONTAL CUMULATIVE SUM
//====================================================================================================
// common
common.in2_pad_cumv_sel2_rowlow = 1;
common.in2_pad_cumv_sel2_rowhig =
common.in2_pad_cumv_rows - common.in_rows - 1;
common.in2_pad_cumv_sel2_collow = 1;
common.in2_pad_cumv_sel2_colhig = common.in2_pad_cumv_cols;
common.in2_sub_cumh_rows =
common.in2_pad_cumv_sel2_rowhig - common.in2_pad_cumv_sel2_rowlow + 1;
common.in2_sub_cumh_cols =
common.in2_pad_cumv_sel2_colhig - common.in2_pad_cumv_sel2_collow + 1;
common.in2_sub_cumh_elem =
common.in2_sub_cumh_rows * common.in2_sub_cumh_cols;
common.in2_sub_cumh_mem = sizeof(float) * common.in2_sub_cumh_elem;
// pointers
for (i = 0; i < common.allPoints; i++) {
checkCudaErrors(hipMalloc((void **)&unique[i].d_in2_sub_cumh,
common.in2_sub_cumh_mem));
}
//====================================================================================================
// SELECTION
//====================================================================================================
// common
common.in2_sub_cumh_sel_rowlow = 1;
common.in2_sub_cumh_sel_rowhig = common.in2_sub_cumh_rows;
common.in2_sub_cumh_sel_collow = 1 + common.in_cols;
common.in2_sub_cumh_sel_colhig = common.in2_sub_cumh_cols - 1;
common.in2_sub_cumh_sel_rows =
common.in2_sub_cumh_sel_rowhig - common.in2_sub_cumh_sel_rowlow + 1;
common.in2_sub_cumh_sel_cols =
common.in2_sub_cumh_sel_colhig - common.in2_sub_cumh_sel_collow + 1;
common.in2_sub_cumh_sel_elem =
common.in2_sub_cumh_sel_rows * common.in2_sub_cumh_sel_cols;
common.in2_sub_cumh_sel_mem = sizeof(float) * common.in2_sub_cumh_sel_elem;
// pointers
for (i = 0; i < common.allPoints; i++) {
checkCudaErrors(hipMalloc((void **)&unique[i].d_in2_sub_cumh_sel,
common.in2_sub_cumh_sel_mem));
}
//====================================================================================================
// SELECTION 2, SUBTRACTION
//====================================================================================================
// common
common.in2_sub_cumh_sel2_rowlow = 1;
common.in2_sub_cumh_sel2_rowhig = common.in2_sub_cumh_rows;
common.in2_sub_cumh_sel2_collow = 1;
common.in2_sub_cumh_sel2_colhig =
common.in2_sub_cumh_cols - common.in_cols - 1;
common.in2_sub2_rows =
common.in2_sub_cumh_sel2_rowhig - common.in2_sub_cumh_sel2_rowlow + 1;
common.in2_sub2_cols =
common.in2_sub_cumh_sel2_colhig - common.in2_sub_cumh_sel2_collow + 1;
common.in2_sub2_elem = common.in2_sub2_rows * common.in2_sub2_cols;
common.in2_sub2_mem = sizeof(float) * common.in2_sub2_elem;
// pointers
for (i = 0; i < common.allPoints; i++) {
checkCudaErrors(
hipMalloc((void **)&unique[i].d_in2_sub2, common.in2_sub2_mem));
}
//======================================================================================================================================================
// CUMULATIVE SUM 2
//======================================================================================================================================================
//====================================================================================================
// MULTIPLICATION
//====================================================================================================
// common
common.in2_sqr_rows = common.in2_rows;
common.in2_sqr_cols = common.in2_cols;
common.in2_sqr_elem = common.in2_elem;
common.in2_sqr_mem = common.in2_mem;
// pointers
for (i = 0; i < common.allPoints; i++) {
checkCudaErrors(
hipMalloc((void **)&unique[i].d_in2_sqr, common.in2_sqr_mem));
}
//====================================================================================================
// SELECTION 2, SUBTRACTION
//====================================================================================================
// common
common.in2_sqr_sub2_rows = common.in2_sub2_rows;
common.in2_sqr_sub2_cols = common.in2_sub2_cols;
common.in2_sqr_sub2_elem = common.in2_sub2_elem;
common.in2_sqr_sub2_mem = common.in2_sub2_mem;
// pointers
for (i = 0; i < common.allPoints; i++) {
checkCudaErrors(hipMalloc((void **)&unique[i].d_in2_sqr_sub2,
common.in2_sqr_sub2_mem));
}
//======================================================================================================================================================
// FINAL
//======================================================================================================================================================
// common
common.in_sqr_rows = common.in_rows;
common.in_sqr_cols = common.in_cols;
common.in_sqr_elem = common.in_elem;
common.in_sqr_mem = common.in_mem;
// pointers
for (i = 0; i < common.allPoints; i++) {
checkCudaErrors(
hipMalloc((void **)&unique[i].d_in_sqr, common.in_sqr_mem));
}
//======================================================================================================================================================
// TEMPLATE MASK CREATE
//======================================================================================================================================================
// common
common.tMask_rows = common.in_rows + (common.sSize + 1 + common.sSize) - 1;
common.tMask_cols = common.tMask_rows;
common.tMask_elem = common.tMask_rows * common.tMask_cols;
common.tMask_mem = sizeof(float) * common.tMask_elem;
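// tMask is 131 x 131, matching the size of the full convolution output above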
// pointers
for (i = 0; i < common.allPoints; i++) {
checkCudaErrors(
hipMalloc((void **)&unique[i].d_tMask, common.tMask_mem));
}
//======================================================================================================================================================
// POINT MASK INITIALIZE
//======================================================================================================================================================
// common
common.mask_rows = common.maxMove;
common.mask_cols = common.mask_rows;
common.mask_elem = common.mask_rows * common.mask_cols;
common.mask_mem = sizeof(float) * common.mask_elem;
//======================================================================================================================================================
// MASK CONVOLUTION
//======================================================================================================================================================
// common
common.mask_conv_rows = common.tMask_rows; // number of rows in I
common.mask_conv_cols = common.tMask_cols; // number of columns in I
common.mask_conv_elem =
common.mask_conv_rows * common.mask_conv_cols; // number of elements
common.mask_conv_mem = sizeof(float) * common.mask_conv_elem;
common.mask_conv_ioffset = (common.mask_rows - 1) / 2;
if ((common.mask_rows - 1) % 2 > 0.5) {
common.mask_conv_ioffset = common.mask_conv_ioffset + 1;
}
common.mask_conv_joffset = (common.mask_cols - 1) / 2;
if ((common.mask_cols - 1) % 2 > 0.5) {
common.mask_conv_joffset = common.mask_conv_joffset + 1;
}
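// with mask_rows = mask_cols = maxMove = 10, (10 - 1) / 2 = 4 in integer
// arithmetic, and since (10 - 1) % 2 = 1 both offsets are bumped up to 5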
// pointers
for (i = 0; i < common.allPoints; i++) {
checkCudaErrors(
hipMalloc((void **)&unique[i].d_mask_conv, common.mask_conv_mem));
}
//======================================================================================================================================================
// KERNEL
//======================================================================================================================================================
//====================================================================================================
// THREAD BLOCK
//====================================================================================================
// All operations inside the kernel use the same maximum number of threads.
// The block size is set to the size needed by the largest operation (on the
// padded matrix); the other operations use subsets of that.
threads.x = NUMBER_THREADS; // define the number of threads in the block
threads.y = 1;
blocks.x = common.allPoints; // define the number of blocks in the grid
blocks.y = 1;
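// 1-D grid and blocks: one block per tracked point (endo + epi), NUMBER_THREADS threads each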
//====================================================================================================
// COPY ARGUMENTS
//====================================================================================================
checkCudaErrors(hipMalloc(&d_common, sizeof(params_common)));
checkCudaErrors(hipMemcpy(d_common, &common, sizeof(params_common),
hipMemcpyHostToDevice));
checkCudaErrors(hipMalloc(&d_unique, sizeof(params_unique) * ALL_POINTS));
checkCudaErrors(hipMemcpy(d_unique, &unique,
sizeof(params_unique) * ALL_POINTS,
hipMemcpyHostToDevice));
//====================================================================================================
// PRINT FRAME PROGRESS START
//====================================================================================================
printf("frame progress: ");
fflush(NULL);
//====================================================================================================
// LAUNCH
//====================================================================================================
// allocate the device-side copy of the per-frame parameters once, before the loop
checkCudaErrors(
hipMalloc(&d_common_change, sizeof(params_common_change)));
for (common_change.frame_no = 0; common_change.frame_no < frames_processed;
common_change.frame_no++) {
// extract the current frame from the video file (not cropped, not scaled, converted)
frame = get_frame(
frames, // pointer to video file
common_change.frame_no, // number of frame that needs to be returned
0, // cropped?
0, // scaled?
1); // converted
// copy frame to GPU memory
checkCudaErrors(hipMemcpy(common_change.d_frame, frame,
common.frame_mem, hipMemcpyHostToDevice));
// copy the updated frame number into the device-side per-frame parameters
checkCudaErrors(hipMemcpy(d_common_change, &common_change,
sizeof(params_common_change),
hipMemcpyHostToDevice));
// launch GPU kernel
hipLaunchKernelGGL(( kernel), dim3(blocks), dim3(threads), 0, 0, d_common_change, d_common, d_unique);
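// hipify translation of the CUDA launch
// kernel<<<blocks, threads>>>(d_common_change, d_common, d_unique);
// the two zeros are the dynamic shared memory size and the stream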
// free frame after each loop iteration, since AVI library allocates
// memory for every frame fetched
free(frame);
// print frame progress
printf("%d ", common_change.frame_no);
fflush(NULL);
}
//====================================================================================================
// PRINT FRAME PROGRESS END
//====================================================================================================
printf("\n");
fflush(NULL);
//====================================================================================================
// OUTPUT
//====================================================================================================
checkCudaErrors(hipMemcpy(common.tEndoRowLoc, common.d_tEndoRowLoc,
common.endo_mem * common.no_frames,
hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(common.tEndoColLoc, common.d_tEndoColLoc,
common.endo_mem * common.no_frames,
hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(common.tEpiRowLoc, common.d_tEpiRowLoc,
common.epi_mem * common.no_frames,
hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(common.tEpiColLoc, common.d_tEpiColLoc,
common.epi_mem * common.no_frames,
hipMemcpyDeviceToHost));
//==================================================50
// DUMP DATA TO FILE
//==================================================50
if (getenv("OUTPUT")) {
write_data("output.txt", common.no_frames, frames_processed,
common.endoPoints, common.tEndoRowLoc, common.tEndoColLoc,
common.epiPoints, common.tEpiRowLoc, common.tEpiColLoc);
}
//==================================================50
// End
//==================================================50
//======================================================================================================================================================
// DEALLOCATION
//======================================================================================================================================================
//====================================================================================================
// COMMON
//====================================================================================================
// frame
checkCudaErrors(hipFree(common_change.d_frame));
// endo points
free(common.endoRow);
free(common.endoCol);
free(common.tEndoRowLoc);
free(common.tEndoColLoc);
checkCudaErrors(hipFree(common.d_endoRow));
checkCudaErrors(hipFree(common.d_endoCol));
checkCudaErrors(hipFree(common.d_tEndoRowLoc));
checkCudaErrors(hipFree(common.d_tEndoColLoc));
checkCudaErrors(hipFree(common.d_endoT));
// epi points
free(common.epiRow);
free(common.epiCol);
free(common.tEpiRowLoc);
free(common.tEpiColLoc);
checkCudaErrors(hipFree(common.d_epiRow));
checkCudaErrors(hipFree(common.d_epiCol));
checkCudaErrors(hipFree(common.d_tEpiRowLoc));
checkCudaErrors(hipFree(common.d_tEpiColLoc));
checkCudaErrors(hipFree(common.d_epiT));
//====================================================================================================
// POINTERS
//====================================================================================================
for (i = 0; i < common.allPoints; i++) {
checkCudaErrors(hipFree(unique[i].d_in2));
checkCudaErrors(hipFree(unique[i].d_conv));
checkCudaErrors(hipFree(unique[i].d_in2_pad_cumv));
checkCudaErrors(hipFree(unique[i].d_in2_pad_cumv_sel));
checkCudaErrors(hipFree(unique[i].d_in2_sub_cumh));
checkCudaErrors(hipFree(unique[i].d_in2_sub_cumh_sel));
checkCudaErrors(hipFree(unique[i].d_in2_sub2));
checkCudaErrors(hipFree(unique[i].d_in2_sqr));
checkCudaErrors(hipFree(unique[i].d_in2_sqr_sub2));
checkCudaErrors(hipFree(unique[i].d_in_sqr));
checkCudaErrors(hipFree(unique[i].d_tMask));
checkCudaErrors(hipFree(unique[i].d_mask_conv));
}
}
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
// MAIN FUNCTION
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
| f634a61c7aa0c2d4990c87ed06269f71536e8c75.cu | //===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
// DEFINE / INCLUDE
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
//======================================================================================================================================================
// LIBRARIES
//======================================================================================================================================================
#include <helper_cuda.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <avilib.h>
#include <avimod.h>
#include <cuda.h>
//======================================================================================================================================================
// STRUCTURES, GLOBAL STRUCTURE VARIABLES
//======================================================================================================================================================
#include "define.c"
params_common_change common_change;
params_common_change *d_common_change;
params_common common;
params_common *d_common;
params_unique unique[ALL_POINTS]; // cannot determine size dynamically so choose
// more than usually needed
params_unique *d_unique;
//======================================================================================================================================================
// KERNEL CODE
//======================================================================================================================================================
#include "kernel.cu"
// WRITE DATA FUNCTION
//===============================================================================================================================================================================================================200
void write_data(char *filename, int frameNo, int frames_processed,
int endoPoints, int *input_a, int *input_b, int epiPoints,
int *input_2a, int *input_2b) {
//================================================================================80
// VARIABLES
//================================================================================80
FILE *fid;
int i, j;
char c;
//================================================================================80
// OPEN FILE FOR WRITING
//================================================================================80
fid = fopen(filename, "w+");
if (fid == NULL) {
printf("The file was not opened for writing\n");
return;
}
//================================================================================80
// WRITE VALUES TO THE FILE
//================================================================================80
fprintf(fid, "Total AVI Frames: %d\n", frameNo);
fprintf(fid, "Frames Processed: %d\n", frames_processed);
fprintf(fid, "endoPoints: %d\n", endoPoints);
fprintf(fid, "epiPoints: %d", epiPoints);
for (j = 0; j < frames_processed; j++) {
fprintf(fid, "\n---Frame %d---", j);
fprintf(fid, "\n--endo--\n", j);
for (i = 0; i < endoPoints; i++) {
fprintf(fid, "%d\t", input_a[j + i * frameNo]);
}
fprintf(fid, "\n");
for (i = 0; i < endoPoints; i++) {
// if(input_b[j*size+i] > 2000) input_b[j*size+i]=0;
fprintf(fid, "%d\t", input_b[j + i * frameNo]);
}
fprintf(fid, "\n--epi--\n", j);
for (i = 0; i < epiPoints; i++) {
// if(input_2a[j*size_2+i] > 2000) input_2a[j*size_2+i]=0;
fprintf(fid, "%d\t", input_2a[j + i * frameNo]);
}
fprintf(fid, "\n");
for (i = 0; i < epiPoints; i++) {
// if(input_2b[j*size_2+i] > 2000) input_2b[j*size_2+i]=0;
fprintf(fid, "%d\t", input_2b[j + i * frameNo]);
}
}
// ================================================================================80
// CLOSE FILE
// ================================================================================80
fclose(fid);
}
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
// MAIN FUNCTION
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
int main(int argc, char *argv[]) {
printf("WG size of kernel = %d \n", NUMBER_THREADS);
//======================================================================================================================================================
// VARIABLES
//======================================================================================================================================================
// CUDA kernel execution parameters
dim3 threads;
dim3 blocks;
// counter
int i;
int frames_processed;
// frames
char *video_file_name;
avi_t *frames;
fp *frame;
//======================================================================================================================================================
// FRAME
//======================================================================================================================================================
if (argc != 3) {
printf("ERROR: usage: heartwall <inputfile> <num of frames>\n");
exit(1);
}
// open movie file
video_file_name = argv[1];
frames = (avi_t *)AVI_open_input_file(video_file_name, 1); // added casting
if (frames == NULL) {
AVI_print_error((char *)"Error with AVI_open_input_file");
return -1;
}
// common
common.no_frames = AVI_video_frames(frames);
common.frame_rows = AVI_video_height(frames);
common.frame_cols = AVI_video_width(frames);
common.frame_elem = common.frame_rows * common.frame_cols;
common.frame_mem = sizeof(fp) * common.frame_elem;
// pointers
checkCudaErrors(
cudaMalloc((void **)&common_change.d_frame, common.frame_mem));
//======================================================================================================================================================
// CHECK INPUT ARGUMENTS
//======================================================================================================================================================
frames_processed = atoi(argv[2]);
if (frames_processed < 0 || frames_processed > common.no_frames) {
printf("ERROR: %d is an incorrect number of frames specified, select "
"in the range of 0-%d\n",
frames_processed, common.no_frames);
return 0;
}
//======================================================================================================================================================
// HARDCODED INPUTS FROM MATLAB
//======================================================================================================================================================
//====================================================================================================
// CONSTANTS
//====================================================================================================
common.sSize = 40;
common.tSize = 25;
common.maxMove = 10;
common.alpha = 0.87;
//====================================================================================================
// ENDO POINTS
//====================================================================================================
common.endoPoints = ENDO_POINTS;
common.endo_mem = sizeof(int) * common.endoPoints;
common.endoRow = (int *)malloc(common.endo_mem);
common.endoRow[0] = 369;
common.endoRow[1] = 400;
common.endoRow[2] = 429;
common.endoRow[3] = 452;
common.endoRow[4] = 476;
common.endoRow[5] = 486;
common.endoRow[6] = 479;
common.endoRow[7] = 458;
common.endoRow[8] = 433;
common.endoRow[9] = 404;
common.endoRow[10] = 374;
common.endoRow[11] = 346;
common.endoRow[12] = 318;
common.endoRow[13] = 294;
common.endoRow[14] = 277;
common.endoRow[15] = 269;
common.endoRow[16] = 275;
common.endoRow[17] = 287;
common.endoRow[18] = 311;
common.endoRow[19] = 339;
checkCudaErrors(cudaMalloc((void **)&common.d_endoRow, common.endo_mem));
checkCudaErrors(cudaMemcpy(common.d_endoRow, common.endoRow,
common.endo_mem, cudaMemcpyHostToDevice));
common.endoCol = (int *)malloc(common.endo_mem);
common.endoCol[0] = 408;
common.endoCol[1] = 406;
common.endoCol[2] = 397;
common.endoCol[3] = 383;
common.endoCol[4] = 354;
common.endoCol[5] = 322;
common.endoCol[6] = 294;
common.endoCol[7] = 270;
common.endoCol[8] = 250;
common.endoCol[9] = 237;
common.endoCol[10] = 235;
common.endoCol[11] = 241;
common.endoCol[12] = 254;
common.endoCol[13] = 273;
common.endoCol[14] = 300;
common.endoCol[15] = 328;
common.endoCol[16] = 356;
common.endoCol[17] = 383;
common.endoCol[18] = 401;
common.endoCol[19] = 411;
checkCudaErrors(cudaMalloc((void **)&common.d_endoCol, common.endo_mem));
checkCudaErrors(cudaMemcpy(common.d_endoCol, common.endoCol,
common.endo_mem, cudaMemcpyHostToDevice));
common.tEndoRowLoc = (int *)malloc(common.endo_mem * common.no_frames);
checkCudaErrors(cudaMalloc((void **)&common.d_tEndoRowLoc,
common.endo_mem * common.no_frames));
common.tEndoColLoc = (int *)malloc(common.endo_mem * common.no_frames);
checkCudaErrors(cudaMalloc((void **)&common.d_tEndoColLoc,
common.endo_mem * common.no_frames));
//====================================================================================================
// EPI POINTS
//====================================================================================================
common.epiPoints = EPI_POINTS;
common.epi_mem = sizeof(int) * common.epiPoints;
common.epiRow = (int *)malloc(common.epi_mem);
common.epiRow[0] = 390;
common.epiRow[1] = 419;
common.epiRow[2] = 448;
common.epiRow[3] = 474;
common.epiRow[4] = 501;
common.epiRow[5] = 519;
common.epiRow[6] = 535;
common.epiRow[7] = 542;
common.epiRow[8] = 543;
common.epiRow[9] = 538;
common.epiRow[10] = 528;
common.epiRow[11] = 511;
common.epiRow[12] = 491;
common.epiRow[13] = 466;
common.epiRow[14] = 438;
common.epiRow[15] = 406;
common.epiRow[16] = 376;
common.epiRow[17] = 347;
common.epiRow[18] = 318;
common.epiRow[19] = 291;
common.epiRow[20] = 275;
common.epiRow[21] = 259;
common.epiRow[22] = 256;
common.epiRow[23] = 252;
common.epiRow[24] = 252;
common.epiRow[25] = 257;
common.epiRow[26] = 266;
common.epiRow[27] = 283;
common.epiRow[28] = 305;
common.epiRow[29] = 331;
common.epiRow[30] = 360;
checkCudaErrors(cudaMalloc((void **)&common.d_epiRow, common.epi_mem));
checkCudaErrors(cudaMemcpy(common.d_epiRow, common.epiRow, common.epi_mem,
cudaMemcpyHostToDevice));
common.epiCol = (int *)malloc(common.epi_mem);
common.epiCol[0] = 457;
common.epiCol[1] = 454;
common.epiCol[2] = 446;
common.epiCol[3] = 431;
common.epiCol[4] = 411;
common.epiCol[5] = 388;
common.epiCol[6] = 361;
common.epiCol[7] = 331;
common.epiCol[8] = 301;
common.epiCol[9] = 273;
common.epiCol[10] = 243;
common.epiCol[11] = 218;
common.epiCol[12] = 196;
common.epiCol[13] = 178;
common.epiCol[14] = 166;
common.epiCol[15] = 157;
common.epiCol[16] = 155;
common.epiCol[17] = 165;
common.epiCol[18] = 177;
common.epiCol[19] = 197;
common.epiCol[20] = 218;
common.epiCol[21] = 248;
common.epiCol[22] = 276;
common.epiCol[23] = 304;
common.epiCol[24] = 333;
common.epiCol[25] = 361;
common.epiCol[26] = 391;
common.epiCol[27] = 415;
common.epiCol[28] = 434;
common.epiCol[29] = 448;
common.epiCol[30] = 455;
checkCudaErrors(cudaMalloc((void **)&common.d_epiCol, common.epi_mem));
checkCudaErrors(cudaMemcpy(common.d_epiCol, common.epiCol, common.epi_mem,
cudaMemcpyHostToDevice));
common.tEpiRowLoc = (int *)malloc(common.epi_mem * common.no_frames);
checkCudaErrors(cudaMalloc((void **)&common.d_tEpiRowLoc,
common.epi_mem * common.no_frames));
common.tEpiColLoc = (int *)malloc(common.epi_mem * common.no_frames);
checkCudaErrors(cudaMalloc((void **)&common.d_tEpiColLoc,
common.epi_mem * common.no_frames));
//====================================================================================================
// ALL POINTS
//====================================================================================================
common.allPoints = ALL_POINTS;
//======================================================================================================================================================
// TEMPLATE SIZES
//======================================================================================================================================================
// common
common.in_rows = common.tSize + 1 + common.tSize;
common.in_cols = common.in_rows;
common.in_elem = common.in_rows * common.in_cols;
common.in_mem = sizeof(fp) * common.in_elem;
//======================================================================================================================================================
// CREATE ARRAY OF TEMPLATES FOR ALL POINTS
//======================================================================================================================================================
// common
checkCudaErrors(cudaMalloc((void **)&common.d_endoT,
common.in_mem * common.endoPoints));
checkCudaErrors(
cudaMalloc((void **)&common.d_epiT, common.in_mem * common.epiPoints));
//======================================================================================================================================================
// SPECIFIC TO ENDO OR EPI TO BE SET HERE
//======================================================================================================================================================
for (i = 0; i < common.endoPoints; i++) {
unique[i].point_no = i;
unique[i].d_Row = common.d_endoRow;
unique[i].d_Col = common.d_endoCol;
unique[i].d_tRowLoc = common.d_tEndoRowLoc;
unique[i].d_tColLoc = common.d_tEndoColLoc;
unique[i].d_T = common.d_endoT;
}
for (i = common.endoPoints; i < common.allPoints; i++) {
unique[i].point_no = i - common.endoPoints;
unique[i].d_Row = common.d_epiRow;
unique[i].d_Col = common.d_epiCol;
unique[i].d_tRowLoc = common.d_tEpiRowLoc;
unique[i].d_tColLoc = common.d_tEpiColLoc;
unique[i].d_T = common.d_epiT;
}
//======================================================================================================================================================
// RIGHT TEMPLATE FROM TEMPLATE ARRAY
//======================================================================================================================================================
// pointers
for (i = 0; i < common.allPoints; i++) {
unique[i].in_pointer = unique[i].point_no * common.in_elem;
}
//======================================================================================================================================================
// AREA AROUND POINT FROM FRAME
//======================================================================================================================================================
// common
common.in2_rows = 2 * common.sSize + 1;
common.in2_cols = 2 * common.sSize + 1;
common.in2_elem = common.in2_rows * common.in2_cols;
common.in2_mem = sizeof(float) * common.in2_elem;
// pointers
for (i = 0; i < common.allPoints; i++) {
checkCudaErrors(cudaMalloc((void **)&unique[i].d_in2, common.in2_mem));
}
//======================================================================================================================================================
// CONVOLUTION
//======================================================================================================================================================
// common
common.conv_rows =
common.in_rows + common.in2_rows - 1; // number of rows in I
common.conv_cols =
common.in_cols + common.in2_cols - 1; // number of columns in I
common.conv_elem =
common.conv_rows * common.conv_cols; // number of elements
common.conv_mem = sizeof(float) * common.conv_elem;
common.ioffset = 0;
common.joffset = 0;
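// full 2-D convolution of the 51 x 51 template with the 81 x 81 search area:
// (51 + 81 - 1) x (51 + 81 - 1) = 131 x 131 elements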
// pointers
for (i = 0; i < common.allPoints; i++) {
checkCudaErrors(
cudaMalloc((void **)&unique[i].d_conv, common.conv_mem));
}
//======================================================================================================================================================
// CUMULATIVE SUM
//======================================================================================================================================================
//====================================================================================================
// PADDING OF ARRAY, VERTICAL CUMULATIVE SUM
//====================================================================================================
// common
common.in2_pad_add_rows = common.in_rows;
common.in2_pad_add_cols = common.in_cols;
common.in2_pad_cumv_rows = common.in2_rows + 2 * common.in2_pad_add_rows;
common.in2_pad_cumv_cols = common.in2_cols + 2 * common.in2_pad_add_cols;
common.in2_pad_cumv_elem =
common.in2_pad_cumv_rows * common.in2_pad_cumv_cols;
common.in2_pad_cumv_mem = sizeof(float) * common.in2_pad_cumv_elem;
// pointers
for (i = 0; i < common.allPoints; i++) {
checkCudaErrors(cudaMalloc((void **)&unique[i].d_in2_pad_cumv,
common.in2_pad_cumv_mem));
}
//====================================================================================================
// SELECTION
//====================================================================================================
// common
common.in2_pad_cumv_sel_rowlow = 1 + common.in_rows; // (1 to n+1)
common.in2_pad_cumv_sel_rowhig = common.in2_pad_cumv_rows - 1;
common.in2_pad_cumv_sel_collow = 1;
common.in2_pad_cumv_sel_colhig = common.in2_pad_cumv_cols;
common.in2_pad_cumv_sel_rows =
common.in2_pad_cumv_sel_rowhig - common.in2_pad_cumv_sel_rowlow + 1;
common.in2_pad_cumv_sel_cols =
common.in2_pad_cumv_sel_colhig - common.in2_pad_cumv_sel_collow + 1;
common.in2_pad_cumv_sel_elem =
common.in2_pad_cumv_sel_rows * common.in2_pad_cumv_sel_cols;
common.in2_pad_cumv_sel_mem = sizeof(float) * common.in2_pad_cumv_sel_elem;
// pointers
for (i = 0; i < common.allPoints; i++) {
checkCudaErrors(cudaMalloc((void **)&unique[i].d_in2_pad_cumv_sel,
common.in2_pad_cumv_sel_mem));
}
//====================================================================================================
// SELECTION 2, SUBTRACTION, HORIZONTAL CUMULATIVE SUM
//====================================================================================================
// common
common.in2_pad_cumv_sel2_rowlow = 1;
common.in2_pad_cumv_sel2_rowhig =
common.in2_pad_cumv_rows - common.in_rows - 1;
common.in2_pad_cumv_sel2_collow = 1;
common.in2_pad_cumv_sel2_colhig = common.in2_pad_cumv_cols;
common.in2_sub_cumh_rows =
common.in2_pad_cumv_sel2_rowhig - common.in2_pad_cumv_sel2_rowlow + 1;
common.in2_sub_cumh_cols =
common.in2_pad_cumv_sel2_colhig - common.in2_pad_cumv_sel2_collow + 1;
common.in2_sub_cumh_elem =
common.in2_sub_cumh_rows * common.in2_sub_cumh_cols;
common.in2_sub_cumh_mem = sizeof(float) * common.in2_sub_cumh_elem;
// pointers
for (i = 0; i < common.allPoints; i++) {
checkCudaErrors(cudaMalloc((void **)&unique[i].d_in2_sub_cumh,
common.in2_sub_cumh_mem));
}
//====================================================================================================
// SELECTION
//====================================================================================================
// common
common.in2_sub_cumh_sel_rowlow = 1;
common.in2_sub_cumh_sel_rowhig = common.in2_sub_cumh_rows;
common.in2_sub_cumh_sel_collow = 1 + common.in_cols;
common.in2_sub_cumh_sel_colhig = common.in2_sub_cumh_cols - 1;
common.in2_sub_cumh_sel_rows =
common.in2_sub_cumh_sel_rowhig - common.in2_sub_cumh_sel_rowlow + 1;
common.in2_sub_cumh_sel_cols =
common.in2_sub_cumh_sel_colhig - common.in2_sub_cumh_sel_collow + 1;
common.in2_sub_cumh_sel_elem =
common.in2_sub_cumh_sel_rows * common.in2_sub_cumh_sel_cols;
common.in2_sub_cumh_sel_mem = sizeof(float) * common.in2_sub_cumh_sel_elem;
// pointers
for (i = 0; i < common.allPoints; i++) {
checkCudaErrors(cudaMalloc((void **)&unique[i].d_in2_sub_cumh_sel,
common.in2_sub_cumh_sel_mem));
}
//====================================================================================================
// SELECTION 2, SUBTRACTION
//====================================================================================================
// common
common.in2_sub_cumh_sel2_rowlow = 1;
common.in2_sub_cumh_sel2_rowhig = common.in2_sub_cumh_rows;
common.in2_sub_cumh_sel2_collow = 1;
common.in2_sub_cumh_sel2_colhig =
common.in2_sub_cumh_cols - common.in_cols - 1;
common.in2_sub2_rows =
common.in2_sub_cumh_sel2_rowhig - common.in2_sub_cumh_sel2_rowlow + 1;
common.in2_sub2_cols =
common.in2_sub_cumh_sel2_colhig - common.in2_sub_cumh_sel2_collow + 1;
common.in2_sub2_elem = common.in2_sub2_rows * common.in2_sub2_cols;
common.in2_sub2_mem = sizeof(float) * common.in2_sub2_elem;
// pointers
for (i = 0; i < common.allPoints; i++) {
checkCudaErrors(
cudaMalloc((void **)&unique[i].d_in2_sub2, common.in2_sub2_mem));
}
//======================================================================================================================================================
// CUMULATIVE SUM 2
//======================================================================================================================================================
//====================================================================================================
// MULTIPLICATION
//====================================================================================================
// common
common.in2_sqr_rows = common.in2_rows;
common.in2_sqr_cols = common.in2_cols;
common.in2_sqr_elem = common.in2_elem;
common.in2_sqr_mem = common.in2_mem;
// pointers
for (i = 0; i < common.allPoints; i++) {
checkCudaErrors(
cudaMalloc((void **)&unique[i].d_in2_sqr, common.in2_sqr_mem));
}
//====================================================================================================
// SELECTION 2, SUBTRACTION
//====================================================================================================
// common
common.in2_sqr_sub2_rows = common.in2_sub2_rows;
common.in2_sqr_sub2_cols = common.in2_sub2_cols;
common.in2_sqr_sub2_elem = common.in2_sub2_elem;
common.in2_sqr_sub2_mem = common.in2_sub2_mem;
// pointers
for (i = 0; i < common.allPoints; i++) {
checkCudaErrors(cudaMalloc((void **)&unique[i].d_in2_sqr_sub2,
common.in2_sqr_sub2_mem));
}
//======================================================================================================================================================
// FINAL
//======================================================================================================================================================
// common
common.in_sqr_rows = common.in_rows;
common.in_sqr_cols = common.in_cols;
common.in_sqr_elem = common.in_elem;
common.in_sqr_mem = common.in_mem;
// pointers
for (i = 0; i < common.allPoints; i++) {
checkCudaErrors(
cudaMalloc((void **)&unique[i].d_in_sqr, common.in_sqr_mem));
}
//======================================================================================================================================================
// TEMPLATE MASK CREATE
//======================================================================================================================================================
// common
common.tMask_rows = common.in_rows + (common.sSize + 1 + common.sSize) - 1;
common.tMask_cols = common.tMask_rows;
common.tMask_elem = common.tMask_rows * common.tMask_cols;
common.tMask_mem = sizeof(float) * common.tMask_elem;
// pointers
for (i = 0; i < common.allPoints; i++) {
checkCudaErrors(
cudaMalloc((void **)&unique[i].d_tMask, common.tMask_mem));
}
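  // NOTE: tMask follows the 'full' 2-D convolution size rule out = in + k - 1
  // with kernel width k = sSize + 1 + sSize; e.g. in_rows = 51 and sSize = 40
  // would give tMask_rows = 51 + 81 - 1 = 131 (illustrative values only -- the
  // actual sizes come from the input template and define.c).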
//======================================================================================================================================================
// POINT MASK INITIALIZE
//======================================================================================================================================================
// common
common.mask_rows = common.maxMove;
common.mask_cols = common.mask_rows;
common.mask_elem = common.mask_rows * common.mask_cols;
common.mask_mem = sizeof(float) * common.mask_elem;
//======================================================================================================================================================
// MASK CONVOLUTION
//======================================================================================================================================================
// common
common.mask_conv_rows = common.tMask_rows; // number of rows in I
common.mask_conv_cols = common.tMask_cols; // number of columns in I
common.mask_conv_elem =
common.mask_conv_rows * common.mask_conv_cols; // number of elements
common.mask_conv_mem = sizeof(float) * common.mask_conv_elem;
common.mask_conv_ioffset = (common.mask_rows - 1) / 2;
if ((common.mask_rows - 1) % 2 > 0.5) {
common.mask_conv_ioffset = common.mask_conv_ioffset + 1;
}
common.mask_conv_joffset = (common.mask_cols - 1) / 2;
if ((common.mask_cols - 1) % 2 > 0.5) {
common.mask_conv_joffset = common.mask_conv_joffset + 1;
}
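  // NOTE: the two if-blocks above round the half-width up, i.e.
  // mask_conv_ioffset = ceil((mask_rows - 1) / 2.0), which for integer sizes is
  // the same as common.mask_rows / 2; these offsets are the half-width of the
  // point mask, used when aligning it with tMask in the kernel's convolution.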
// pointers
for (i = 0; i < common.allPoints; i++) {
checkCudaErrors(
cudaMalloc((void **)&unique[i].d_mask_conv, common.mask_conv_mem));
}
//======================================================================================================================================================
// KERNEL
//======================================================================================================================================================
//====================================================================================================
// THREAD BLOCK
//====================================================================================================
  // All operations within the kernel use the same maximum number of threads.
  // The block size is set to the size required by the largest operation (on the
  // padded matrix); the other operations use subsets of that.
threads.x = NUMBER_THREADS; // define the number of threads in the block
threads.y = 1;
blocks.x = common.allPoints; // define the number of blocks in the grid
blocks.y = 1;
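  // Illustrative launch shape only (the real constants live in define.c): with
  // NUMBER_THREADS == 256 and ALL_POINTS == 51 tracked points this configures
  //   kernel<<<dim3(51, 1), dim3(256, 1)>>>(d_common_change, d_common, d_unique);
  // i.e. one block per heart-wall point, with that block's threads cooperating
  // on the point's working matrices.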
//====================================================================================================
// COPY ARGUMENTS
//====================================================================================================
checkCudaErrors(cudaMalloc(&d_common, sizeof(params_common)));
checkCudaErrors(cudaMemcpy(d_common, &common, sizeof(params_common),
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMalloc(&d_unique, sizeof(params_unique) * ALL_POINTS));
checkCudaErrors(cudaMemcpy(d_unique, &unique,
sizeof(params_unique) * ALL_POINTS,
cudaMemcpyHostToDevice));
//====================================================================================================
// PRINT FRAME PROGRESS START
//====================================================================================================
printf("frame progress: ");
fflush(NULL);
//====================================================================================================
// LAUNCH
//====================================================================================================
for (common_change.frame_no = 0; common_change.frame_no < frames_processed;
common_change.frame_no++) {
    // Extract the current frame from the video file
frame = get_frame(
frames, // pointer to video file
common_change.frame_no, // number of frame that needs to be returned
0, // cropped?
0, // scaled?
1); // converted
// copy frame to GPU memory
checkCudaErrors(cudaMemcpy(common_change.d_frame, frame,
common.frame_mem, cudaMemcpyHostToDevice));
checkCudaErrors(
cudaMalloc(&d_common_change, sizeof(params_common_change)));
checkCudaErrors(cudaMemcpy(d_common_change, &common_change,
sizeof(params_common_change),
cudaMemcpyHostToDevice));
// launch GPU kernel
kernel<<<blocks, threads>>>(d_common_change, d_common, d_unique);
// free frame after each loop iteration, since AVI library allocates
// memory for every frame fetched
free(frame);
// print frame progress
printf("%d ", common_change.frame_no);
fflush(NULL);
}
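  // NOTE: d_common_change is cudaMalloc'd on every iteration of the loop above
  // and never cudaFree'd, so one allocation per processed frame is leaked. A
  // leak-free variant (sketch only, same behaviour otherwise) would hoist the
  // allocation out of the loop:
  //   checkCudaErrors(cudaMalloc(&d_common_change, sizeof(params_common_change)));
  //   for (...) { /* copy frame and common_change, launch kernel, free frame */ }
  //   checkCudaErrors(cudaFree(d_common_change));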
//====================================================================================================
// PRINT FRAME PROGRESS END
//====================================================================================================
printf("\n");
fflush(NULL);
//====================================================================================================
// OUTPUT
//====================================================================================================
checkCudaErrors(cudaMemcpy(common.tEndoRowLoc, common.d_tEndoRowLoc,
common.endo_mem * common.no_frames,
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(common.tEndoColLoc, common.d_tEndoColLoc,
common.endo_mem * common.no_frames,
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(common.tEpiRowLoc, common.d_tEpiRowLoc,
common.epi_mem * common.no_frames,
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(common.tEpiColLoc, common.d_tEpiColLoc,
common.epi_mem * common.no_frames,
cudaMemcpyDeviceToHost));
//==================================================50
// DUMP DATA TO FILE
//==================================================50
if (getenv("OUTPUT")) {
write_data("output.txt", common.no_frames, frames_processed,
common.endoPoints, common.tEndoRowLoc, common.tEndoColLoc,
common.epiPoints, common.tEpiRowLoc, common.tEpiColLoc);
}
//==================================================50
// End
//==================================================50
//======================================================================================================================================================
// DEALLOCATION
//======================================================================================================================================================
//====================================================================================================
// COMMON
//====================================================================================================
// frame
checkCudaErrors(cudaFree(common_change.d_frame));
// endo points
free(common.endoRow);
free(common.endoCol);
free(common.tEndoRowLoc);
free(common.tEndoColLoc);
checkCudaErrors(cudaFree(common.d_endoRow));
checkCudaErrors(cudaFree(common.d_endoCol));
checkCudaErrors(cudaFree(common.d_tEndoRowLoc));
checkCudaErrors(cudaFree(common.d_tEndoColLoc));
checkCudaErrors(cudaFree(common.d_endoT));
// epi points
free(common.epiRow);
free(common.epiCol);
free(common.tEpiRowLoc);
free(common.tEpiColLoc);
checkCudaErrors(cudaFree(common.d_epiRow));
checkCudaErrors(cudaFree(common.d_epiCol));
checkCudaErrors(cudaFree(common.d_tEpiRowLoc));
checkCudaErrors(cudaFree(common.d_tEpiColLoc));
checkCudaErrors(cudaFree(common.d_epiT));
//====================================================================================================
// POINTERS
//====================================================================================================
for (i = 0; i < common.allPoints; i++) {
checkCudaErrors(cudaFree(unique[i].d_in2));
checkCudaErrors(cudaFree(unique[i].d_conv));
checkCudaErrors(cudaFree(unique[i].d_in2_pad_cumv));
checkCudaErrors(cudaFree(unique[i].d_in2_pad_cumv_sel));
checkCudaErrors(cudaFree(unique[i].d_in2_sub_cumh));
checkCudaErrors(cudaFree(unique[i].d_in2_sub_cumh_sel));
checkCudaErrors(cudaFree(unique[i].d_in2_sub2));
checkCudaErrors(cudaFree(unique[i].d_in2_sqr));
checkCudaErrors(cudaFree(unique[i].d_in2_sqr_sub2));
checkCudaErrors(cudaFree(unique[i].d_in_sqr));
checkCudaErrors(cudaFree(unique[i].d_tMask));
checkCudaErrors(cudaFree(unique[i].d_mask_conv));
}
}
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
// MAIN FUNCTION
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
|
21614225bb7d0b578efa749e5c0bdc43d0b3194b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <opencv2/core/cuda.hpp>
#include <opencv2/cudev.hpp>
#include <iostream>
#include "mvc_blend.h"
using namespace cv::cuda;
using cv::cudev::divUp;
using namespace std;
__global__ void CloneZero(const PtrStepSz<uchar3> image,
PtrStepSz<uchar3> result_image,
int pano_w, int pano_h) {
const int y = blockIdx.x * blockDim.x + threadIdx.x;
const int x = blockIdx.y * blockDim.y + threadIdx.y;
if (x < 0 || y < 0 || x >= pano_w || y >= pano_h) {
return;
}
result_image(y, x) = image(y, x);
}
__global__ void CalculateBoundaryDiff(const PtrStepSz<uchar3> image,
PtrStepSz<uchar3> result_image,
int * seam_element,
int * boundary,
double * boundary_diff,
int iter, int seam_size,
int pano_w, int pano_h) {
	const int seam_idx = blockIdx.x * blockDim.x + threadIdx.x;
	if (seam_idx < 0 || seam_idx >= seam_size) {
		return;
	}
	// read the seam element only after the bounds check to avoid an out-of-range access
	const int point_idx = seam_element[seam_idx];
int x = boundary[point_idx * 2];
int y = boundary[point_idx * 2 + 1];
uchar3 color_src = image(y + (iter + 1) * pano_h, x % pano_w);
uchar3 color_dst = result_image(y, x % pano_w);
double3 diff = make_double3(color_dst.x - color_src.x,
color_dst.y - color_src.y,
color_dst.z - color_src.z);
boundary_diff[seam_idx * 3] = diff.x;
boundary_diff[seam_idx * 3 + 1] = diff.y;
boundary_diff[seam_idx * 3 + 2] = diff.z;
}
__global__ void CalculateMembrane(double* mvc_coord,
double* boundary_diff,
double* membrane,
int seam_size, int vertex_size) {
const int vertex_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (vertex_idx < 0 || vertex_idx >= vertex_size) {
return;
}
double3 offset = make_double3(0.0, 0.0, 0.0);
for (int i = 0; i < seam_size; i++) {
offset.x += mvc_coord[vertex_idx * seam_size + i] * boundary_diff[i * 3];
offset.y += mvc_coord[vertex_idx * seam_size + i] * boundary_diff[i * 3 + 1];
offset.z += mvc_coord[vertex_idx * seam_size + i] * boundary_diff[i * 3 + 2];
}
membrane[vertex_idx * 3] = offset.x;
membrane[vertex_idx * 3 + 1] = offset.y;
membrane[vertex_idx * 3 + 2] = offset.z;
}
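// CalculateMembrane evaluates the mean-value-coordinate interpolant of the
// boundary colour difference at every mesh vertex v:
//   membrane(v) = sum_i lambda_i(v) * (dst(b_i) - src(b_i))   per channel,
// where lambda_i(v) are the precomputed weights stored row-wise in mvc_coord
// (vertex_size x seam_size) and the differences come from CalculateBoundaryDiff,
// so each thread performs O(seam_size) multiply-adds.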
__global__ void CalculateSeamVertex(const PtrStepSz<uchar3> image,
PtrStepSz<uchar3> result_image,
int * diff_vertex,
double * mvc_diff_coord,
double * boundary_diff,
int iter, int seam_size, int vertex_size,
int pano_w, int pano_h) {
const int vertex_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (vertex_idx < 0 || vertex_idx >= vertex_size) {
return;
}
const int x = diff_vertex[vertex_idx * 2];
const int y = diff_vertex[vertex_idx * 2 + 1];
double3 offset = make_double3(0.0, 0.0, 0.0);
for (int i = 0; i < seam_size; i++) {
offset.x += mvc_diff_coord[vertex_idx * seam_size + i] * boundary_diff[i * 3];
offset.y += mvc_diff_coord[vertex_idx * seam_size + i] * boundary_diff[i * 3 + 1];
offset.z += mvc_diff_coord[vertex_idx * seam_size + i] * boundary_diff[i * 3 + 2];
}
uchar3 color_src = image(y + (iter + 1) * pano_h, x % pano_w);
uchar3 result_color;
	result_color.x = uchar(MAX(MIN((color_src.x + offset.x), 255.0), 0.0));
	result_color.y = uchar(MAX(MIN((color_src.y + offset.y), 255.0), 0.0));
	result_color.z = uchar(MAX(MIN((color_src.z + offset.z), 255.0), 0.0));
result_image(y, x % pano_w) = result_color;
}
__global__ void CalculateColors(const PtrStepSz<uchar3> image,
PtrStepSz<uchar3> result_image,
const PtrStepSz<int2> triangle_map,
const PtrStepSz<double3> triangle_component,
double ** membranes,
int ** triangle_elements,
int pano_w, int pano_h) {
const int y = blockDim.x * blockIdx.x + threadIdx.x;
const int x = blockDim.y * blockIdx.y + threadIdx.y;
if (x < 0 || y < 0 || x >= pano_w || y >= pano_h) {
return;
}
int2 info = triangle_map(y, x);
int image_idx = info.x;
int triangle_idx = info.y;
if (image_idx <= 0) return;
double3 v = triangle_component(y, x);
const double * membrane = membranes[image_idx - 1];
const int * triangle = triangle_elements[image_idx - 1];
double3 color = make_double3(0.0, 0.0, 0.0);
int p1 = triangle[triangle_idx];
int p2 = triangle[triangle_idx + 1];
int p3 = triangle[triangle_idx + 2];
color.x = membrane[p1 * 3] * v.x + \
membrane[p2 * 3] * v.y + \
membrane[p3 * 3] * v.z;
color.y = membrane[p1 * 3 + 1] * v.x + \
membrane[p2 * 3 + 1] * v.y + \
membrane[p3 * 3 + 1] * v.z;
color.z = membrane[p1 * 3 + 2] * v.x + \
membrane[p2 * 3 + 2] * v.y + \
membrane[p3 * 3 + 2] * v.z;
uchar3 color_src = image(y + image_idx * pano_h, x);
uchar3 result_color;
result_color.x = uchar(MAX(MIN((color_src.x + color.x), 255.0), 0.0));
result_color.y = uchar(MAX(MIN((color_src.y + color.y), 255.0), 0.0));
result_color.z = uchar(MAX(MIN((color_src.z + color.z), 255.0), 0.0));
result_image(y, x) = result_color;
}
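// CalculateColors spreads the per-vertex membrane offsets across each triangle
// using the per-pixel weights (v.x, v.y, v.z) stored in triangle_component
// (presumably barycentric coordinates): for a pixel inside triangle (p1, p2, p3)
//   offset = v.x * membrane[p1] + v.y * membrane[p2] + v.z * membrane[p3]
// per channel, and the blended pixel is clamp(src + offset, 0, 255).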
void MVCBlend::CalculateVertexes(const GpuMat& image, GpuMat& result_image) {
cout << pano_h << ' ' << pano_w << endl;
const uint thread_size = 512;
const dim3 thread_size_2d = dim3(1, 512);
int blend_num = boundaries.size();
// precompute
result_image.create(cv::Size(pano_w, pano_h), CV_8UC3);
const dim3 clone_blocks = dim3(divUp(pano_h, thread_size_2d.x), divUp(pano_w, thread_size_2d.y));
CloneZero << < clone_blocks, thread_size_2d >> > (image, result_image, pano_w, pano_h);
for (int iter = 0; iter < blend_num; iter++) {
int vertex_size = vertexes[iter].size();
int seam_size = seam_elements[iter].size();
const uint boundary_blocks = divUp(seam_size, thread_size);
CalculateBoundaryDiff << < boundary_blocks, thread_size >> > (image, result_image,
h_seam_elements[iter],
h_boundaries[iter],
h_boundary_diff[iter],
iter, seam_size,
pano_w, pano_h);
const uint membrane_blocks = divUp(vertex_size, thread_size);
CalculateMembrane << < membrane_blocks, thread_size >> > (h_mvc_coords[iter],
h_boundary_diff[iter],
h_membranes[iter],
seam_size, vertex_size);
if (iter < blend_num - 1) {
int diff_vertex_size = diff_vertexes[iter].size();
const uint seam_vertex_blocks = divUp(diff_vertex_size, thread_size);
CalculateSeamVertex << < seam_vertex_blocks, thread_size >> > (image, result_image,
h_diff_vertexes[iter],
h_mvc_diff_coords[iter],
h_boundary_diff[iter],
iter, seam_size, diff_vertex_size,
pano_w, pano_h);
}
}
}
void MVCBlend::CalculateFragments(const GpuMat & image, GpuMat & result_image) {
const dim3 thread_size_2d = dim3(1, 512);
const dim3 color_blocks = dim3(divUp(pano_h, thread_size_2d.x), divUp(pano_w, thread_size_2d.y));
CalculateColors << < color_blocks, thread_size_2d >> > (image, result_image,
d_triangle_map, d_triangle_component,
d_membranes, d_triangle_elements,
pano_w, pano_h);
} |
21614225bb7d0b578efa749e5c0bdc43d0b3194b.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <opencv2/core/cuda.hpp>
#include <opencv2/cudev.hpp>
#include <iostream>
#include "mvc_blend.h"
using namespace cv::cuda;
using cv::cudev::divUp;
using namespace std;
__global__ void CloneZero(const PtrStepSz<uchar3> image,
PtrStepSz<uchar3> result_image,
int pano_w, int pano_h) {
const int y = blockIdx.x * blockDim.x + threadIdx.x;
const int x = blockIdx.y * blockDim.y + threadIdx.y;
if (x < 0 || y < 0 || x >= pano_w || y >= pano_h) {
return;
}
result_image(y, x) = image(y, x);
}
__global__ void CalculateBoundaryDiff(const PtrStepSz<uchar3> image,
PtrStepSz<uchar3> result_image,
int * seam_element,
int * boundary,
double * boundary_diff,
int iter, int seam_size,
int pano_w, int pano_h) {
	const int seam_idx = blockIdx.x * blockDim.x + threadIdx.x;
	if (seam_idx < 0 || seam_idx >= seam_size) {
		return;
	}
	// read the seam element only after the bounds check to avoid an out-of-range access
	const int point_idx = seam_element[seam_idx];
int x = boundary[point_idx * 2];
int y = boundary[point_idx * 2 + 1];
uchar3 color_src = image(y + (iter + 1) * pano_h, x % pano_w);
uchar3 color_dst = result_image(y, x % pano_w);
double3 diff = make_double3(color_dst.x - color_src.x,
color_dst.y - color_src.y,
color_dst.z - color_src.z);
boundary_diff[seam_idx * 3] = diff.x;
boundary_diff[seam_idx * 3 + 1] = diff.y;
boundary_diff[seam_idx * 3 + 2] = diff.z;
}
__global__ void CalculateMembrane(double* mvc_coord,
double* boundary_diff,
double* membrane,
int seam_size, int vertex_size) {
const int vertex_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (vertex_idx < 0 || vertex_idx >= vertex_size) {
return;
}
double3 offset = make_double3(0.0, 0.0, 0.0);
for (int i = 0; i < seam_size; i++) {
offset.x += mvc_coord[vertex_idx * seam_size + i] * boundary_diff[i * 3];
offset.y += mvc_coord[vertex_idx * seam_size + i] * boundary_diff[i * 3 + 1];
offset.z += mvc_coord[vertex_idx * seam_size + i] * boundary_diff[i * 3 + 2];
}
membrane[vertex_idx * 3] = offset.x;
membrane[vertex_idx * 3 + 1] = offset.y;
membrane[vertex_idx * 3 + 2] = offset.z;
}
__global__ void CalculateSeamVertex(const PtrStepSz<uchar3> image,
PtrStepSz<uchar3> result_image,
int * diff_vertex,
double * mvc_diff_coord,
double * boundary_diff,
int iter, int seam_size, int vertex_size,
int pano_w, int pano_h) {
const int vertex_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (vertex_idx < 0 || vertex_idx >= vertex_size) {
return;
}
const int x = diff_vertex[vertex_idx * 2];
const int y = diff_vertex[vertex_idx * 2 + 1];
double3 offset = make_double3(0.0, 0.0, 0.0);
for (int i = 0; i < seam_size; i++) {
offset.x += mvc_diff_coord[vertex_idx * seam_size + i] * boundary_diff[i * 3];
offset.y += mvc_diff_coord[vertex_idx * seam_size + i] * boundary_diff[i * 3 + 1];
offset.z += mvc_diff_coord[vertex_idx * seam_size + i] * boundary_diff[i * 3 + 2];
}
uchar3 color_src = image(y + (iter + 1) * pano_h, x % pano_w);
uchar3 result_color;
	result_color.x = uchar(MAX(MIN((color_src.x + offset.x), 255.0), 0.0));
	result_color.y = uchar(MAX(MIN((color_src.y + offset.y), 255.0), 0.0));
	result_color.z = uchar(MAX(MIN((color_src.z + offset.z), 255.0), 0.0));
result_image(y, x % pano_w) = result_color;
}
__global__ void CalculateColors(const PtrStepSz<uchar3> image,
PtrStepSz<uchar3> result_image,
const PtrStepSz<int2> triangle_map,
const PtrStepSz<double3> triangle_component,
double ** membranes,
int ** triangle_elements,
int pano_w, int pano_h) {
const int y = blockDim.x * blockIdx.x + threadIdx.x;
const int x = blockDim.y * blockIdx.y + threadIdx.y;
if (x < 0 || y < 0 || x >= pano_w || y >= pano_h) {
return;
}
int2 info = triangle_map(y, x);
int image_idx = info.x;
int triangle_idx = info.y;
if (image_idx <= 0) return;
double3 v = triangle_component(y, x);
const double * membrane = membranes[image_idx - 1];
const int * triangle = triangle_elements[image_idx - 1];
double3 color = make_double3(0.0, 0.0, 0.0);
int p1 = triangle[triangle_idx];
int p2 = triangle[triangle_idx + 1];
int p3 = triangle[triangle_idx + 2];
color.x = membrane[p1 * 3] * v.x + \
membrane[p2 * 3] * v.y + \
membrane[p3 * 3] * v.z;
color.y = membrane[p1 * 3 + 1] * v.x + \
membrane[p2 * 3 + 1] * v.y + \
membrane[p3 * 3 + 1] * v.z;
color.z = membrane[p1 * 3 + 2] * v.x + \
membrane[p2 * 3 + 2] * v.y + \
membrane[p3 * 3 + 2] * v.z;
uchar3 color_src = image(y + image_idx * pano_h, x);
uchar3 result_color;
result_color.x = uchar(MAX(MIN((color_src.x + color.x), 255.0), 0.0));
result_color.y = uchar(MAX(MIN((color_src.y + color.y), 255.0), 0.0));
result_color.z = uchar(MAX(MIN((color_src.z + color.z), 255.0), 0.0));
result_image(y, x) = result_color;
}
void MVCBlend::CalculateVertexes(const GpuMat& image, GpuMat& result_image) {
cout << pano_h << ' ' << pano_w << endl;
const uint thread_size = 512;
const dim3 thread_size_2d = dim3(1, 512);
int blend_num = boundaries.size();
// precompute
result_image.create(cv::Size(pano_w, pano_h), CV_8UC3);
const dim3 clone_blocks = dim3(divUp(pano_h, thread_size_2d.x), divUp(pano_w, thread_size_2d.y));
CloneZero << < clone_blocks, thread_size_2d >> > (image, result_image, pano_w, pano_h);
for (int iter = 0; iter < blend_num; iter++) {
int vertex_size = vertexes[iter].size();
int seam_size = seam_elements[iter].size();
const uint boundary_blocks = divUp(seam_size, thread_size);
CalculateBoundaryDiff << < boundary_blocks, thread_size >> > (image, result_image,
h_seam_elements[iter],
h_boundaries[iter],
h_boundary_diff[iter],
iter, seam_size,
pano_w, pano_h);
const uint membrane_blocks = divUp(vertex_size, thread_size);
CalculateMembrane << < membrane_blocks, thread_size >> > (h_mvc_coords[iter],
h_boundary_diff[iter],
h_membranes[iter],
seam_size, vertex_size);
if (iter < blend_num - 1) {
int diff_vertex_size = diff_vertexes[iter].size();
const uint seam_vertex_blocks = divUp(diff_vertex_size, thread_size);
CalculateSeamVertex << < seam_vertex_blocks, thread_size >> > (image, result_image,
h_diff_vertexes[iter],
h_mvc_diff_coords[iter],
h_boundary_diff[iter],
iter, seam_size, diff_vertex_size,
pano_w, pano_h);
}
}
}
void MVCBlend::CalculateFragments(const GpuMat & image, GpuMat & result_image) {
const dim3 thread_size_2d = dim3(1, 512);
const dim3 color_blocks = dim3(divUp(pano_h, thread_size_2d.x), divUp(pano_w, thread_size_2d.y));
CalculateColors << < color_blocks, thread_size_2d >> > (image, result_image,
d_triangle_map, d_triangle_component,
d_membranes, d_triangle_elements,
pano_w, pano_h);
} |
82ad8d9377908809b21fce1e33fce62c1a645326.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "saber/saber_funcs_param.h"
#include "saber/funcs/impl/cuda/saber_reverse_input.h"
#include "saber/funcs/saber_util.h"
namespace anakin {
namespace saber {
template<DataType OpDtype>
SaberStatus SaberReverseInput<NV, OpDtype>::init(const std::vector<OpTensor*>& inputs,
std::vector<OpTensor*>& outputs,
EmptyParam<NV>& param,
Context<NV>& ctx) {
this->_ctx = &ctx;
for (int i = 0; i < inputs.size(); ++i) {
_offset_map_vec.push_back(*new Tensor<NVHX86>());
_offset_map_vec[i].set_dtype(AK_INT32);
_offset_map_cu_vec.push_back(*new OpTensor());
_offset_map_cu_vec[i].set_dtype(AK_INT32);
}
return create(inputs, outputs, param, ctx);
};
template<DataType OpDtype>
SaberStatus SaberReverseInput<NV, OpDtype>::create(const std::vector<OpTensor*>& inputs,
std::vector<OpTensor*>& outputs,
EmptyParam<NV>& param,
Context<NV>& ctx) {
    if (this->_ctx != &ctx) {
this->_ctx = &ctx;
}
return SaberSuccess;
};
template <typename Dtype>
__global__ static void ker_reverse_input(const Dtype* in, Dtype* out, int length, int* offset) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < length) {
out[offset[tid]] = in[tid];
}
}
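// ker_reverse_input is a pure scatter, out[offset[i]] = in[i]. With the offset map
// built on the host in dispatch() below, this writes every sequence back in reverse
// word order. Worked example (illustrative values): seq_offset = {0, 3, 5} describes
// sequences [0,1,2] and [3,4]; the map becomes {2, 1, 0, 4, 3}, so words 0..2 land
// at positions 2..0 and words 3..4 land at positions 4..3.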
template<DataType OpDtype>
SaberStatus SaberReverseInput<NV, OpDtype>::dispatch(const std::vector<OpTensor*>& inputs,
std::vector<OpTensor*>& outputs,
EmptyParam<NV>& param) {
int input_size = inputs.size();
hipStream_t stream = this->_ctx->get_compute_stream();
for (int input_id = 0; input_id < input_size; ++input_id) {
std::vector<std::vector<int>> offset_vec = inputs[input_id]->get_seq_offset();
std::vector<int> offset = offset_vec[offset_vec.size() - 1];
int word_sum = offset[offset.size() - 1];
utils::try_expand_tensor(_offset_map_vec[input_id], word_sum);
utils::try_expand_tensor(_offset_map_cu_vec[input_id], word_sum);
int* offset_map_ptr = static_cast<int*>(_offset_map_vec[input_id].mutable_data());
int* offset_map_cu_ptr = static_cast<int*>(_offset_map_cu_vec[input_id].mutable_data());
for (int sequence_id = 0; sequence_id < offset.size() - 1; sequence_id++) {
int start = offset[sequence_id];
int end = offset[sequence_id + 1] - 1;
for (int index = 0; index <= end - start; index++) {
offset_map_ptr[end - index] = start + index;
}
}
CUDA_CHECK(hipMemcpyAsync(offset_map_cu_ptr, offset_map_ptr, sizeof(int)*word_sum,
hipMemcpyHostToDevice, stream));
int block_dim = 256;
if (word_sum < block_dim) {
block_dim = word_sum;
}
int grid_dim = utils::div_up(word_sum, block_dim);
const OpDataType* in = static_cast<const OpDataType*>(inputs[input_id]->data());
OpDataType* out = static_cast<OpDataType*>(outputs[input_id]->mutable_data());
hipLaunchKernelGGL(( ker_reverse_input) , dim3(grid_dim), dim3(block_dim), 0, stream, in, out, word_sum, offset_map_cu_ptr);
}
return SaberSuccess;
};
template class SaberReverseInput<NV, AK_INT32>;
template class SaberReverseInput<NV, AK_FLOAT>;
template class SaberReverseInput<NV, AK_HALF>;
template class SaberReverseInput<NV, AK_INT8>;
}
} | 82ad8d9377908809b21fce1e33fce62c1a645326.cu |
#include "saber/saber_funcs_param.h"
#include "saber/funcs/impl/cuda/saber_reverse_input.h"
#include "saber/funcs/saber_util.h"
namespace anakin {
namespace saber {
template<DataType OpDtype>
SaberStatus SaberReverseInput<NV, OpDtype>::init(const std::vector<OpTensor*>& inputs,
std::vector<OpTensor*>& outputs,
EmptyParam<NV>& param,
Context<NV>& ctx) {
this->_ctx = &ctx;
for (int i = 0; i < inputs.size(); ++i) {
_offset_map_vec.push_back(*new Tensor<NVHX86>());
_offset_map_vec[i].set_dtype(AK_INT32);
_offset_map_cu_vec.push_back(*new OpTensor());
_offset_map_cu_vec[i].set_dtype(AK_INT32);
}
return create(inputs, outputs, param, ctx);
};
template<DataType OpDtype>
SaberStatus SaberReverseInput<NV, OpDtype>::create(const std::vector<OpTensor*>& inputs,
std::vector<OpTensor*>& outputs,
EmptyParam<NV>& param,
Context<NV>& ctx) {
    if (this->_ctx != &ctx) {
this->_ctx = &ctx;
}
return SaberSuccess;
};
template <typename Dtype>
__global__ static void ker_reverse_input(const Dtype* in, Dtype* out, int length, int* offset) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < length) {
out[offset[tid]] = in[tid];
}
}
template<DataType OpDtype>
SaberStatus SaberReverseInput<NV, OpDtype>::dispatch(const std::vector<OpTensor*>& inputs,
std::vector<OpTensor*>& outputs,
EmptyParam<NV>& param) {
int input_size = inputs.size();
cudaStream_t stream = this->_ctx->get_compute_stream();
for (int input_id = 0; input_id < input_size; ++input_id) {
std::vector<std::vector<int>> offset_vec = inputs[input_id]->get_seq_offset();
std::vector<int> offset = offset_vec[offset_vec.size() - 1];
int word_sum = offset[offset.size() - 1];
utils::try_expand_tensor(_offset_map_vec[input_id], word_sum);
utils::try_expand_tensor(_offset_map_cu_vec[input_id], word_sum);
int* offset_map_ptr = static_cast<int*>(_offset_map_vec[input_id].mutable_data());
int* offset_map_cu_ptr = static_cast<int*>(_offset_map_cu_vec[input_id].mutable_data());
for (int sequence_id = 0; sequence_id < offset.size() - 1; sequence_id++) {
int start = offset[sequence_id];
int end = offset[sequence_id + 1] - 1;
for (int index = 0; index <= end - start; index++) {
offset_map_ptr[end - index] = start + index;
}
}
CUDA_CHECK(cudaMemcpyAsync(offset_map_cu_ptr, offset_map_ptr, sizeof(int)*word_sum,
cudaMemcpyHostToDevice, stream));
int block_dim = 256;
if (word_sum < block_dim) {
block_dim = word_sum;
}
int grid_dim = utils::div_up(word_sum, block_dim);
const OpDataType* in = static_cast<const OpDataType*>(inputs[input_id]->data());
OpDataType* out = static_cast<OpDataType*>(outputs[input_id]->mutable_data());
ker_reverse_input <<< grid_dim, block_dim, 0, stream>>>(in, out, word_sum, offset_map_cu_ptr);
}
return SaberSuccess;
};
template class SaberReverseInput<NV, AK_INT32>;
template class SaberReverseInput<NV, AK_FLOAT>;
template class SaberReverseInput<NV, AK_HALF>;
template class SaberReverseInput<NV, AK_INT8>;
}
} |
1fdb3cc898940615eba559cac2a88f0a214807d6.hip | // !!! This is a file automatically generated by hipify!!!
//
// Created by Huy Vo on 10/30/18.
//
#include <armadillo>
#include <iostream>
#include <time.h>
#include <hip/hip_runtime.h>
#include <hipsparse.h>
#include <thrust/transform.h>
#include <thrust/execution_policy.h>
#include <thrust/device_vector.h>
#include <cvode/cvode.h>
#include <nvector/nvector_cuda.h>
#include <sunlinsol/sunlinsol_spgmr.h>
#include <sunlinsol/sunlinsol_spbcgs.h>
#include <cvode/cvode_spils.h>
#include <sundials/sundials_types.h>
#include <sundials/sundials_math.h>
#include "cme_util.h"
#include "FSPMat.h"
/* Parameters for the propensity functions */
const double ayx{2.6e-3}, axy{6.1e-3}, nyx{3e0}, nxy{2.1e0},
kx0{2.2e-3}, kx{1.7e-2}, dx{3.8e-4}, ky0{6.8e-5}, ky{1.6e-2}, dy{3.8e-4};
__device__ __host__
double toggle_propensity(int *x, int reaction) {
double prop_val;
switch (reaction) {
case 0:
prop_val = 1.0;
break;
case 1:
prop_val = 1.0 / (1.0 + ayx*::pow(1.0 * x[1], nyx));
break;
case 2:
prop_val = 1.0 * x[0];
break;
case 3:
prop_val = 1.0;
break;
case 4:
prop_val = 1.0 / (1.0 + axy*::pow(1.0 * x[0], nxy));
break;
case 5:
prop_val = 1.0 * x[1];
break;
}
return prop_val;
}
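// The six reactions form a two-species genetic toggle switch: 0-2 are constitutive
// production, Hill-repressed production (factor 1 / (1 + ayx * y^nyx)) and
// first-order degradation of species X; 3-5 are the mirror-image set for Y. Only
// the state-dependent factors are returned here -- the (time-varying) rate
// constants are supplied separately by t_func() below.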
__device__ cuFSP::PropFun prop_pointer = &toggle_propensity;
__device__ __host__
void t_func(double t, double* out){
// return {(1.0 + std::cos(t))*kx0, kx, dx, (1.0 + std::sin(t))*ky0, ky, dy};
out[0] = kx0;
out[1] = kx*(1.0 + cos(t/1000));
out[2] = dx*exp(-t/3600.0);
out[3] = ky0;
out[4] = ky;
out[5] = dy*exp(-t/3600.0);
}
/* RHS of CME routine. */
__host__
static int cvode_rhs(double t, N_Vector u, N_Vector udot, void* FSPMat_ptr){
double* udata = N_VGetDeviceArrayPointer_Cuda(u);
double* udotdata = N_VGetDeviceArrayPointer_Cuda(udot);
thrust::fill(thrust::device_pointer_cast<double>(udotdata),
thrust::device_pointer_cast<double>(udotdata+ ((cuFSP::FSPMat*) FSPMat_ptr)->get_n_rows()), 0.0);
((cuFSP::FSPMat*) FSPMat_ptr)->action(t, udata, udotdata);
CUDACHKERR();
return 0;
}
/* Jacobian-times-vector routine. */
__host__
static int cvode_jac(N_Vector v, N_Vector Jv, realtype t,
N_Vector u, N_Vector fu,
void *FSPMat_ptr, N_Vector tmp)
{
double* vdata = N_VGetDeviceArrayPointer_Cuda(v);
double* Jvdata = N_VGetDeviceArrayPointer_Cuda(Jv);
thrust::fill(thrust::device_pointer_cast<double>(Jvdata),
thrust::device_pointer_cast<double>(Jvdata+ ((cuFSP::FSPMat*) FSPMat_ptr)->get_n_rows()), 0.0);
((cuFSP::FSPMat*) FSPMat_ptr)->action(t, vdata, Jvdata);
CUDACHKERR();
return 0;
}
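/* For the finite-state-projected CME the right-hand side is linear, dp/dt = A(t) p,
   so the Jacobian is A(t) itself and the Jacobian-vector product J*v = A(t)*v reuses
   exactly the same FSPMat::action() call as cvode_rhs above; only the input and
   output vectors differ. */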
static int check_flag(void *flagvalue, const char *funcname, int opt);
int main()
{
int n_species = 2;
int n_reactions = 6;
double t_final = 8*3600;
double rel_tol = 1.0, abs_tol = 1.0e-8;
int flag;
int stoich_vals[] = {1, 1, -1, 1,1, -1};
int stoich_colidxs[] = {0, 0, 0, 1, 1, 1};
int stoich_rowptrs[] = {0, 1, 2, 3, 4, 5,6};
cuFSP::CSRMatInt stoich;
stoich.vals = &stoich_vals[0];
stoich.col_idxs = &stoich_colidxs[0];
stoich.row_ptrs = &stoich_rowptrs[0];
stoich.n_rows = 6;
stoich.n_cols = 2;
stoich.nnz = 6;
int n_bounds[] = {1<<10, 1<<10};
std::cout << n_bounds[0] << " " << n_bounds[1] << "\n";
int n_states = cuFSP::rect_fsp_num_states(n_species, n_bounds);
std::cout << "Total number of states:" << n_states << "\n";
cuFSP::PropFun host_prop_ptr;
hipMemcpyFromSymbol(&host_prop_ptr, prop_pointer, sizeof(cuFSP::PropFun)); CUDACHKERR();
cuFSP::FSPMat A
(n_reactions, n_species, n_bounds,
stoich, &t_func, host_prop_ptr, cuFSP::HYB); CUDACHKERR();
hipDeviceSynchronize();
/* Create a CUDA vector with initial values */
N_Vector p0 = N_VNew_Cuda(n_states); /* Allocate p0 vector */
if(check_flag((void*)p0, "N_VNew_Cuda", 0)) return(1);
double* p0_h = N_VGetHostArrayPointer_Cuda(p0);
for (int i = 0; i < n_states; ++i){
p0_h[i] = 0.0;
}
p0_h[0] = 1.0;
N_VCopyToDevice_Cuda(p0);
/* Call CVodeCreate to create the solver memory and specify the
* Backward Differentiation Formula and the use of a Newton iteration */
void *cvode_mem = CVodeCreate(CV_BDF, CV_NEWTON);
if(check_flag((void *)cvode_mem, "CVodeCreate", 0)) return(1);
/* Call CVodeInit to initialize the integrator memory and specify the
* user's right hand side function in u'=f(t,u), the initial time T0, and
* the initial dependent variable vector u. */
flag = CVodeInit(cvode_mem, cvode_rhs, 0.0, p0);
if(check_flag(&flag, "CVodeInit", 1)) return(1);
/* Call CVodeSStolerances to specify the scalar relative tolerance
* and scalar absolute tolerance */
flag = CVodeSStolerances(cvode_mem, rel_tol, abs_tol);
if (check_flag(&flag, "CVodeSStolerances", 1)) return(1);
/* Set the pointer to user-defined data */
flag = CVodeSetUserData(cvode_mem, (void*) &A);
if(check_flag(&flag, "CVodeSetUserData", 1)) return(1);
flag = CVodeSetMaxNumSteps(cvode_mem, 10000000);
flag = CVodeSetMaxConvFails(cvode_mem, 10000000);
flag = CVodeSetStabLimDet(cvode_mem, 1);
flag = CVodeSetMaxNonlinIters(cvode_mem, 100000);
/* Create SPGMR solver structure without preconditioning
* and the maximum Krylov dimension maxl */
// SUNLinearSolver LS = SUNSPGMR(p0, PREC_NONE, 10);
// if(check_flag(&flag, "SUNSPGMR", 1)) return(1);
SUNLinearSolver LS = SUNSPBCGS(p0, PREC_NONE, 0);
if(check_flag(&flag, "SUNSPBCGS", 1)) return(1);
/* Set CVSpils linear solver to LS */
flag = CVSpilsSetLinearSolver(cvode_mem, LS);
if(check_flag(&flag, "CVSpilsSetLinearSolver", 1)) return(1);
    /* Set the Jacobian-times-vector function */
flag = CVSpilsSetJacTimes(cvode_mem, NULL, cvode_jac);
if(check_flag(&flag, "CVSpilsSetJacTimesVecFn", 1)) return(1);
double t = 0.0;
double psum = 0.0;
double *p0_d = N_VGetDeviceArrayPointer_Cuda(p0);
while (t < t_final){
flag = CVode(cvode_mem, t_final, p0, &t, CV_ONE_STEP);
if(check_flag(&flag, "CVode", 1)) break;
psum = thrust::reduce(thrust::device_pointer_cast<double>(p0_d), thrust::device_pointer_cast<double>(p0_d+n_states));
std::cout << "t = " << t << " psum = " << psum << "\n";
}
assert(std::abs(1.0 - psum) <= 1.0e-10);
long num_step;
flag = CVodeGetNumSteps(cvode_mem, &num_step);
check_flag(&flag, "CVodeGetNumSteps", 1);
std::cout << "CVODE takes " << num_step << " steps.\n";
return 0;
}
/* Check function return value...
opt == 0 means SUNDIALS function allocates memory so check if
returned NULL pointer
opt == 1 means SUNDIALS function returns a flag so check if
flag >= 0
opt == 2 means function allocates memory so check if returned
NULL pointer */
static int check_flag(void *flagvalue, const char *funcname, int opt)
{
int *errflag;
/* Check if SUNDIALS function returned NULL pointer - no memory allocated */
if (opt == 0 && flagvalue == NULL) {
fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n",
funcname);
return(1); }
/* Check if flag < 0 */
else if (opt == 1) {
errflag = (int *) flagvalue;
if (*errflag < 0) {
fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed with flag = %d\n\n",
funcname, *errflag);
return(1); }}
/* Check if function returned NULL pointer - no memory allocated */
else if (opt == 2 && flagvalue == NULL) {
fprintf(stderr, "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n",
funcname);
return(1); }
return(0);
} | 1fdb3cc898940615eba559cac2a88f0a214807d6.cu | //
// Created by Huy Vo on 10/30/18.
//
#include <armadillo>
#include <iostream>
#include <time.h>
#include <cuda_runtime.h>
#include <cusparse.h>
#include <thrust/transform.h>
#include <thrust/execution_policy.h>
#include <thrust/device_vector.h>
#include <cvode/cvode.h>
#include <nvector/nvector_cuda.h>
#include <sunlinsol/sunlinsol_spgmr.h>
#include <sunlinsol/sunlinsol_spbcgs.h>
#include <cvode/cvode_spils.h>
#include <sundials/sundials_types.h>
#include <sundials/sundials_math.h>
#include "cme_util.h"
#include "FSPMat.h"
/* Parameters for the propensity functions */
const double ayx{2.6e-3}, axy{6.1e-3}, nyx{3e0}, nxy{2.1e0},
kx0{2.2e-3}, kx{1.7e-2}, dx{3.8e-4}, ky0{6.8e-5}, ky{1.6e-2}, dy{3.8e-4};
__device__ __host__
double toggle_propensity(int *x, int reaction) {
double prop_val;
switch (reaction) {
case 0:
prop_val = 1.0;
break;
case 1:
prop_val = 1.0 / (1.0 + ayx*std::pow(1.0 * x[1], nyx));
break;
case 2:
prop_val = 1.0 * x[0];
break;
case 3:
prop_val = 1.0;
break;
case 4:
prop_val = 1.0 / (1.0 + axy*std::pow(1.0 * x[0], nxy));
break;
case 5:
prop_val = 1.0 * x[1];
break;
}
return prop_val;
}
__device__ cuFSP::PropFun prop_pointer = &toggle_propensity;
__device__ __host__
void t_func(double t, double* out){
// return {(1.0 + std::cos(t))*kx0, kx, dx, (1.0 + std::sin(t))*ky0, ky, dy};
out[0] = kx0;
out[1] = kx*(1.0 + cos(t/1000));
out[2] = dx*exp(-t/3600.0);
out[3] = ky0;
out[4] = ky;
out[5] = dy*exp(-t/3600.0);
}
/* RHS of CME routine. */
__host__
static int cvode_rhs(double t, N_Vector u, N_Vector udot, void* FSPMat_ptr){
double* udata = N_VGetDeviceArrayPointer_Cuda(u);
double* udotdata = N_VGetDeviceArrayPointer_Cuda(udot);
thrust::fill(thrust::device_pointer_cast<double>(udotdata),
thrust::device_pointer_cast<double>(udotdata+ ((cuFSP::FSPMat*) FSPMat_ptr)->get_n_rows()), 0.0);
((cuFSP::FSPMat*) FSPMat_ptr)->action(t, udata, udotdata);
CUDACHKERR();
return 0;
}
/* Jacobian-times-vector routine. */
__host__
static int cvode_jac(N_Vector v, N_Vector Jv, realtype t,
N_Vector u, N_Vector fu,
void *FSPMat_ptr, N_Vector tmp)
{
double* vdata = N_VGetDeviceArrayPointer_Cuda(v);
double* Jvdata = N_VGetDeviceArrayPointer_Cuda(Jv);
thrust::fill(thrust::device_pointer_cast<double>(Jvdata),
thrust::device_pointer_cast<double>(Jvdata+ ((cuFSP::FSPMat*) FSPMat_ptr)->get_n_rows()), 0.0);
((cuFSP::FSPMat*) FSPMat_ptr)->action(t, vdata, Jvdata);
CUDACHKERR();
return 0;
}
static int check_flag(void *flagvalue, const char *funcname, int opt);
int main()
{
int n_species = 2;
int n_reactions = 6;
double t_final = 8*3600;
double rel_tol = 1.0, abs_tol = 1.0e-8;
int flag;
int stoich_vals[] = {1, 1, -1, 1,1, -1};
int stoich_colidxs[] = {0, 0, 0, 1, 1, 1};
int stoich_rowptrs[] = {0, 1, 2, 3, 4, 5,6};
cuFSP::CSRMatInt stoich;
stoich.vals = &stoich_vals[0];
stoich.col_idxs = &stoich_colidxs[0];
stoich.row_ptrs = &stoich_rowptrs[0];
stoich.n_rows = 6;
stoich.n_cols = 2;
stoich.nnz = 6;
int n_bounds[] = {1<<10, 1<<10};
std::cout << n_bounds[0] << " " << n_bounds[1] << "\n";
int n_states = cuFSP::rect_fsp_num_states(n_species, n_bounds);
std::cout << "Total number of states:" << n_states << "\n";
cuFSP::PropFun host_prop_ptr;
cudaMemcpyFromSymbol(&host_prop_ptr, prop_pointer, sizeof(cuFSP::PropFun)); CUDACHKERR();
cuFSP::FSPMat A
(n_reactions, n_species, n_bounds,
stoich, &t_func, host_prop_ptr, cuFSP::HYB); CUDACHKERR();
cudaDeviceSynchronize();
/* Create a CUDA vector with initial values */
N_Vector p0 = N_VNew_Cuda(n_states); /* Allocate p0 vector */
if(check_flag((void*)p0, "N_VNew_Cuda", 0)) return(1);
double* p0_h = N_VGetHostArrayPointer_Cuda(p0);
for (int i = 0; i < n_states; ++i){
p0_h[i] = 0.0;
}
p0_h[0] = 1.0;
N_VCopyToDevice_Cuda(p0);
/* Call CVodeCreate to create the solver memory and specify the
* Backward Differentiation Formula and the use of a Newton iteration */
void *cvode_mem = CVodeCreate(CV_BDF, CV_NEWTON);
if(check_flag((void *)cvode_mem, "CVodeCreate", 0)) return(1);
/* Call CVodeInit to initialize the integrator memory and specify the
* user's right hand side function in u'=f(t,u), the initial time T0, and
* the initial dependent variable vector u. */
flag = CVodeInit(cvode_mem, cvode_rhs, 0.0, p0);
if(check_flag(&flag, "CVodeInit", 1)) return(1);
/* Call CVodeSStolerances to specify the scalar relative tolerance
* and scalar absolute tolerance */
flag = CVodeSStolerances(cvode_mem, rel_tol, abs_tol);
if (check_flag(&flag, "CVodeSStolerances", 1)) return(1);
/* Set the pointer to user-defined data */
flag = CVodeSetUserData(cvode_mem, (void*) &A);
if(check_flag(&flag, "CVodeSetUserData", 1)) return(1);
flag = CVodeSetMaxNumSteps(cvode_mem, 10000000);
flag = CVodeSetMaxConvFails(cvode_mem, 10000000);
flag = CVodeSetStabLimDet(cvode_mem, 1);
flag = CVodeSetMaxNonlinIters(cvode_mem, 100000);
/* Create SPGMR solver structure without preconditioning
* and the maximum Krylov dimension maxl */
// SUNLinearSolver LS = SUNSPGMR(p0, PREC_NONE, 10);
// if(check_flag(&flag, "SUNSPGMR", 1)) return(1);
SUNLinearSolver LS = SUNSPBCGS(p0, PREC_NONE, 0);
if(check_flag(&flag, "SUNSPBCGS", 1)) return(1);
/* Set CVSpils linear solver to LS */
flag = CVSpilsSetLinearSolver(cvode_mem, LS);
if(check_flag(&flag, "CVSpilsSetLinearSolver", 1)) return(1);
    /* Set the Jacobian-times-vector function */
flag = CVSpilsSetJacTimes(cvode_mem, NULL, cvode_jac);
if(check_flag(&flag, "CVSpilsSetJacTimesVecFn", 1)) return(1);
double t = 0.0;
double psum = 0.0;
double *p0_d = N_VGetDeviceArrayPointer_Cuda(p0);
while (t < t_final){
flag = CVode(cvode_mem, t_final, p0, &t, CV_ONE_STEP);
if(check_flag(&flag, "CVode", 1)) break;
psum = thrust::reduce(thrust::device_pointer_cast<double>(p0_d), thrust::device_pointer_cast<double>(p0_d+n_states));
std::cout << "t = " << t << " psum = " << psum << "\n";
}
assert(std::abs(1.0 - psum) <= 1.0e-10);
long num_step;
flag = CVodeGetNumSteps(cvode_mem, &num_step);
check_flag(&flag, "CVodeGetNumSteps", 1);
std::cout << "CVODE takes " << num_step << " steps.\n";
return 0;
}
/* Check function return value...
opt == 0 means SUNDIALS function allocates memory so check if
returned NULL pointer
opt == 1 means SUNDIALS function returns a flag so check if
flag >= 0
opt == 2 means function allocates memory so check if returned
NULL pointer */
static int check_flag(void *flagvalue, const char *funcname, int opt)
{
int *errflag;
/* Check if SUNDIALS function returned NULL pointer - no memory allocated */
if (opt == 0 && flagvalue == NULL) {
fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n",
funcname);
return(1); }
/* Check if flag < 0 */
else if (opt == 1) {
errflag = (int *) flagvalue;
if (*errflag < 0) {
fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed with flag = %d\n\n",
funcname, *errflag);
return(1); }}
/* Check if function returned NULL pointer - no memory allocated */
else if (opt == 2 && flagvalue == NULL) {
fprintf(stderr, "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n",
funcname);
return(1); }
return(0);
} |
8b4ba411318e5ab497f9b44f2b1e17d5047340be.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at { namespace native {
void atan2_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "atan2_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return ::atan2(a, b);
});
});
}
void smooth_l1_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "smooth_l1_cuda", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA (scalar_t a, scalar_t b) -> scalar_t {
auto z = ::abs(a - b);
return z < scalar_t(1.) ? scalar_t(0.5) * z * z : z - scalar_t(0.5);
});
});
}
void mse_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "mse_cuda", [&]() {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "mse_cuda", [&] {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
auto diff = a - b;
return diff * diff;
});
});
});
}
void logaddexp_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES(iter.dtype(), "logaddexp_cuda", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA (scalar_t a, scalar_t b) -> scalar_t {
if (::isinf(a) && a == b) {
return a;
}
else {
scalar_t m = ::max(a, b);
return m + ::log((scalar_t)(1.0) + ::exp(-::abs(a - b)));
}
});
});
}
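// The kernel above evaluates log(exp(a) + exp(b)) in the numerically stable form
//   m + log(1 + exp(-|a - b|)),  with m = max(a, b),
// which avoids overflow for large inputs; the explicit isinf check handles
// a == b == +/-inf, where the subtraction would otherwise produce NaN.
// logaddexp2_kernel_cuda below applies the same identity in base 2.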
void logaddexp2_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES(iter.dtype(), "logaddexp2_cuda", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA (scalar_t a, scalar_t b) -> scalar_t {
if (::isinf(a) && a == b) {
return a;
}
else {
scalar_t m = ::max(a, b);
return m + ::log2((scalar_t)(1.0) + ::pow((scalar_t)(2.0), -::abs(a - b)));
}
});
});
}
REGISTER_DISPATCH(atan2_stub, &atan2_kernel_cuda);
REGISTER_DISPATCH(smooth_l1_stub, &smooth_l1_kernel_cuda);
REGISTER_DISPATCH(mse_stub, &mse_kernel_cuda);
REGISTER_DISPATCH(logaddexp_stub, &logaddexp_kernel_cuda);
REGISTER_DISPATCH(logaddexp2_stub, &logaddexp2_kernel_cuda);
}} // namespace at::native
| 8b4ba411318e5ab497f9b44f2b1e17d5047340be.cu | #include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at { namespace native {
void atan2_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "atan2_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return ::atan2(a, b);
});
});
}
void smooth_l1_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "smooth_l1_cuda", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA (scalar_t a, scalar_t b) -> scalar_t {
auto z = ::abs(a - b);
return z < scalar_t(1.) ? scalar_t(0.5) * z * z : z - scalar_t(0.5);
});
});
}
void mse_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "mse_cuda", [&]() {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "mse_cuda", [&] {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
auto diff = a - b;
return diff * diff;
});
});
});
}
void logaddexp_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES(iter.dtype(), "logaddexp_cuda", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA (scalar_t a, scalar_t b) -> scalar_t {
if (::isinf(a) && a == b) {
return a;
}
else {
scalar_t m = ::max(a, b);
return m + ::log((scalar_t)(1.0) + ::exp(-::abs(a - b)));
}
});
});
}
void logaddexp2_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES(iter.dtype(), "logaddexp2_cuda", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA (scalar_t a, scalar_t b) -> scalar_t {
if (::isinf(a) && a == b) {
return a;
}
else {
scalar_t m = ::max(a, b);
return m + ::log2((scalar_t)(1.0) + ::pow((scalar_t)(2.0), -::abs(a - b)));
}
});
});
}
REGISTER_DISPATCH(atan2_stub, &atan2_kernel_cuda);
REGISTER_DISPATCH(smooth_l1_stub, &smooth_l1_kernel_cuda);
REGISTER_DISPATCH(mse_stub, &mse_kernel_cuda);
REGISTER_DISPATCH(logaddexp_stub, &logaddexp_kernel_cuda);
REGISTER_DISPATCH(logaddexp2_stub, &logaddexp2_kernel_cuda);
}} // namespace at::native
|
88dc5040ffcdb8b6e14248dcd649574078e7614e.hip | // !!! This is a file automatically generated by hipify!!!
//#include "cuSolverDn_AtA.cu"
#include "Solver_manager.hh"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <assert.h>
#include <cmath>
#include <hip/hip_runtime.h>
#include "SI.h"
#include "rocblas.h"
#include "hipsparse.h"
#include "cusolverDn.h"
#include "helper_cuda.h"
#include "helper_cusolver.h"
int linearSolverCHOL(
hipsolverDnHandle_t handle,
int n,
const float *Acopy,
int lda,
const float *b,
float *x)
{
int bufferSize = 0;
int *info = NULL;
float *buffer = NULL;
float *A = NULL;
int h_info = 0;
float start, stop;
float time_solve;
hipblasFillMode_t uplo = HIPBLAS_FILL_MODE_LOWER;
checkCudaErrors(hipsolverDnSpotrf_bufferSize(handle, uplo, n, (float*)Acopy, lda, &bufferSize));
checkCudaErrors(hipMalloc(&info, sizeof(int)));
checkCudaErrors(hipMalloc(&buffer, sizeof(float)*bufferSize));
checkCudaErrors(hipMalloc(&A, sizeof(float)*lda*n));
// prepare a copy of A because potrf will overwrite A with L
checkCudaErrors(hipMemcpy(A, Acopy, sizeof(float)*lda*n, hipMemcpyDeviceToDevice));
checkCudaErrors(hipMemset(info, 0, sizeof(int)));
start = second();
start = second();
checkCudaErrors(hipsolverDnSpotrf(handle, uplo, n, A, lda, buffer, bufferSize, info));
checkCudaErrors(hipMemcpy(&h_info, info, sizeof(int), hipMemcpyDeviceToHost));
if ( 0 != h_info ){
fprintf(stderr, "Error: Cholesky factorization failed, check %d parameter\n", h_info);
}
checkCudaErrors(hipMemcpy(x, b, sizeof(float)*n, hipMemcpyDeviceToDevice));
checkCudaErrors(hipsolverDnSpotrs(handle, uplo, n, 1, A, lda, x, n, info));
checkCudaErrors(hipDeviceSynchronize());
stop = second();
time_solve = stop - start;
fprintf (stdout, "timing: cholesky = %10.6f sec\n", time_solve);
if (info ) { checkCudaErrors(hipFree(info)); }
if (buffer) { checkCudaErrors(hipFree(buffer)); }
if (A ) { checkCudaErrors(hipFree(A)); }
return 0;
}
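/*
 * linearSolverCHOL above follows the standard dense pattern for an SPD system:
 * potrf factors A = L * L^T (lower triangle), and potrs then performs the two
 * triangular solves L * y = b and L^T * x = y in place on x.
 */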
/*
* solve A*x = b by LU with partial pivoting
*
*/
int linearSolverLU(
hipsolverDnHandle_t handle,
int n,
const float *Acopy,
int lda,
const float *b,
float *x)
{
int bufferSize = 0;
int *info = NULL;
float *buffer = NULL;
float *A = NULL;
int *ipiv = NULL; // pivoting sequence
int h_info = 0;
float start, stop;
float time_solve;
checkCudaErrors(hipsolverDnSgetrf_bufferSize(handle, n, n, (float*)Acopy, lda, &bufferSize));
checkCudaErrors(hipMalloc(&info, sizeof(int)));
checkCudaErrors(hipMalloc(&buffer, sizeof(float)*bufferSize));
checkCudaErrors(hipMalloc(&A, sizeof(float)*lda*n));
checkCudaErrors(hipMalloc(&ipiv, sizeof(int)*n));
// prepare a copy of A because getrf will overwrite A with L
checkCudaErrors(hipMemcpy(A, Acopy, sizeof(float)*lda*n, hipMemcpyDeviceToDevice));
checkCudaErrors(hipMemset(info, 0, sizeof(int)));
start = second();
start = second();
checkCudaErrors(hipsolverDnSgetrf(handle, n, n, A, lda, buffer, ipiv, info));
checkCudaErrors(hipMemcpy(&h_info, info, sizeof(int), hipMemcpyDeviceToHost));
if ( 0 != h_info ){
fprintf(stderr, "Error: LU factorization failed, check %d parameter\n", h_info);
}
checkCudaErrors(hipMemcpy(x, b, sizeof(float)*n, hipMemcpyDeviceToDevice));
checkCudaErrors(hipsolverDnSgetrs(handle, HIPBLAS_OP_N, n, 1, A, lda, ipiv, x, n, info));
checkCudaErrors(hipDeviceSynchronize());
stop = second();
time_solve = stop - start;
fprintf (stdout, "timing: LU = %10.6f sec\n", time_solve);
if (info ) { checkCudaErrors(hipFree(info )); }
if (buffer) { checkCudaErrors(hipFree(buffer)); }
if (A ) { checkCudaErrors(hipFree(A)); }
if (ipiv ) { checkCudaErrors(hipFree(ipiv));}
return 0;
}
void linearSolverSVD(
hipsolverDnHandle_t handle,
int n,
const float *Acopy,
int lda,
const float *bcopy,
float *x)
{
hipblasHandle_t cublasHandle = NULL; // used in residual evaluation
int m = lda;
int bufferSize = 0;
int *info = NULL;
int h_info = 0;
float start, stop;
float time_solve;
const float one = 1.0;
// float U[lda*m]; // m-by-m unitary matrix
// float VT[lda*n]; // n-by-n unitary matrix
// float S[n]; //singular value
float *d_A = NULL; float *d_SI = NULL;
float *d_b = NULL; float *d_S = NULL;
float *d_U = NULL; float *d_VT = NULL;
float *d_work = NULL;
float *d_rwork = NULL;
float *d_W = NULL;
signed char jobu = 'A'; // all m columns of U
signed char jobvt = 'A'; // all n columns of VT
// step 1: create cusolverDn/cublas handle
checkCudaErrors(hipblasCreate(&cublasHandle));
    checkCudaErrors(hipMalloc((void**)&d_A , sizeof(float)*lda*n));
checkCudaErrors(hipMalloc((void**)&d_b , sizeof(float)*m));
checkCudaErrors(hipMalloc((void**)&d_S , sizeof(float)*n));
checkCudaErrors(hipMalloc((void**)&d_SI , sizeof(float)*lda*n));
checkCudaErrors(hipMalloc((void**)&d_U , sizeof(float)*lda*m));
checkCudaErrors(hipMalloc((void**)&d_VT , sizeof(float)*lda*n));
checkCudaErrors(hipMalloc((void**)&info, sizeof(int)));
checkCudaErrors(hipMalloc((void**)&d_W , sizeof(float)*lda*n));
checkCudaErrors(hipMemcpy(d_A, Acopy, sizeof(float)*lda*n, hipMemcpyDeviceToDevice)); //gesvd destroys d_A on exit
checkCudaErrors(hipMemcpy(d_b, bcopy, sizeof(float)*m, hipMemcpyDeviceToDevice));
// checkMatrix(m, n, d_SI, lda, "zero_SI");
// checkMatrix(m, n , d_A, lda, "SVD_AtA");
// checkArray(d_b, m, "SVD_Atb");
checkCudaErrors(hipsolverDnSgesvd_bufferSize( handle, m, n, &bufferSize ));
checkCudaErrors(hipMalloc((void**)&d_work , sizeof(float)*bufferSize));
start = second();
checkCudaErrors(hipsolverDnSgesvd(
handle, jobu, jobvt, m, n, d_A, lda, d_S, d_U, lda, d_VT, lda, d_work, bufferSize, d_rwork, info));
//checkCudaErrors(hipDeviceSynchronize());
// checkCudaErrors(hipMemcpy(U , d_U , sizeof(float)*lda*m, hipMemcpyDeviceToHost));
// checkCudaErrors(hipMemcpy(VT, d_VT, sizeof(float)*lda*n, hipMemcpyDeviceToHost));
// checkCudaErrors(hipMemcpy(S , d_S , sizeof(float)*n , hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(&h_info, info, sizeof(int), hipMemcpyDeviceToHost));
if ( 0 != h_info ){
fprintf(stderr, "Error: SVD failed, check %d parameter\n", h_info);
}
// int BLOCK_DIM_X = 32; int BLOCK_DIM_Y = 32;
// dim3 blockDim(BLOCK_DIM_X, BLOCK_DIM_Y);
// dim3 gridDim((n + BLOCK_DIM_X - 1) / BLOCK_DIM_X, (m + BLOCK_DIM_Y - 1) / BLOCK_DIM_Y);
// initSIGPU<<<gridDim, blockDim>>>(d_SI, d_S, m, n);
float epsilon = 1.e-9;
printf("epsilon = %f \n", epsilon);
initSI<float>(d_SI, d_S, m, n, epsilon, 256);
//int initStat = initSICPU(d_SI, d_S, m, n, epsilon);
// U*S*V*x=b; x = VT*Si*UT*b
// checkMatrix(m, n, d_SI, lda, "SVD_SI");
// checkArray(d_S, n, "dS");
// checkMatrix(m, m, d_U, lda, "SVD_U");
// checkMatrix(n, n, d_VT, lda, "SVD_VT");
float al = 1.0;// al =1
float bet = 0.0;// bet =0
// checkArray(d_b, n, "db");
checkCudaErrors(hipblasSgemv(cublasHandle,HIPBLAS_OP_T, m, m, &al,d_U, m, d_b,1,&bet,d_b,1));
// checkArray(d_b, n, "dUtb");
checkCudaErrors(hipblasSgemv(cublasHandle,HIPBLAS_OP_N, m, n, &al,d_SI, m, d_b,1,&bet,d_b,1));
// checkArray(d_b, n, "dSiUtb");
checkCudaErrors(hipblasSgemv(cublasHandle,HIPBLAS_OP_T, n, n, &al,d_VT, n, d_b, 1,&bet,x,1));
checkCudaErrors(hipDeviceSynchronize());
stop = second();
time_solve = stop - start;
fprintf (stdout, "timing: SVD = %10.6f sec\n", time_solve);
// checkArray(x, 20, "d_x");
if (d_A ) hipFree(d_A);
if (d_S ) hipFree(d_S);
if (d_SI ) hipFree(d_SI);
if (d_U ) hipFree(d_U);
if (d_VT ) hipFree(d_VT);
if (info) hipFree(info);
if (d_work ) hipFree(d_work);
if (d_rwork) hipFree(d_rwork);
if (d_W ) hipFree(d_W);
if (cublasHandle ) hipblasDestroy(cublasHandle);
// if (cusolverH) hipsolverDnDestroy(cusolverH);
}
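/*
 * linearSolverSVD above solves the system through a truncated pseudo-inverse:
 * with A = U * S * V^T it forms x = V * S^+ * U^T * b, which is what the three
 * chained cublasSgemv calls compute. S^+ is built by initSI (declared in SI.h);
 * given the epsilon argument it presumably inverts only the singular values
 * above that threshold and zeroes the rest.
 */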
/*
* solve A*x = b by QR
*
*/
int linearSolverQR(
hipsolverDnHandle_t handle,
int n,
const float *Acopy,
int lda,
const float *b,
float *x)
{
hipblasHandle_t cublasHandle = NULL; // used in residual evaluation
int bufferSize = 0;
int bufferSize_geqrf = 0;
int bufferSize_ormqr = 0;
int *info = NULL;
float *buffer = NULL;
float *A = NULL;
float *tau = NULL;
int h_info = 0;
float start, stop;
float time_solve;
const float one = 1.0;
checkCudaErrors(hipblasCreate(&cublasHandle));
checkCudaErrors(hipsolverDnSgeqrf_bufferSize(handle, n, n, (float*)Acopy, lda, &bufferSize_geqrf));
checkCudaErrors(hipsolverDnSormqr_bufferSize(
handle,
HIPBLAS_SIDE_LEFT,
HIPBLAS_OP_T,
n,
1,
n,
A,
lda,
NULL,
x,
n,
&bufferSize_ormqr));
//printf("buffer_geqrf = %d, buffer_ormqr = %d \n", bufferSize_geqrf, bufferSize_ormqr);
bufferSize = (bufferSize_geqrf > bufferSize_ormqr)? bufferSize_geqrf : bufferSize_ormqr ;
checkCudaErrors(hipMalloc(&info, sizeof(int)));
checkCudaErrors(hipMalloc(&buffer, sizeof(float)*bufferSize));
checkCudaErrors(hipMalloc(&A, sizeof(float)*lda*n));
checkCudaErrors(hipMalloc ((void**)&tau, sizeof(float)*n));
// prepare a copy of A because getrf will overwrite A with L
checkCudaErrors(hipMemcpy(A, Acopy, sizeof(float)*lda*n, hipMemcpyDeviceToDevice));
checkCudaErrors(hipMemset(info, 0, sizeof(int)));
start = second();
start = second();
// compute QR factorization
checkCudaErrors(hipsolverDnSgeqrf(handle, n, n, A, lda, tau, buffer, bufferSize, info));
checkCudaErrors(hipMemcpy(&h_info, info, sizeof(int), hipMemcpyDeviceToHost));
if ( 0 != h_info ){
fprintf(stderr, "Error: QR factorization failed, check %d parameter\n", h_info);
}
checkCudaErrors(hipMemcpy(x, b, sizeof(float)*n, hipMemcpyDeviceToDevice));
// compute Q^T*b
checkCudaErrors(hipsolverDnSormqr(
handle,
HIPBLAS_SIDE_LEFT,
HIPBLAS_OP_T,
n,
1,
n,
A,
lda,
tau,
x,
n,
buffer,
bufferSize,
info));
// x = R \ Q^T*b
checkCudaErrors(hipblasStrsm(
cublasHandle,
HIPBLAS_SIDE_LEFT,
HIPBLAS_FILL_MODE_UPPER,
HIPBLAS_OP_N,
HIPBLAS_DIAG_NON_UNIT,
n,
1,
&one,
A,
lda,
x,
n));
checkCudaErrors(hipDeviceSynchronize());
stop = second();
time_solve = stop - start;
fprintf (stdout, "timing: QR = %10.6f sec\n", time_solve);
if (cublasHandle) { checkCudaErrors(hipblasDestroy(cublasHandle)); }
if (info ) { checkCudaErrors(hipFree(info )); }
if (buffer) { checkCudaErrors(hipFree(buffer)); }
if (A ) { checkCudaErrors(hipFree(A)); }
if (tau ) { checkCudaErrors(hipFree(tau)); }
return 0;
}
DnSolver::DnSolver (int rows_, int cols_)
{
/* the lazily-allocated sparse buffers must start out unset so from_csr()/from_coo() know to allocate them */
d_csrValA = NULL; d_csrRowPtrA = NULL; d_csrColIndA = NULL; d_cooRowIndA = NULL;
rowsA = rows_;
colsA = cols_;
lda = rows_;
checkCudaErrors(hipsolverDnCreate(&handle));
checkCudaErrors(hipblasCreate(&cublasHandle));
checkCudaErrors(hipStreamCreate(&stream));
checkCudaErrors(hipsparseCreate(&cusparseHandle));
checkCudaErrors(hipsparseCreateMatDescr(&descrA));
checkCudaErrors(hipsolverDnSetStream(handle, stream));
checkCudaErrors(hipblasSetStream(cublasHandle, stream));
checkCudaErrors(hipsparseSetStream(cusparseHandle, stream));
h_A = (float*)malloc(sizeof(float)*lda*colsA);
h_x = (float*)malloc(sizeof(float)*colsA);
h_b = (float*)malloc(sizeof(float)*rowsA);
checkCudaErrors(hipMalloc((void **)&d_x, sizeof(float)*colsA));
checkCudaErrors(hipMalloc((void **)&d_b, sizeof(float)*rowsA));
checkCudaErrors(hipMalloc((void **)&d_A, sizeof(float)*lda*colsA));
}
void DnSolver::from_dense(float* array_host_, float* rhs_){
h_A = array_host_;
h_b = rhs_;
checkCudaErrors(hipMemcpy(d_A, h_A, sizeof(float)*lda*colsA, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_b, h_b, sizeof(float)*rowsA, hipMemcpyHostToDevice));
}
void DnSolver::from_csr(int* indptr_, int* indices_, float* data_, float* rhs_){
h_b = rhs_;
h_csrRowPtrA = indptr_;
h_csrColIndA = indices_;
h_csrValA = data_;
baseA = h_csrRowPtrA[0];
nnzA = h_csrRowPtrA[rowsA] - baseA;
hipsparseStatus_t cpstat;
//checkMatrix(nnzA, 1, h_csrValA, nnzA, "h_valA");
hipsparseSetMatType(descrA,HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(descrA,HIPSPARSE_INDEX_BASE_ZERO);
if (d_csrRowPtrA == NULL ){
printf("allocating pointers \n");
checkCudaErrors(hipMalloc((void **)&d_csrRowPtrA, sizeof(int)*(rowsA+1)));
checkCudaErrors(hipMalloc((void **)&d_csrColIndA, sizeof(int)*nnzA));
checkCudaErrors(hipMalloc((void **)&d_csrValA, sizeof(float)*nnzA));
}
checkCudaErrors(hipMemcpy(d_csrRowPtrA, h_csrRowPtrA, sizeof(int)*(rowsA+1), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_csrColIndA, h_csrColIndA, sizeof(int)*nnzA, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_csrValA, h_csrValA, sizeof(float)*nnzA, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_b, h_b, sizeof(float)*rowsA, hipMemcpyHostToDevice));
cpstat = hipsparseScsr2dense(
cusparseHandle,
rowsA, colsA,
descrA,
d_csrValA,
d_csrRowPtrA,
d_csrColIndA,
d_A, rowsA);
if (cpstat != HIPSPARSE_STATUS_SUCCESS) {
printf ("%s\n", "CuSparse CSR to dense conversion failed");
return;
}
//if (d_csrValA ) { checkCudaErrors(hipFree(d_csrValA)); }
//if (d_csrRowPtrA) { checkCudaErrors(hipFree(d_csrRowPtrA)); }
//if (d_csrColIndA) { checkCudaErrors(hipFree(d_csrColIndA)); }
}
void DnSolver::from_coo(int* indptr_, int* indices_, float* data_, int nnz_, float* rhs_){
h_b = rhs_;
h_cooRowIndA = indptr_;
h_csrColIndA = indices_;
h_csrValA = data_;
nnzA = nnz_;
hipsparseStatus_t cpstat;
//checkMatrix(nnzA, 1, h_csrValA, nnzA, "h_valA");
hipsparseSetMatType(descrA,HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(descrA,HIPSPARSE_INDEX_BASE_ZERO);
if (d_csrRowPtrA == NULL ){
printf("allocating pointers \n");
checkCudaErrors(hipMalloc((void **)&d_cooRowIndA, sizeof(int)*nnzA));
checkCudaErrors(hipMalloc((void **)&d_csrRowPtrA, sizeof(int)*(rowsA+1)));
checkCudaErrors(hipMalloc((void **)&d_csrColIndA, sizeof(int)*nnzA));
checkCudaErrors(hipMalloc((void **)&d_csrValA, sizeof(float)*nnzA));
}
checkCudaErrors(hipMemcpy(d_cooRowIndA, h_cooRowIndA, sizeof(int)*nnzA, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_csrColIndA, h_csrColIndA, sizeof(int)*nnzA, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_csrValA, h_csrValA, sizeof(float)*nnzA, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_b, h_b, sizeof(float)*rowsA, hipMemcpyHostToDevice));
cpstat = hipsparseXcoo2csr(cusparseHandle,
d_cooRowIndA, nnzA, rowsA,
d_csrRowPtrA,
HIPSPARSE_INDEX_BASE_ZERO);
cpstat = hipsparseScsr2dense(
cusparseHandle,
rowsA, colsA,
descrA,
d_csrValA,
d_csrRowPtrA,
d_csrColIndA,
d_A, rowsA);
if (cpstat != HIPSPARSE_STATUS_SUCCESS) {
printf ("%s\n", "CuSparse CSR to dense conversion failed");
return;
}
}
void DnSolver::solve(int multFunc, int Func) {
//printf("step 6: compute AtA \n");
hipblasStatus_t cbstat;
hipsparseStatus_t cpstat;
//if (dAtA == NULL){
// checkCudaErrors(hipMalloc(&dAtA, sizeof(float)*colsA*colsA));
// checkCudaErrors(hipMalloc((void **)&d_Atb, sizeof(float)*colsA));
//}
float* dAtA;
float* d_Atb;
checkCudaErrors(hipMalloc((void **)&dAtA, sizeof(float)*colsA*colsA));
checkCudaErrors(hipMalloc((void **)&d_Atb, sizeof(float)*colsA));
if ( multFunc == 0){
printf("using sparse multiply\n");
cpstat = hipsparseScsrmm(cusparseHandle,
HIPSPARSE_OPERATION_TRANSPOSE,
rowsA,colsA,colsA,nnzA, &al,
descrA,
d_csrValA,
d_csrRowPtrA,
d_csrColIndA,
d_A,rowsA,
&bet,dAtA,colsA);
cpstat = hipsparseScsrmv(cusparseHandle,
HIPSPARSE_OPERATION_TRANSPOSE,
rowsA,colsA, nnzA, &al,
descrA,
d_csrValA,
d_csrRowPtrA,
d_csrColIndA,
d_b,&bet,d_Atb);
}
else {
cbstat = hipblasSgemm(cublasHandle,
HIPBLAS_OP_T,HIPBLAS_OP_N,
colsA,colsA,rowsA,&al,
d_A,rowsA,d_A,rowsA,
&bet,dAtA,colsA);
cbstat = hipblasSgemv(cublasHandle,
HIPBLAS_OP_T,
rowsA,colsA,&al,
d_A,rowsA,d_b,
1,&bet,d_Atb,1);
}
//print out for debug
//checkMatrix(rowsA, colsA , d_A, lda, "A");
//checkMatrix(rowsA, 1 , d_b, rowsA, "b");
//checkMatrix(colsA, colsA , dAtA, lda, "AtA");
//checkMatrix(colsA, 1 , d_Atb, lda, "Atb");
//if (cublasHandle) { checkCudaErrors(hipblasDestroy(cublasHandle)); }
//checkCudaErrors(hipblasCreate(&cublasHandle));
//checkCudaErrors(hipblasSetStream(cublasHandle, stream));
//printf("step 8: solves AtA*x = At*b \n");
if ( 0 == Func )
{
linearSolverQR(handle, colsA, dAtA, colsA, d_Atb, d_x);
}
else if ( 1 == Func )
{
linearSolverCHOL(handle, colsA, dAtA, colsA, d_Atb, d_x);
}
else if ( 2 == Func )
{
linearSolverLU(handle, colsA, dAtA, colsA, d_Atb, d_x);
}
else if ( 3 == Func )
{
linearSolverSVD(handle, colsA, dAtA, colsA, d_Atb, d_x);
}
else
{
fprintf(stderr, "Error: %d is unknown function\n", Func);
exit(EXIT_FAILURE);
}
if (dAtA) { checkCudaErrors(hipFree(dAtA)); }
if (d_Atb) { checkCudaErrors(hipFree(d_Atb)); }
//if (d_A) { checkCudaErrors(hipFree(d_A)); }
//if (d_b) { checkCudaErrors(hipFree(d_b)); }
}
void DnSolver::solve_Axb(int Func) {
if ( 0 == Func )
{
linearSolverQR(handle, colsA, d_A, colsA, d_b, d_x);
}
else if ( 1 == Func )
{
linearSolverCHOL(handle, colsA, d_A, colsA, d_b, d_x);
}
else if ( 2 == Func )
{
linearSolverLU(handle, colsA, d_A, colsA, d_b, d_x);
}
else if ( 3 == Func )
{
linearSolverSVD(handle, colsA, d_A, colsA, d_b, d_x);
}
else
{
fprintf(stderr, "Error: %d is unknown function\n", Func);
exit(EXIT_FAILURE);
}
//if (d_A) { checkCudaErrors(hipFree(d_A)); }
//if (d_b) { checkCudaErrors(hipFree(d_b)); }
}
void DnSolver::retrieve_to(float* h_x)
{
checkCudaErrors(hipMemcpy(h_x, d_x, sizeof(float)*colsA, hipMemcpyDeviceToHost));
//checkCudaErrors(hipFree(d_x));
//if (d_A) { checkCudaErrors(hipFree(d_A)); }
//if (d_b) { checkCudaErrors(hipFree(d_b)); }
}
DnSolver::~DnSolver()
{
if (handle) { checkCudaErrors(hipsolverDnDestroy(handle)); }
if (cublasHandle) { checkCudaErrors(hipblasDestroy(cublasHandle)); }
if (cusparseHandle) { checkCudaErrors(hipsparseDestroy(cusparseHandle)); }
if (descrA) { checkCudaErrors(hipsparseDestroyMatDescr(descrA)); }
if (stream) { checkCudaErrors(hipStreamDestroy(stream)); }
if (h_A) { free(h_A); }
if (h_x) { free(h_x); }
if (h_b) { free(h_b); }
if (h_csrValA ) { free(h_csrValA); }
if (h_csrRowPtrA) { free(h_csrRowPtrA); }
if (h_csrColIndA) { free(h_csrColIndA); }
if (d_A) { checkCudaErrors(hipFree(d_A)); }
if (d_x) { checkCudaErrors(hipFree(d_x)); }
if (d_b) { checkCudaErrors(hipFree(d_b)); }
if (d_csrValA ) { checkCudaErrors(hipFree(d_csrValA)); }
if (d_csrRowPtrA) { checkCudaErrors(hipFree(d_csrRowPtrA)); }
if (d_csrColIndA) { checkCudaErrors(hipFree(d_csrColIndA)); }
}
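/* A minimal usage sketch (illustrative only, not part of the original interface): it
   assumes a square system, since solve_Axb() passes colsA as both the order and the
   leading dimension, and it malloc()s A and b because from_dense() keeps the raw host
   pointers and the destructor free()s them. Func code 0 selects the QR path above. */
static void example_square_solve(int n, float* x_out)
{
float* A = (float*)malloc(sizeof(float)*n*n); /* column-major, lda == n */
float* b = (float*)malloc(sizeof(float)*n);
for (int j = 0; j < n; j++) {
for (int i = 0; i < n; i++) {
A[i + j*n] = (i == j) ? 2.0f : 0.0f; /* simple diagonal test matrix */
}
}
for (int i = 0; i < n; i++) b[i] = 1.0f; /* expected solution: x_i = 0.5 */
DnSolver solver(n, n);
solver.from_dense(A, b); /* copy A and b to the device */
solver.solve_Axb(0); /* 0 = QR, 1 = CHOL, 2 = LU, 3 = SVD */
solver.retrieve_to(x_out); /* copy the n solution values back to the host */
}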
| 88dc5040ffcdb8b6e14248dcd649574078e7614e.cu | //#include "cuSolverDn_AtA.cu"
#include "Solver_manager.hh"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <assert.h>
#include <cmath>
#include <cuda_runtime.h>
#include "SI.h"
#include "cublas_v2.h"
#include "cusparse.h"
#include "cusolverDn.h"
#include "helper_cuda.h"
#include "helper_cusolver.h"
int linearSolverCHOL(
cusolverDnHandle_t handle,
int n,
const float *Acopy,
int lda,
const float *b,
float *x)
{
int bufferSize = 0;
int *info = NULL;
float *buffer = NULL;
float *A = NULL;
int h_info = 0;
float start, stop;
float time_solve;
cublasFillMode_t uplo = CUBLAS_FILL_MODE_LOWER;
checkCudaErrors(cusolverDnSpotrf_bufferSize(handle, uplo, n, (float*)Acopy, lda, &bufferSize));
checkCudaErrors(cudaMalloc(&info, sizeof(int)));
checkCudaErrors(cudaMalloc(&buffer, sizeof(float)*bufferSize));
checkCudaErrors(cudaMalloc(&A, sizeof(float)*lda*n));
// prepare a copy of A because potrf will overwrite A with L
checkCudaErrors(cudaMemcpy(A, Acopy, sizeof(float)*lda*n, cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemset(info, 0, sizeof(int)));
start = second();
start = second();
checkCudaErrors(cusolverDnSpotrf(handle, uplo, n, A, lda, buffer, bufferSize, info));
checkCudaErrors(cudaMemcpy(&h_info, info, sizeof(int), cudaMemcpyDeviceToHost));
if ( 0 != h_info ){
fprintf(stderr, "Error: Cholesky factorization failed, check %d parameter\n", h_info);
}
checkCudaErrors(cudaMemcpy(x, b, sizeof(float)*n, cudaMemcpyDeviceToDevice));
checkCudaErrors(cusolverDnSpotrs(handle, uplo, n, 1, A, lda, x, n, info));
checkCudaErrors(cudaDeviceSynchronize());
stop = second();
time_solve = stop - start;
fprintf (stdout, "timing: cholesky = %10.6f sec\n", time_solve);
if (info ) { checkCudaErrors(cudaFree(info)); }
if (buffer) { checkCudaErrors(cudaFree(buffer)); }
if (A ) { checkCudaErrors(cudaFree(A)); }
return 0;
}
/*
* solve A*x = b by LU with partial pivoting
*
*/
int linearSolverLU(
cusolverDnHandle_t handle,
int n,
const float *Acopy,
int lda,
const float *b,
float *x)
{
int bufferSize = 0;
int *info = NULL;
float *buffer = NULL;
float *A = NULL;
int *ipiv = NULL; // pivoting sequence
int h_info = 0;
float start, stop;
float time_solve;
checkCudaErrors(cusolverDnSgetrf_bufferSize(handle, n, n, (float*)Acopy, lda, &bufferSize));
checkCudaErrors(cudaMalloc(&info, sizeof(int)));
checkCudaErrors(cudaMalloc(&buffer, sizeof(float)*bufferSize));
checkCudaErrors(cudaMalloc(&A, sizeof(float)*lda*n));
checkCudaErrors(cudaMalloc(&ipiv, sizeof(int)*n));
// prepare a copy of A because getrf will overwrite A with L
checkCudaErrors(cudaMemcpy(A, Acopy, sizeof(float)*lda*n, cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemset(info, 0, sizeof(int)));
start = second();
start = second();
checkCudaErrors(cusolverDnSgetrf(handle, n, n, A, lda, buffer, ipiv, info));
checkCudaErrors(cudaMemcpy(&h_info, info, sizeof(int), cudaMemcpyDeviceToHost));
if ( 0 != h_info ){
fprintf(stderr, "Error: LU factorization failed, check %d parameter\n", h_info);
}
checkCudaErrors(cudaMemcpy(x, b, sizeof(float)*n, cudaMemcpyDeviceToDevice));
checkCudaErrors(cusolverDnSgetrs(handle, CUBLAS_OP_N, n, 1, A, lda, ipiv, x, n, info));
checkCudaErrors(cudaDeviceSynchronize());
stop = second();
time_solve = stop - start;
fprintf (stdout, "timing: LU = %10.6f sec\n", time_solve);
if (info ) { checkCudaErrors(cudaFree(info )); }
if (buffer) { checkCudaErrors(cudaFree(buffer)); }
if (A ) { checkCudaErrors(cudaFree(A)); }
if (ipiv ) { checkCudaErrors(cudaFree(ipiv));}
return 0;
}
void linearSolverSVD(
cusolverDnHandle_t handle,
int n,
const float *Acopy,
int lda,
const float *bcopy,
float *x)
{
cublasHandle_t cublasHandle = NULL; // used in residual evaluation
int m = lda;
int bufferSize = 0;
int *info = NULL;
int h_info = 0;
float start, stop;
float time_solve;
const float one = 1.0;
// float U[lda*m]; // m-by-m unitary matrix
// float VT[lda*n]; // n-by-n unitary matrix
// float S[n]; //singular value
float *d_A = NULL; float *d_SI = NULL;
float *d_b = NULL; float *d_S = NULL;
float *d_U = NULL; float *d_VT = NULL;
float *d_work = NULL;
float *d_rwork = NULL;
float *d_W = NULL;
signed char jobu = 'A'; // all m columns of U
signed char jobvt = 'A'; // all n columns of VT
// step 1: create cusolverDn/cublas handle
checkCudaErrors(cublasCreate(&cublasHandle));
checkCudaErrors(cudaMalloc((void**)&d_A , sizeof(float)*lda*n)); \
checkCudaErrors(cudaMalloc((void**)&d_b , sizeof(float)*m));
checkCudaErrors(cudaMalloc((void**)&d_S , sizeof(float)*n));
checkCudaErrors(cudaMalloc((void**)&d_SI , sizeof(float)*lda*n));
checkCudaErrors(cudaMalloc((void**)&d_U , sizeof(float)*lda*m));
checkCudaErrors(cudaMalloc((void**)&d_VT , sizeof(float)*lda*n));
checkCudaErrors(cudaMalloc((void**)&info, sizeof(int)));
checkCudaErrors(cudaMalloc((void**)&d_W , sizeof(float)*lda*n));
checkCudaErrors(cudaMemcpy(d_A, Acopy, sizeof(float)*lda*n, cudaMemcpyDeviceToDevice)); //gesvd destroys d_A on exit
checkCudaErrors(cudaMemcpy(d_b, bcopy, sizeof(float)*m, cudaMemcpyDeviceToDevice));
// checkMatrix(m, n, d_SI, lda, "zero_SI");
// checkMatrix(m, n , d_A, lda, "SVD_AtA");
// checkArray(d_b, m, "SVD_Atb");
checkCudaErrors(cusolverDnSgesvd_bufferSize( handle, m, n, &bufferSize ));
checkCudaErrors(cudaMalloc((void**)&d_work , sizeof(float)*bufferSize));
start = second();
checkCudaErrors(cusolverDnSgesvd(
handle, jobu, jobvt, m, n, d_A, lda, d_S, d_U, lda, d_VT, lda, d_work, bufferSize, d_rwork, info));
//checkCudaErrors(cudaDeviceSynchronize());
// checkCudaErrors(cudaMemcpy(U , d_U , sizeof(float)*lda*m, cudaMemcpyDeviceToHost));
// checkCudaErrors(cudaMemcpy(VT, d_VT, sizeof(float)*lda*n, cudaMemcpyDeviceToHost));
// checkCudaErrors(cudaMemcpy(S , d_S , sizeof(float)*n , cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(&h_info, info, sizeof(int), cudaMemcpyDeviceToHost));
if ( 0 != h_info ){
fprintf(stderr, "Error: SVD failed, check %d parameter\n", h_info);
}
// int BLOCK_DIM_X = 32; int BLOCK_DIM_Y = 32;
// dim3 blockDim(BLOCK_DIM_X, BLOCK_DIM_Y);
// dim3 gridDim((n + BLOCK_DIM_X - 1) / BLOCK_DIM_X, (m + BLOCK_DIM_Y - 1) / BLOCK_DIM_Y);
// initSIGPU<<<gridDim, blockDim>>>(d_SI, d_S, m, n);
float epsilon = 1.e-9;
printf("epsilon = %f \n", epsilon);
initSI<float>(d_SI, d_S, m, n, epsilon, 256);
//int initStat = initSICPU(d_SI, d_S, m, n, epsilon);
// U*S*V*x=b; x = VT*Si*UT*b
// checkMatrix(m, n, d_SI, lda, "SVD_SI");
// checkArray(d_S, n, "dS");
// checkMatrix(m, m, d_U, lda, "SVD_U");
// checkMatrix(n, n, d_VT, lda, "SVD_VT");
float al = 1.0;// al =1
float bet = 0.0;// bet =0
// checkArray(d_b, n, "db");
checkCudaErrors(cublasSgemv(cublasHandle,CUBLAS_OP_T, m, m, &al,d_U, m, d_b,1,&bet,d_b,1));
// checkArray(d_b, n, "dUtb");
checkCudaErrors(cublasSgemv(cublasHandle,CUBLAS_OP_N, m, n, &al,d_SI, m, d_b,1,&bet,d_b,1));
// checkArray(d_b, n, "dSiUtb");
checkCudaErrors(cublasSgemv(cublasHandle,CUBLAS_OP_T, n, n, &al,d_VT, n, d_b, 1,&bet,x,1));
checkCudaErrors(cudaDeviceSynchronize());
stop = second();
time_solve = stop - start;
fprintf (stdout, "timing: SVD = %10.6f sec\n", time_solve);
// checkArray(x, 20, "d_x");
if (d_A ) cudaFree(d_A);
if (d_S ) cudaFree(d_S);
if (d_SI ) cudaFree(d_SI);
if (d_U ) cudaFree(d_U);
if (d_VT ) cudaFree(d_VT);
if (info) cudaFree(info);
if (d_work ) cudaFree(d_work);
if (d_rwork) cudaFree(d_rwork);
if (d_W ) cudaFree(d_W);
if (cublasHandle ) cublasDestroy(cublasHandle);
// if (cusolverH) cusolverDnDestroy(cusolverH);
}
/*
* solve A*x = b by QR
*
*/
int linearSolverQR(
cusolverDnHandle_t handle,
int n,
const float *Acopy,
int lda,
const float *b,
float *x)
{
cublasHandle_t cublasHandle = NULL; // used in residual evaluation
int bufferSize = 0;
int bufferSize_geqrf = 0;
int bufferSize_ormqr = 0;
int *info = NULL;
float *buffer = NULL;
float *A = NULL;
float *tau = NULL;
int h_info = 0;
float start, stop;
float time_solve;
const float one = 1.0;
checkCudaErrors(cublasCreate(&cublasHandle));
checkCudaErrors(cusolverDnSgeqrf_bufferSize(handle, n, n, (float*)Acopy, lda, &bufferSize_geqrf));
checkCudaErrors(cusolverDnSormqr_bufferSize(
handle,
CUBLAS_SIDE_LEFT,
CUBLAS_OP_T,
n,
1,
n,
A,
lda,
NULL,
x,
n,
&bufferSize_ormqr));
//printf("buffer_geqrf = %d, buffer_ormqr = %d \n", bufferSize_geqrf, bufferSize_ormqr);
bufferSize = (bufferSize_geqrf > bufferSize_ormqr)? bufferSize_geqrf : bufferSize_ormqr ;
checkCudaErrors(cudaMalloc(&info, sizeof(int)));
checkCudaErrors(cudaMalloc(&buffer, sizeof(float)*bufferSize));
checkCudaErrors(cudaMalloc(&A, sizeof(float)*lda*n));
checkCudaErrors(cudaMalloc ((void**)&tau, sizeof(float)*n));
// prepare a copy of A because getrf will overwrite A with L
checkCudaErrors(cudaMemcpy(A, Acopy, sizeof(float)*lda*n, cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemset(info, 0, sizeof(int)));
start = second();
start = second();
// compute QR factorization
checkCudaErrors(cusolverDnSgeqrf(handle, n, n, A, lda, tau, buffer, bufferSize, info));
checkCudaErrors(cudaMemcpy(&h_info, info, sizeof(int), cudaMemcpyDeviceToHost));
if ( 0 != h_info ){
fprintf(stderr, "Error: QR factorization failed, check %d parameter\n", h_info);
}
checkCudaErrors(cudaMemcpy(x, b, sizeof(float)*n, cudaMemcpyDeviceToDevice));
// compute Q^T*b
checkCudaErrors(cusolverDnSormqr(
handle,
CUBLAS_SIDE_LEFT,
CUBLAS_OP_T,
n,
1,
n,
A,
lda,
tau,
x,
n,
buffer,
bufferSize,
info));
// x = R \ Q^T*b
checkCudaErrors(cublasStrsm(
cublasHandle,
CUBLAS_SIDE_LEFT,
CUBLAS_FILL_MODE_UPPER,
CUBLAS_OP_N,
CUBLAS_DIAG_NON_UNIT,
n,
1,
&one,
A,
lda,
x,
n));
checkCudaErrors(cudaDeviceSynchronize());
stop = second();
time_solve = stop - start;
fprintf (stdout, "timing: QR = %10.6f sec\n", time_solve);
if (cublasHandle) { checkCudaErrors(cublasDestroy(cublasHandle)); }
if (info ) { checkCudaErrors(cudaFree(info )); }
if (buffer) { checkCudaErrors(cudaFree(buffer)); }
if (A ) { checkCudaErrors(cudaFree(A)); }
if (tau ) { checkCudaErrors(cudaFree(tau)); }
return 0;
}
DnSolver::DnSolver (int rows_, int cols_)
{
/* the lazily-allocated sparse buffers must start out unset so from_csr()/from_coo() know to allocate them */
d_csrValA = NULL; d_csrRowPtrA = NULL; d_csrColIndA = NULL; d_cooRowIndA = NULL;
rowsA = rows_;
colsA = cols_;
lda = rows_;
checkCudaErrors(cusolverDnCreate(&handle));
checkCudaErrors(cublasCreate(&cublasHandle));
checkCudaErrors(cudaStreamCreate(&stream));
checkCudaErrors(cusparseCreate(&cusparseHandle));
checkCudaErrors(cusparseCreateMatDescr(&descrA));
checkCudaErrors(cusolverDnSetStream(handle, stream));
checkCudaErrors(cublasSetStream(cublasHandle, stream));
checkCudaErrors(cusparseSetStream(cusparseHandle, stream));
h_A = (float*)malloc(sizeof(float)*lda*colsA);
h_x = (float*)malloc(sizeof(float)*colsA);
h_b = (float*)malloc(sizeof(float)*rowsA);
checkCudaErrors(cudaMalloc((void **)&d_x, sizeof(float)*colsA));
checkCudaErrors(cudaMalloc((void **)&d_b, sizeof(float)*rowsA));
checkCudaErrors(cudaMalloc((void **)&d_A, sizeof(float)*lda*colsA));
}
void DnSolver::from_dense(float* array_host_, float* rhs_){
h_A = array_host_;
h_b = rhs_;
checkCudaErrors(cudaMemcpy(d_A, h_A, sizeof(float)*lda*colsA, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_b, h_b, sizeof(float)*rowsA, cudaMemcpyHostToDevice));
}
void DnSolver::from_csr(int* indptr_, int* indices_, float* data_, float* rhs_){
h_b = rhs_;
h_csrRowPtrA = indptr_;
h_csrColIndA = indices_;
h_csrValA = data_;
baseA = h_csrRowPtrA[0];
nnzA = h_csrRowPtrA[rowsA] - baseA;
cusparseStatus_t cpstat;
//checkMatrix(nnzA, 1, h_csrValA, nnzA, "h_valA");
cusparseSetMatType(descrA,CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(descrA,CUSPARSE_INDEX_BASE_ZERO);
if (d_csrRowPtrA == NULL ){
printf("allocating pointers \n");
checkCudaErrors(cudaMalloc((void **)&d_csrRowPtrA, sizeof(int)*(rowsA+1)));
checkCudaErrors(cudaMalloc((void **)&d_csrColIndA, sizeof(int)*nnzA));
checkCudaErrors(cudaMalloc((void **)&d_csrValA, sizeof(float)*nnzA));
}
checkCudaErrors(cudaMemcpy(d_csrRowPtrA, h_csrRowPtrA, sizeof(int)*(rowsA+1), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_csrColIndA, h_csrColIndA, sizeof(int)*nnzA, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_csrValA, h_csrValA, sizeof(float)*nnzA, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_b, h_b, sizeof(float)*rowsA, cudaMemcpyHostToDevice));
cpstat = cusparseScsr2dense(
cusparseHandle,
rowsA, colsA,
descrA,
d_csrValA,
d_csrRowPtrA,
d_csrColIndA,
d_A, rowsA);
if (cpstat != CUSPARSE_STATUS_SUCCESS) {
printf ("%s\n", "CuSparse CSR to dense conversion failed");
return;
}
//if (d_csrValA ) { checkCudaErrors(cudaFree(d_csrValA)); }
//if (d_csrRowPtrA) { checkCudaErrors(cudaFree(d_csrRowPtrA)); }
//if (d_csrColIndA) { checkCudaErrors(cudaFree(d_csrColIndA)); }
}
void DnSolver::from_coo(int* indptr_, int* indices_, float* data_, int nnz_, float* rhs_){
h_b = rhs_;
h_cooRowIndA = indptr_;
h_csrColIndA = indices_;
h_csrValA = data_;
nnzA = nnz_;
cusparseStatus_t cpstat;
//checkMatrix(nnzA, 1, h_csrValA, nnzA, "h_valA");
cusparseSetMatType(descrA,CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(descrA,CUSPARSE_INDEX_BASE_ZERO);
if (d_csrRowPtrA == NULL ){
printf("allocating pointers \n");
checkCudaErrors(cudaMalloc((void **)&d_cooRowIndA, sizeof(int)*nnzA));
checkCudaErrors(cudaMalloc((void **)&d_csrRowPtrA, sizeof(int)*(rowsA+1)));
checkCudaErrors(cudaMalloc((void **)&d_csrColIndA, sizeof(int)*nnzA));
checkCudaErrors(cudaMalloc((void **)&d_csrValA, sizeof(float)*nnzA));
}
checkCudaErrors(cudaMemcpy(d_cooRowIndA, h_cooRowIndA, sizeof(int)*nnzA, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_csrColIndA, h_csrColIndA, sizeof(int)*nnzA, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_csrValA, h_csrValA, sizeof(float)*nnzA, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_b, h_b, sizeof(float)*rowsA, cudaMemcpyHostToDevice));
cpstat = cusparseXcoo2csr(cusparseHandle,
d_cooRowIndA, nnzA, rowsA,
d_csrRowPtrA,
CUSPARSE_INDEX_BASE_ZERO);
cpstat = cusparseScsr2dense(
cusparseHandle,
rowsA, colsA,
descrA,
d_csrValA,
d_csrRowPtrA,
d_csrColIndA,
d_A, rowsA);
if (cpstat != CUSPARSE_STATUS_SUCCESS) {
printf ("%s\n", "CuSparse CSR to dense conversion failed");
return;
}
}
void DnSolver::solve(int multFunc, int Func) {
//printf("step 6: compute AtA \n");
cublasStatus_t cbstat;
cusparseStatus_t cpstat;
//if (dAtA == NULL){
// checkCudaErrors(cudaMalloc(&dAtA, sizeof(float)*colsA*colsA));
// checkCudaErrors(cudaMalloc((void **)&d_Atb, sizeof(float)*colsA));
//}
float* dAtA;
float* d_Atb;
checkCudaErrors(cudaMalloc((void **)&dAtA, sizeof(float)*colsA*colsA));
checkCudaErrors(cudaMalloc((void **)&d_Atb, sizeof(float)*colsA));
if ( multFunc == 0){
printf("using sparse multiply\n");
cpstat = cusparseScsrmm(cusparseHandle,
CUSPARSE_OPERATION_TRANSPOSE,
rowsA,colsA,colsA,nnzA, &al,
descrA,
d_csrValA,
d_csrRowPtrA,
d_csrColIndA,
d_A,rowsA,
&bet,dAtA,colsA);
cpstat = cusparseScsrmv(cusparseHandle,
CUSPARSE_OPERATION_TRANSPOSE,
rowsA,colsA, nnzA, &al,
descrA,
d_csrValA,
d_csrRowPtrA,
d_csrColIndA,
d_b,&bet,d_Atb);
}
else {
cbstat = cublasSgemm(cublasHandle,
CUBLAS_OP_T,CUBLAS_OP_N,
colsA,colsA,rowsA,&al,
d_A,rowsA,d_A,rowsA,
&bet,dAtA,colsA);
cbstat = cublasSgemv(cublasHandle,
CUBLAS_OP_T,
rowsA,colsA,&al,
d_A,rowsA,d_b,
1,&bet,d_Atb,1);
}
//print out for debug
//checkMatrix(rowsA, colsA , d_A, lda, "A");
//checkMatrix(rowsA, 1 , d_b, rowsA, "b");
//checkMatrix(colsA, colsA , dAtA, lda, "AtA");
//checkMatrix(colsA, 1 , d_Atb, lda, "Atb");
//if (cublasHandle) { checkCudaErrors(cublasDestroy(cublasHandle)); }
//checkCudaErrors(cublasCreate(&cublasHandle));
//checkCudaErrors(cublasSetStream(cublasHandle, stream));
//printf("step 8: solves AtA*x = At*b \n");
if ( 0 == Func )
{
linearSolverQR(handle, colsA, dAtA, colsA, d_Atb, d_x);
}
else if ( 1 == Func )
{
linearSolverCHOL(handle, colsA, dAtA, colsA, d_Atb, d_x);
}
else if ( 2 == Func )
{
linearSolverLU(handle, colsA, dAtA, colsA, d_Atb, d_x);
}
else if ( 3 == Func )
{
linearSolverSVD(handle, colsA, dAtA, colsA, d_Atb, d_x);
}
else
{
fprintf(stderr, "Error: %d is unknown function\n", Func);
exit(EXIT_FAILURE);
}
if (dAtA) { checkCudaErrors(cudaFree(dAtA)); }
if (d_Atb) { checkCudaErrors(cudaFree(d_Atb)); }
//if (d_A) { checkCudaErrors(cudaFree(d_A)); }
//if (d_b) { checkCudaErrors(cudaFree(d_b)); }
}
void DnSolver::solve_Axb(int Func) {
if ( 0 == Func )
{
linearSolverQR(handle, colsA, d_A, colsA, d_b, d_x);
}
else if ( 1 == Func )
{
linearSolverCHOL(handle, colsA, d_A, colsA, d_b, d_x);
}
else if ( 2 == Func )
{
linearSolverLU(handle, colsA, d_A, colsA, d_b, d_x);
}
else if ( 3 == Func )
{
linearSolverSVD(handle, colsA, d_A, colsA, d_b, d_x);
}
else
{
fprintf(stderr, "Error: %d is unknown function\n", Func);
exit(EXIT_FAILURE);
}
//if (d_A) { checkCudaErrors(cudaFree(d_A)); }
//if (d_b) { checkCudaErrors(cudaFree(d_b)); }
}
void DnSolver::retrieve_to(float* h_x)
{
checkCudaErrors(cudaMemcpy(h_x, d_x, sizeof(float)*colsA, cudaMemcpyDeviceToHost));
//checkCudaErrors(cudaFree(d_x));
//if (d_A) { checkCudaErrors(cudaFree(d_A)); }
//if (d_b) { checkCudaErrors(cudaFree(d_b)); }
}
DnSolver::~DnSolver()
{
if (handle) { checkCudaErrors(cusolverDnDestroy(handle)); }
if (cublasHandle) { checkCudaErrors(cublasDestroy(cublasHandle)); }
if (cusparseHandle) { checkCudaErrors(cusparseDestroy(cusparseHandle)); }
if (descrA) { checkCudaErrors(cusparseDestroyMatDescr(descrA)); }
if (stream) { checkCudaErrors(cudaStreamDestroy(stream)); }
if (h_A) { free(h_A); }
if (h_x) { free(h_x); }
if (h_b) { free(h_b); }
if (h_csrValA ) { free(h_csrValA); }
if (h_csrRowPtrA) { free(h_csrRowPtrA); }
if (h_csrColIndA) { free(h_csrColIndA); }
if (d_A) { checkCudaErrors(cudaFree(d_A)); }
if (d_x) { checkCudaErrors(cudaFree(d_x)); }
if (d_b) { checkCudaErrors(cudaFree(d_b)); }
if (d_csrValA ) { checkCudaErrors(cudaFree(d_csrValA)); }
if (d_csrRowPtrA) { checkCudaErrors(cudaFree(d_csrRowPtrA)); }
if (d_csrColIndA) { checkCudaErrors(cudaFree(d_csrColIndA)); }
}
|
d6e847d0e63d5a138c0bc3d444729c250cee00dc.hip | // !!! This is a file automatically generated by hipify!!!
#include "util/blitz_gpu_function.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <rocblas.h>
#include <cudnn.h>
#include <hiprand/hiprand_kernel.h>
namespace blitz {
boost::scoped_ptr<hipblasHandle_t> CuBlasHandle::instance_(0);
boost::once_flag CuBlasHandle::flag_ = BOOST_ONCE_INIT;
template<>
void BlitzGPUGemm(const bool transa, const bool transb,
const int M, const int N, const int K,
float* A, float* B, float* C, float alpha, float beta) {
hipblasOperation_t TransA = transa ? HIPBLAS_OP_T : HIPBLAS_OP_N;
int lda = transa ? M : K;
hipblasOperation_t TransB = transb ? HIPBLAS_OP_T : HIPBLAS_OP_N;
int ldb = transb ? K : N;
hipblasSgemm(CuBlasHandle::GetInstance(), TransB, TransA, N, M, K,
&alpha, B, ldb, A, lda, &beta, C, N);
}
template<>
void BlitzGPUGemm(const bool transa, const bool transb,
const int M, const int N, const int K,
double* A, double* B, double* C, double alpha, double beta) {
hipblasOperation_t TransA = transa ? HIPBLAS_OP_T : HIPBLAS_OP_N;
int lda = transa ? M : K;
hipblasOperation_t TransB = transb ? HIPBLAS_OP_T : HIPBLAS_OP_N;
int ldb = transb ? K : N;
hipblasDgemm(CuBlasHandle::GetInstance(), TransB, TransA, N, M, K,
&alpha, B, ldb, A, lda, &beta, C, N);
}
template<>
void BlitzGPUTrans(const int M, const int N, float* A) {
hipblasHandle_t handle;
hipblasStatus_t stat = hipblasCreate(&handle);
const float alpha = 1.0f;
const float beta = 0.0f;
hipblasSgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, M, N,
&alpha, A, N, &beta, A, N, A, N);
}
template<>
void BlitzGPUTrans(const int M, const int N, double* A) {
hipblasHandle_t handle;
hipblasStatus_t stat = hipblasCreate(&handle);
const double alpha = 1.0f;
const double beta = 0.0f;
hipblasDgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, M, N,
&alpha, A, N, &beta, A, N, A, N);
}
template<>
float BlitzGPUASum(const int N, const float* data) {
hipblasHandle_t handle;
hipblasStatus_t stat = hipblasCreate(&handle);
float sum = 0.0f;
hipblasSasum(handle, N, data, 1, &sum);
return sum;
}
template<>
double BlitzGPUASum(const int N, const double* data) {
hipblasHandle_t handle;
hipblasStatus_t stat = hipblasCreate(&handle);
double sum = 0.0f;
hipblasDasum(handle, N, data, 1, &sum);
return sum;
}
template<>
void BlitzGenerateNormal(hiprandGenerator_t* gen, float* data,
const int size, const float loc, const float scale) {
hiprandGenerateNormal(*gen, data, size, loc, scale);
}
template<>
void BlitzGenerateNormal(hiprandGenerator_t* gen, double* data,
const int size, const double loc, const double scale) {
hiprandGenerateNormalDouble(*gen, data, size, loc, scale);
}
template<>
void BlitzGenerateUniform(hiprandGenerator_t* gen, float* data, const int size) {
hiprandGenerateUniform(*gen, data, size);
}
template<>
void BlitzGenerateUniform(hiprandGenerator_t* gen, double* data, const int size) {
hiprandGenerateUniformDouble(*gen, data, size);
}
} // namespace blitz
| d6e847d0e63d5a138c0bc3d444729c250cee00dc.cu | #include "util/blitz_gpu_function.h"
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <cublas.h>
#include <cudnn.h>
#include <curand_kernel.h>
namespace blitz {
boost::scoped_ptr<cublasHandle_t> CuBlasHandle::instance_(0);
boost::once_flag CuBlasHandle::flag_ = BOOST_ONCE_INIT;
template<>
void BlitzGPUGemm(const bool transa, const bool transb,
const int M, const int N, const int K,
float* A, float* B, float* C, float alpha, float beta) {
cublasOperation_t TransA = transa ? CUBLAS_OP_T : CUBLAS_OP_N;
int lda = transa ? M : K;
cublasOperation_t TransB = transb ? CUBLAS_OP_T : CUBLAS_OP_N;
int ldb = transb ? K : N;
cublasSgemm_v2(CuBlasHandle::GetInstance(), TransB, TransA, N, M, K,
&alpha, B, ldb, A, lda, &beta, C, N);
}
template<>
void BlitzGPUGemm(const bool transa, const bool transb,
const int M, const int N, const int K,
double* A, double* B, double* C, double alpha, double beta) {
cublasOperation_t TransA = transa ? CUBLAS_OP_T : CUBLAS_OP_N;
int lda = transa ? M : K;
cublasOperation_t TransB = transb ? CUBLAS_OP_T : CUBLAS_OP_N;
int ldb = transb ? K : N;
cublasDgemm_v2(CuBlasHandle::GetInstance(), TransB, TransA, N, M, K,
&alpha, B, ldb, A, lda, &beta, C, N);
}
template<>
void BlitzGPUTrans(const int M, const int N, float* A) {
cublasHandle_t handle;
cublasStatus_t stat = cublasCreate_v2(&handle);
const float alpha = 1.0f;
const float beta = 0.0f;
cublasSgeam(handle, CUBLAS_OP_T, CUBLAS_OP_N, M, N,
&alpha, A, N, &beta, A, N, A, N);
}
template<>
void BlitzGPUTrans(const int M, const int N, double* A) {
cublasHandle_t handle;
cublasStatus_t stat = cublasCreate_v2(&handle);
const double alpha = 1.0f;
const double beta = 0.0f;
cublasDgeam(handle, CUBLAS_OP_T, CUBLAS_OP_N, M, N,
&alpha, A, N, &beta, A, N, A, N);
}
template<>
float BlitzGPUASum(const int N, const float* data) {
cublasHandle_t handle;
cublasStatus_t stat = cublasCreate_v2(&handle);
float sum = 0.0f;
cublasSasum_v2(handle, N, data, 1, &sum);
return sum;
}
template<>
double BlitzGPUASum(const int N, const double* data) {
cublasHandle_t handle;
cublasStatus_t stat = cublasCreate_v2(&handle);
double sum = 0.0f;
cublasDasum_v2(handle, N, data, 1, &sum);
return sum;
}
template<>
void BlitzGenerateNormal(curandGenerator_t* gen, float* data,
const int size, const float loc, const float scale) {
curandGenerateNormal(*gen, data, size, loc, scale);
}
template<>
void BlitzGenerateNormal(curandGenerator_t* gen, double* data,
const int size, const double loc, const double scale) {
curandGenerateNormalDouble(*gen, data, size, loc, scale);
}
template<>
void BlitzGenerateUniform(curandGenerator_t* gen, float* data, const int size) {
curandGenerateUniform(*gen, data, size);
}
template<>
void BlitzGenerateUniform(curandGenerator_t* gen, double* data, const int size) {
curandGenerateUniformDouble(*gen, data, size);
}
} // namespace blitz
|
f9d2e3eab92a7fda459bd4b68f8b82f9eb1123e5.hip | // !!! This is a file automatically generated by hipify!!!
//Needs Header Files for the functions; The header file should have both C and CUDA functions
//This file uses 6 hourly data. Each day is 6 hours long and skipping a day means to add 6
//to the counter that counts the timesteps (l).
//The birds start at 00:00 UTC, which is 6 pm Central Time, for example, when there is no daylight saving time
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <pthread.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include <time.h>
#include <sys/time.h>
#include <stdlib.h>
#include <getopt.h>
#include <math.h>
//#include "birds_CUDA.h"
//#define CUDA_API_PER_THREAD_DEFAULT_STREAM
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#define PI 3.14159
#define LONG_SIZE 429
#define LAT_SIZE 429
#define LINESIZE 15*LONG_SIZE+LONG_SIZE - 3
#define TOTAL_DAYS 122
#define TIMESTEPS_PER_DAY 24
#define TIMESTEPS TOTAL_DAYS*TIMESTEPS_PER_DAY
#define SKIP_TIMESTEPS 0
//This is the number of timesteps that the bird will skip in the beginning to get to the desired
//takeoff time. Since the data starts at 7 pm, the birds will skip the first 23 hours to get to
//6pm.
#define INITIAL_SKIP_TIMESTEPS 23
//The maximum lattitude south that the model cares about bird flight. If birds go below
//that lattitude the model stops
//Counted from the North;
#define MAX_LAT_SOUTH 300
//Stopover days; As of now, if 0 then the bird flies without stopping continiously;
//If 1, then the bird waits for 18 hours after successful 6 hours of flight to fly again
#define STOPOVER_DAYS 0
//#define DESIRED_SPEED 3.6 //Birds want to travel at 10m/s, it is 36km/hr(in the grid it is 3.6 units per hour)
#define DESIRED_SPEED 10.5 //Air speed; Desired speed = flightspeed + windspeed ; Only used in windprofit calculation
#define STD_BIRDANGLE 10.0 //Standard deviation * 6 = the total difference from max to min angle possible
//If STD_BIRDANGLE = 10 then the angle can differ +- (10*6)/2 = +- 30 from mean
#define glCompAcc 1e-8 //If the difference is equal to or less than this then equal
#define MIN_PROFIT -10
//Defining the x-variable size, it's sum and
//sum of squares as needed for slope calculation
#define REGRESSION_HRS 6
//Precipitation (mm/hr) below which birds can fly
#define MAX_PRECIP 2
//HRS_SUM = sum(1 to 12) before. Now has to be sum(1 to 6) = 21
#define HRS_SUM 21
#define HRS_SQUARE_SUM 91
#define DENOM_SLOPE (REGRESSION_HRS * HRS_SQUARE_SUM)-(HRS_SUM * HRS_SUM)
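/* Least-squares slope of the line through the REGRESSION_HRS pressure samples
   (x = 1..6, y = pressure): slope = (n*sum(x*y) - sum(x)*sum(y)) / (n*sum(x^2) - sum(x)^2),
   so with n = 6, sum(x) = HRS_SUM = 21 and sum(x^2) = HRS_SQUARE_SUM = 91 the
   denominator is DENOM_SLOPE = 6*91 - 21*21 = 105; see the slope update in bird_movement(). */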
// Barometric pressure
// Bird finds the pressure at the time it leaves and compares it with the data from
// the previous day.
//The angle that the bird flies when it is out at sea and needs to get back to land.
//To make the birds head back directly west the angle must be set to 180.
#define BIRD_SEA_ANGLE 180
//The maximum number of hours that the birds can fly continiously
#define BIRD_HRS_LIMIT 72
#define TOTAL_DATA_FILES 9
//Total number of data files or variables bird flight depends on;Does not include direction files and land water data
#define NUM_DATA_FILES 6
#define THREADS_PER_BLOCK 512
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
//------------------------------Notes---------------------------------------------------------------------------------------
/*
Altitude = 850 millibars
Year = 2009
22 Jan 2015 No upper limit to the bird flight speed currently; Birds can fly well above 10m/s
Precipitation = millimeters
*/
//--------------------------------------------------------------------------------------------------------------------------
__global__ void setup_kernel(unsigned int seed,hiprandState_t *states);
__global__ void generate_kernel(hiprandState_t *states,float* numbers);
__global__ void bird_movement(float* rowArray,float* colArray,int NumOfBirds,long int start_l,long int cur_l,long int max_timesteps,float* udata,float* vdata,float* u10data,
float* v10data,float* d_dirData,float* rand_norm_nums,float* precipData,float* pressureData,float* lwData,uint8_t* birdStatus);
__device__ float bilinear_interpolation_SmallData(float x,float y,float* data_array);
__device__ float bilinear_interpolation_LargeData(float x,float y,float* data_array,long l);
__device__ float WrappedNormal (int id,float MeanAngle,float AngStdDev,float* rand_norm_nums,long cur_timestep);
__device__ float getProfitValue(float u_val,float v_val,float dirVal,float dir_u,float dir_v);
__device__ long int bird_AtSea(int id,int arrLength,float* rowArray,float* colArray,long int start_l,long int l,float* udata,float* vdata,float* lwData,uint8_t* birdStatus);
static void* write_dataVars(void* arguments);
static void* read_dataFiles(void* arguments);
long int convert_to_month(int month,int day);
static void HandleError( hipError_t err,const char *file, int line );
long Get_GPU_devices();
//-------------------------------------------------------------------------------------------------------------------------------------
struct file_IO {
FILE *fp;
float* inpVals;
float* streamArray;
size_t dataSize;
}inpStruct[8];
//-------------------------------------------------------------------------------------------------------------------------------------
//Global Variables
float* udata;
float* vdata;
float* u10data;
float* v10data;
float* precipData;
float* pressureData;
float* dir_u;
float* dir_v;
float* lwData;
float* dirData;
//###########################################################################################################################################//
__device__ long int bird_AtSea(int id,int arrLength,float* rowArray,float* colArray,long int start_l,long int l,float* udata,float* vdata,float* lwData,uint8_t* birdStatus)
{
printf("Inside the bird_atSea() function\n");
//long int count_timeSteps = l;
float u_val,v_val,u_dir,v_dir,pos_row,pos_col;
int index = 0;
pos_row = rowArray[id * arrLength + l ];
pos_col = colArray[id * arrLength + l ];
printf("After getting the positions of row and columns\n");
//index = lwData[(int)(rintf(pos_row)) * LONG_SIZE + (int)(rintf(pos_col))];
printf("After getting index\n");
float count_timeSteps = 0;
long int bckp_l;
//int i;
//Does not check the first time?
//while(index != 1){
for(count_timeSteps = 0;count_timeSteps<(BIRD_HRS_LIMIT - 10);count_timeSteps++,l++){
/** Bilinear interpolation for u and v data **/
u_val = bilinear_interpolation_LargeData(pos_col,pos_row,udata,l-start_l);
v_val = bilinear_interpolation_LargeData(pos_col,pos_row,vdata,l-start_l);
u_dir = DESIRED_SPEED * cosf(BIRD_SEA_ANGLE * (PI/180));
v_dir = DESIRED_SPEED * sinf(BIRD_SEA_ANGLE * (PI/180));
/** Desired speed needs to change in the case of column position or the birds
will not fly west **/
pos_row = pos_row + (v_val + v_dir) * 0.36 * -1;
pos_col = pos_col + (u_val + u_dir) * 0.36;
//position[(l-l_start)* PosRowLen + (id *2)] = pos_row ;
//position[(l-l_start)* PosRowLen + (id *2) + 1] = pos_col ;
rowArray[id * arrLength + l + 1] = pos_row;
colArray[id * arrLength + l + 1] = pos_col;
printf("Storing row and column data\n");
index = lwData[__float2int_rd(pos_row * LAT_SIZE + pos_col)];
if(index == 1){
//l--;
bckp_l = l;
//This takes it back to the starting time of the previous day
l = l - (6 + 4 + count_timeSteps);
//Use casting to float to get round up value;Add to l
//Then do, l=l+ roundup((float)((count_timeSteps + 10)/24)) * 24; __float2ull_ru
l = l + __float2ull_ru((count_timeSteps + 10)/24) * 24 + 24 * STOPOVER_DAYS;
for(bckp_l;bckp_l <= l;bckp_l++){
rowArray[id * arrLength + bckp_l + 1 ] = pos_row;
colArray[id * arrLength + bckp_l + 1 ] = pos_col;
}
return l;
}
if(pos_row >= MAX_LAT_SOUTH){
printf("Bird reached maximum lattitude; Exiting program\n");
birdStatus[id] = 0;
return -1;
}
}
if(count_timeSteps >= (BIRD_HRS_LIMIT-10)){
printf("Dead Bird! Bird has been flying for 80 hours straight!\n");
birdStatus[id] = 0;
return -1;
}
return l;
}
//###########################################################################################################################################//
__device__ float getProfitValue(float u_val,float v_val,float dirVal,float dir_u,float dir_v)
{
/** All wind data in m/s **/
float diffAngle,magnitude,magnitude_squared,tailComponent,crossComponent,profit_value;
tailComponent = 0;
magnitude = hypotf(u_val,v_val);
magnitude_squared = magnitude * magnitude;
/** Getting the tail component of the wind; or the component of the wind in the desired direction of flight
From formula of getting the vector projection of wind onto the desired direction **/
tailComponent = (dir_v * v_val + dir_u * u_val);
tailComponent = tailComponent/hypotf(dir_u,dir_v);
/** DiffAngle is the angle between the desired direction of the bird and the direction of the wind
DiffAngle has to be calculated such that both the vectors are pointing away from where they meet.
Using the formula to get angle between two vectors **/
diffAngle = acosf( (u_val*dir_u + v_val * dir_v)/ (( hypotf(u_val,v_val) * hypotf(dir_u,dir_v) )) ) * 180/PI;
/** Separate profit value formulas are used depending on whether the tail component is at most, or greater than, the desired speed of the birds **/
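/** The first branch is the law of cosines: it gives the air speed the bird would need,
    i.e. the magnitude of the difference between the desired velocity (length DESIRED_SPEED)
    and the wind (length `magnitude`) separated by diffAngle, and the profit is the desired
    speed minus that requirement. The second branch rewards tailwind and penalizes crosswind. **/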
if(tailComponent <= DESIRED_SPEED) {
profit_value = (DESIRED_SPEED * DESIRED_SPEED) + magnitude_squared - 2 * DESIRED_SPEED * magnitude * cosf(diffAngle * PI/180);
profit_value = DESIRED_SPEED - sqrtf(profit_value);
}
else {
/** Perpendicular to a vector (x,y) is (y,-x) or (-y,x) Cross component is always positive **/
crossComponent = fabsf((-dir_v*u_val + dir_u*v_val)/hypotf(dir_v,dir_u));
profit_value = tailComponent - crossComponent;
}
return profit_value;
}
//###########################################################################################################################################//
__device__ float bilinear_interpolation_SmallData(float x,float y,float* data_array)
{
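/* Standard bilinear interpolation on the grid cell that contains (x, y): interpolate
   along x at rows y1 and y2 to get R1 and R2, then interpolate between them along y.
   The _LargeData variant below does the same on the timestep-l slice of the array. */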
float x1,y1,x2,y2;
float Q11,Q12,Q21,Q22,R1,R2,R;
//float val_x1,val_x2,val_y1,val_y2;
x1 = floorf(x);
x2 = ceilf(x);
y1 = floorf(y);
y2 = ceilf(y);
R = 0;
Q11 = data_array[(int)(y1 * LONG_SIZE + x1)];
Q12 = data_array[(int)(y2 * LONG_SIZE + x1)];
Q21 = data_array[(int)(y1 * LONG_SIZE + x2)];
Q22 = data_array[(int)(y2 * LONG_SIZE + x2)];
R1 = Q11 + (x - x1)*(Q21 - Q11);
R2 = Q12 + (x - x1)*(Q22 - Q12);
R = R1 + (y - y1)*(R2 - R1);
//printf("Q11:%f,Q12:%f,Q21:%f,Q22:%f; And Value=%f\n",Q11,Q12,Q21,Q22,value);
return R;
}
//###########################################################################################################################################//
__device__ float bilinear_interpolation_LargeData(float x,float y,float* data_array,long l)
{
float x1,y1,x2,y2;
float Q11,Q12,Q21,Q22,R1,R2,R;
//float val_x1,val_x2,val_y1,val_y2;
x1 = floorf(x);
x2 = ceilf(x);
y1 = floorf(y);
y2 = ceilf(y);
R = 0;
Q11 = data_array[(int)(l * LAT_SIZE * LONG_SIZE + y1 * LONG_SIZE + x1) ];
Q12 = data_array[(int)(l * LAT_SIZE * LONG_SIZE + y2 * LONG_SIZE + x1) ];
Q21 = data_array[(int)(l * LAT_SIZE * LONG_SIZE + y1 * LONG_SIZE + x2) ];
Q22 = data_array[(int)(l * LAT_SIZE * LONG_SIZE + y2 * LONG_SIZE + x2) ];
R1 = Q11 + (x - x1)*(Q21 - Q11);
R2 = Q12 + (x - x1)*(Q22 - Q12);
R = R1 + (y - y1)*(R2 - R1);
//printf("Q11:%f,Q12:%f,Q21:%f,Q22:%f; And Value=%f\n",Q11,Q12,Q21,Q22,value);
return R;
}
//###########################################################################################################################################//
__global__ void setup_kernel(unsigned int seed,hiprandState_t *states)
{
//Thread indices
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
//int id = y * LONG_SIZE + x; <<---Needs to change from LONG_SIZE to something else
hiprand_init(seed,id,0,&states[id]);
}
//###########################################################################################################################################//
__global__ void generate_kernel(hiprandState_t *states,float* numbers)
{
//Thread indices
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
// int id = y * LONG_SIZE + x; <<----Needs to change from LONG_SIZE
numbers[id] = hiprand_normal(&states[id]);
}
//###########################################################################################################################################//
__device__ float WrappedNormal(int id,float MeanAngle,float AngStdDev,float* rand_norm_nums,long cur_timestep)
{
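/* Draws one heading from a normal distribution with the given mean and standard
   deviation and wraps it into [0, 360), consuming the two pre-generated random
   numbers stored for this bird at this timestep. */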
float z,x,y,u1,u2;
u1 = rand_norm_nums[id * TIMESTEPS * 2 + cur_timestep];
u2 = rand_norm_nums[id * TIMESTEPS * 2 + TIMESTEPS + cur_timestep];
while(1){
z = 1.715538 * (u1 - 0.5)/u2;
x = 0.25 * z * z;
if((x - (1- u2)) < glCompAcc){
break;
}else if(x -(-logf(u2)) < glCompAcc){
break;
}
}
y = AngStdDev * z + MeanAngle;
if((y - 360) > (-glCompAcc)){
y = y - 360;
}
if(y < 0){
y = 360 + y;
}
return y;
}
//###########################################################################################################################################//
//###########################################################################################################################################//
//###########################################################################################################################################//
__global__ void bird_movement(float* rowArray,float* colArray,int NumOfBirds,long int start_l,long int cur_l,long int max_timesteps,float* udata,float* vdata,float* u10data,float* v10data,
float* dirData,float* rand_norm_nums,float* precipData,float* pressureData,float* lwData,uint8_t* birdStatus)
{
//Thread indices
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
// int id = y * LONG_SIZE + x; <<---- This needs to change
//printf("Inside the kernel\n");
if(id > (NumOfBirds -1)||(birdStatus[id]==0)||(cur_l > max_timesteps)){
return;
}
else{
//Making a local copy of the timstep variable
long int l;
long l_old;
float profit_value,actualAngle,wrappedAngle;
float last_pressure,pressure_sum,pressure_MultSum,slope;
float u_ten,v_ten,u_val,v_val,uDir_value,vDir_value,precip_val;
int k,i;
float pos_row,pos_col;
int arrLength;//Length of the row and column array for each bird
int index;
l = cur_l;
arrLength = (TIMESTEPS + 1);
index = (int)(id * (TIMESTEPS + 1) + l);
slope = 0;
printf("Value of l is %ld\n",l);
// pos_row = id * arrLength + (l - l_start);
printf("Array length per bird is %d\n",arrLength);
printf("id is %d\n",id);
//printf("Current l is: %d\n",current_l);
printf("id * arrayLength is:%d\n",id*arrLength);
printf("Calculated array index value is: %d\n",index);
//return;
//while(l < (TOTAL_DAYS * TIMESTEPS_PER_DAY - 24)){
while(l < max_timesteps){
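/* One decision cycle per iteration: evaluate the wind profit and pressure trend at
   take-off; if favourable, fly 6 hours, then up to 4 more (and bird_AtSea() beyond that)
   while still over water, otherwise hold position until the next take-off; finally
   refresh the 6-hour pressure regression used for the next decision. */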
//current_l = (int)(l -l_start);
printf("Inside the while loop\n");
//printf("Index here is %d\n",id * arrLength + current_l);
//printf("Before printing pos_row and pos_col\n");
printf("Starting pos_row is %f , pos_col is: %f\n",*(rowArray + id * arrLength + l),*(colArray + id * arrLength + l));
printf("After printing pos_row and pos_col\n");
printf("Before any computation; Timestep #: %ld\n",l);
pos_row = rowArray[id * arrLength + l ];
pos_col = colArray[id * arrLength + l];
if((pos_row > LAT_SIZE) || (pos_col >LONG_SIZE)||(pos_row < 0)||(pos_col < 0 )){
birdStatus[id] = 0;
return;
}
//printf("After position calculations\n");
actualAngle = dirData[__float2int_rd(pos_row * LAT_SIZE + pos_col)];
wrappedAngle = WrappedNormal(id,actualAngle,STD_BIRDANGLE,rand_norm_nums,l); //assumption: the pre-generated random numbers are indexed by the absolute timestep l
uDir_value = DESIRED_SPEED * cosf(wrappedAngle * (PI/180));
vDir_value = DESIRED_SPEED * sinf(wrappedAngle * (PI/180));
//##################Accesing should be relative; The l values should be adjusted when accesing as the arrays brought in
//start index at 0(?)
printf("Current l is: %ld\n",l);
u_ten = bilinear_interpolation_LargeData(pos_col,pos_row,u10data,l-start_l);
v_ten = bilinear_interpolation_LargeData(pos_col,pos_row,v10data,l-start_l);
profit_value = getProfitValue(u_ten,v_ten,wrappedAngle,uDir_value,vDir_value);
if((profit_value >= MIN_PROFIT) && ((last_pressure>=1009)||(slope >-1))){
//printf("Profit value greater than MIN_PROFIT\n");
for(k=0;k<6 && l<max_timesteps;k++,l++) {
//l = (int)(l -l_start);
u_val = bilinear_interpolation_LargeData(pos_col,pos_row,udata,l-start_l);
v_val = bilinear_interpolation_LargeData(pos_col,pos_row,vdata,l-start_l);
precip_val = bilinear_interpolation_LargeData(pos_col,pos_row,precipData,l-start_l);
//printf("End of bilinear interp for precip\n");
//Getting new position values for row and column
pos_row = rowArray[id * arrLength + l ];
pos_col = colArray[id * arrLength + l ];
//printf("Calculating row and col values\n");
if((pos_row > LAT_SIZE) || (pos_col >LONG_SIZE)||(pos_row < 0)||(pos_col < 0 )){
birdStatus[id] = 0;
return;
}
//Storing the new values
rowArray[id * arrLength + l + 1] = pos_row + (v_val + vDir_value ) * 0.36 * -1;
colArray[id * arrLength + l + 1] = pos_col + (u_val + uDir_value) * 0.36;
//printf("Storing row and col values\n");
printf("6 Hour Flight\tRow: %f,Col:%f\n",rowArray[id * arrLength + l + 1],colArray[id * arrLength + l + 1]);
printf("6 hour flight;Timestep #: %ld\n",l);
}
printf("After 6 hour flight over\n");
pos_row = rowArray[id * arrLength + l];
pos_col = colArray[id * arrLength + l];
printf("After getting row and col values\n");
//printf("End of 6 hour flight\n");
// If the bird is at sea after the first 6 hours of flight
if(lwData[__float2int_rd(pos_row * LAT_SIZE + pos_col)] != 1){
printf("Birds at sea after 6 hours\n");
for(k=6;k<10 && l<max_timesteps;k++,l++){
printf("Timestep # (+4 Hours): %ld\n",l);
uDir_value = DESIRED_SPEED * cosf(wrappedAngle * (PI/180));
vDir_value = DESIRED_SPEED * sinf(wrappedAngle * (PI/180));
u_val = bilinear_interpolation_LargeData(pos_col,pos_row,udata,l-start_l);
v_val = bilinear_interpolation_LargeData(pos_col,pos_row,vdata,l-start_l);
//Getting new position values for row and column and storing it
pos_row += (v_val + vDir_value ) * 0.36 * -1;
pos_col += (u_val + uDir_value) * 0.36;
if((pos_row > LAT_SIZE) || (pos_col >LONG_SIZE)||(pos_row < 0)||(pos_col < 0 )){
return;
}
rowArray[id * arrLength + l + 1] = pos_row;
colArray[id * arrLength + l + 1] = pos_col;
printf("+4 Hour Flight\tRow: %f,Col:%f\n",rowArray[id * arrLength + l + 1],colArray[id * arrLength + l + 1]);
}
// If at sea even after the 4 hours
if(lwData[__float2int_rd(pos_row * LAT_SIZE + pos_col)] != 1){
printf("Birds were at sea even after 10 hours \n");
l = bird_AtSea(id,arrLength,rowArray,colArray,start_l,l,udata,vdata,lwData,birdStatus);
if( l == -1){
return;
}
//printf("After the function bird_AtSea() \n");
}
//printf("End of +4 hours of flight at sea\n");
}else{
for(i=6;i<24;i++,l++){
printf("Timestep # (Not at sea after 6 hours): %ld\n",l);
rowArray[id * arrLength + l + 1] = pos_row;
colArray[id * arrLength + l + 1] = pos_col;
}
}
}
else{
//l += 24;
//l = (int)(l -l_start);
for(i=0;i<18;i++,l++){
printf("Timestep #: %ld\n",l);
rowArray[id * arrLength + l + 1] = pos_row;
colArray[id * arrLength + l + 1] = pos_col;
}
}
l_old = l - REGRESSION_HRS;
pressure_sum = 0;
pressure_MultSum = 0;
//Taking the pressure from 6 hours earlier of the location where the bird landed
for(k=1; (l_old < l) && (k<=REGRESSION_HRS) && (l_old<max_timesteps); l_old++,k++){
pressure_sum += bilinear_interpolation_LargeData(pos_col,pos_row,pressureData,l_old-start_l); //<----------------ERROR HERE
pressure_MultSum += k * bilinear_interpolation_LargeData(pos_col,pos_row,pressureData,l_old-start_l);
//last_pressure is the last day or the day of flight
if(k == REGRESSION_HRS) {
last_pressure = bilinear_interpolation_LargeData(pos_col,pos_row,pressureData,l_old-start_l);
}
}
slope = ((REGRESSION_HRS * pressure_MultSum) - (pressure_sum * HRS_SUM))/(DENOM_SLOPE);
}
}
}
//###########################################################################################################################################//
long Get_GPU_devices()
{
hipDeviceProp_t prop;
int whichDevice,DeviceCount;
long deviceMemory;
HANDLE_ERROR(hipGetDevice(&whichDevice));
HANDLE_ERROR(hipGetDeviceProperties(&prop,whichDevice));
if(!prop.deviceOverlap){
printf("Device does not handle overlaps so streams are not possible\n");
return 0;
}
DeviceCount = 0;
HANDLE_ERROR(hipGetDeviceCount(&DeviceCount));
if(DeviceCount > 0){
printf("%d Devices Found\n",DeviceCount);
}else{
printf("No devices found or error in reading the number of devices\n");
return 0;
}
int i = 0;
//for(int i = 0;i<DeviceCount;i++){
hipDeviceProp_t properties;
HANDLE_ERROR(hipGetDeviceProperties(&properties,i));
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", properties.name);
printf(" Device Global Memory size: %zd MB \n",properties.totalGlobalMem/1000000);
printf("\n");
deviceMemory = properties.totalGlobalMem;
//}
return deviceMemory;
}
//###########################################################################################################################################//
static void* read_dataFiles(void* arguments)
{
struct file_IO *inputArgs;
inputArgs = (struct file_IO *)arguments;
FILE* textFile;
float* dataArray;
textFile = inputArgs->fp;
dataArray = inputArgs->inpVals;
char line[LINESIZE];
memset(line,'\0',sizeof(line));
char tempVal[15];
memset(tempVal,'\0',sizeof(tempVal));
char* startPtr,*endPtr;
long j;
int i;
float Value;
i=0;
j=0;
memset(line,'\0',sizeof(line));
memset(tempVal,'\0',sizeof(tempVal));
i=0;
j=0;
while(fgets(line,LINESIZE,textFile)!=NULL){
startPtr = line;
for(i=0;i<LONG_SIZE;i++){
Value = 0;
memset(tempVal,'\0',sizeof(tempVal));
if(i != (LONG_SIZE - 1)) {
endPtr = strchr(startPtr,',');
strncpy(tempVal,startPtr,endPtr-startPtr);
//printf("%s ",tempVal);
if(strcmp("NaN",tempVal)==0) {
Value = 0.0;
}
else{
Value = atof(tempVal);
}
dataArray[j * LAT_SIZE + i] = Value;
endPtr = endPtr + 1;
startPtr = endPtr;
//printf("%d,%f ",i,Value);
}
else if(i == (LONG_SIZE - 1)){
strcpy(tempVal,startPtr);
if(strcmp("NaN\n",tempVal)==0) {
Value = 0.0;
}
else{
Value = atof(tempVal);
}
dataArray[j * LAT_SIZE + i] = Value;
}
}
j++;
}
return NULL;
}
//###########################################################################################################################################//
static void* write_dataVars(void* arguments)
{
struct file_IO *inputArgs;
inputArgs = (struct file_IO *)arguments;
float* dataArray,*destArray;
size_t totalSize;
long int i;
dataArray = inputArgs->inpVals;
destArray = inputArgs->streamArray;
totalSize = inputArgs->dataSize;
for(i=0;i<totalSize;i++){
destArray[i] = *(dataArray + i);
}
return NULL;
}
//###########################################################################################################################################//
long int convert_to_month(int month,int day)
{
long int index,offset;
if(month == 8){
index = 1; //The data starts in august
}
else if(month == 9){
index = 32; //The data for september starts after 31 days of august
}
else if(month == 10){
index = 62; //The data for october starts after 31+30 days of sept and august respectively.
}
else if(month == 11){
index = 93; //The data for october starts after 31+30+31 days of sept,aug and oct respectively.
}
else{
printf("\n\t\tIncorrect month used\n\t\tUse between August-November inclusive; Only use numbers ; August = 8\n");
return -1;
}
//If 1st or 2nd of August, start at timestep 23 (after 23 hours)
if(((month == 8) && (day == 1))||((month == 8) && (day == 2))){
offset = 23;
//If in August; Gives correct result for starting timestep
}else if (month == 8){
offset = 23 + (day - 1) * TIMESTEPS_PER_DAY ;
//23 added because 1st day only has 23 hours
}else{
offset = 23 + (index - 2) * TIMESTEPS_PER_DAY + (day - 1) * TIMESTEPS_PER_DAY;
}
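/* Worked example: month = 9, day = 1 gives index = 32, so
   offset = 23 + (32 - 2)*24 + (1 - 1)*24 = 743 timesteps into the data. */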
return offset;
}
//###########################################################################################################################################//
static void HandleError( hipError_t err,const char *file, int line ) {
if (err != hipSuccess) {
printf( "%s in %s at line %d\n", hipGetErrorString( err ),file, line );
// cout << hipGetErrorString(err) << "in" << file << "at line" << line << "\n";
exit( EXIT_FAILURE );
}
}
//###########################################################################################################################################//
//###########################################################################################################################################//
//###########################################################################################################################################//
int main(int argc,char* argv[])
{
//--------------------------Checking for input arguments------------------------------//
char baseFileName[] = "../../Birds_Full/Birds_data/InterpolatedData/";
char yearFileName[80];
char fullFileName[80];
char start_date[12];
char yearStr[4],monthStr[2],dayStr[2];
float starting_row,starting_col;
long int offset_into_data = 0;
int NumOfBirds,year,day,month;
int option;
while ((option = getopt(argc, argv,"y:m:d:r:c:N:")) != -1) {
switch (option) {
case 'y' : year = atoi(optarg);
break;
case 'm' : month = atoi(optarg);
break;
case 'd' : day = atoi(optarg);
break;
case 'r' : starting_row = atof(optarg);
break;
case 'c' : starting_col = atof(optarg);
break;
// case 't' : breadth = atoi(optarg);
// break;
case 'N' : NumOfBirds = atoi(optarg);
break;
default: printf("\nUsage: birds -y Year -m Month(Number) -d DayOfTheMonth -r StartingRow -c StartingCol -N NumberOfBirds\n");
exit(EXIT_FAILURE);
}
}
/** If starting row is greater than or equal the row that we are interested in; Below a particular row we are not interested in the flight of the birds**/
if(starting_row >= MAX_LAT_SOUTH){
printf("\t\tProvided starting row is below the southern most lattitude at which the model is set to stop\n");
printf("\t\tEither change the starting row location and/or MAX_LAT upto which the birds can fly\n");
return -1;
}
//-----------------------------------------------Day-----------------------------------------//
/** Making sure random date is not provided **/
if((day>0) && (day<32)){
sprintf(dayStr,"%d",day);
}else{
printf("\t\t Invalid date provided; Date should be greater than 0 and less than 32\n");
return -1;
}
//-----------------------------------------------Month-----------------------------------------//
/** Making sure month provided is between August and November inclusive **/
if((month < 12) && (month > 7)){
sprintf(monthStr,"%d",month);
}else{
printf("\t\t Invalid month provided; Use between 8 and 11 inclusive\n");
return -1;
}
/** Converting month and day information into number of timesteps; Special case of AUG 1st is also taken care of
Instead of AUG 1 it starts at August 2 (because data starts at 7pm but birds fly at 6pm) **/
offset_into_data = convert_to_month(month,day);
printf("Offset into data is: %ld\n",offset_into_data);
//-----------------------------------------------Year-----------------------------------------//
/** Checking if correct year specified **/
if((year>= 2008) && (year<=2013)){
//Add file location here
sprintf(yearStr,"%d",year);
strcpy(yearFileName,baseFileName);
strcat(yearFileName,yearStr);
strcat(yearFileName,"/");
}
else{
printf("\n\tInvalid year specified\n\tSpecified %d; Use years from 2008 to 2013 in its full format\n",year);
printf("\t\tUsage: birds -y Year -m Month(Number) -d DayOfTheMonth -r StartingRow -c StartingCol -N NumberOfBirds\n");
return -1;
}
strcpy(start_date,yearStr);
strcat(start_date,"/");
strcat(start_date,monthStr);
strcat(start_date,"/");
sprintf(dayStr,"%d",day);
strcat(start_date,dayStr);
//------------Opening position data file where lat and long data will be stored----------------//
FILE *posdataTxt,*vdataTxt,*udataTxt,*v10dataTxt,*u10dataTxt,*precipTxt,*pressureTxt,*lwTxt,*dirTxt;
posdataTxt = fopen("posdata.txt","a");
if(posdataTxt == NULL) {
perror("Cannot open position data file\n");
return -1;
}
//----------------------Opening U850 data file----------------------------//
memset(fullFileName,0,strlen(fullFileName));
strcpy(fullFileName,yearFileName);
strcat(fullFileName,"U850.txt");
printf("U50 filename is %s \n",fullFileName);
udataTxt = fopen(fullFileName,"r");
if(udataTxt == NULL) {
perror("Cannot open file with U850 data\n");
return -1;
}
//------------------------Opening V850 data file--------------------------//
memset(fullFileName,0,strlen(fullFileName));
strcpy(fullFileName,yearFileName);
strcat(fullFileName,"V850.txt");
vdataTxt = fopen(fullFileName,"r");
if(vdataTxt == NULL) {
perror("Cannot open file with V850 data\n");
return -1;
}
//-----------------------Opening U10 data file---------------------------//
//Birds will check the wind at the surface therefore the u and v
//at 10m is required
memset(fullFileName,0,strlen(fullFileName));
strcpy(fullFileName,yearFileName);
strcat(fullFileName,"U10.txt");
u10dataTxt = fopen(fullFileName,"r");
if(u10dataTxt == NULL) {
perror("Cannot open file with U10 data\n");
return -1;
}
//-----------------------Opening V10 data file---------------------------//
memset(fullFileName,0,strlen(fullFileName));
strcpy(fullFileName,yearFileName);
strcat(fullFileName,"V10.txt");
v10dataTxt = fopen(fullFileName,"r");
if(v10dataTxt == NULL) {
perror("Cannot open file with V10 data\n");
return -1;
}
//--------------------Opening PRCP data file------------------------------//
memset(fullFileName,0,strlen(fullFileName));
strcpy(fullFileName,yearFileName);
strcat(fullFileName,"PRCP.txt");
precipTxt = fopen(fullFileName,"r");
if(precipTxt == NULL) {
perror("Cannot open file with PRCP data\n");
return -1;
}
//------------------------Opening MSLP data file--------------------------//
memset(fullFileName,0,strlen(fullFileName));
strcpy(fullFileName,yearFileName);
strcat(fullFileName,"MSLP.txt");
pressureTxt = fopen(fullFileName,"r");
if(pressureTxt == NULL) {
perror("Cannot open file with pressure data!\n");
return -1;
}
//--------------------------Opening Land vs Water File---------------------//
lwTxt = fopen("./Lw_and_Dir/land_water_detail.txt","r");
if(lwTxt == NULL) {
perror("Cannot open file with direction data\n");
return -1;
}
//--------------------------Opening Direction file
//--------------------(Example: ext_crop.txt or extP_crop.txt)-------------//
dirTxt = fopen("./Lw_and_Dir/ext_Final_NewCoordSystem.txt","r");
//dirTxt = fopen("ext_crop.txt","r");
if(dirTxt == NULL) {
perror("Cannot open file with direction data\n");
return -1;
}
//-----------------------------Setting Heap Size,printf buffer size etc--------------------------------------------//
size_t limit;
HANDLE_ERROR(hipDeviceSetLimit(hipLimitPrintfFifoSize, 500 * 1024 * 1024));
hipDeviceGetLimit(&limit,hipLimitPrintfFifoSize);
HANDLE_ERROR(hipDeviceSetLimit(hipLimitMallocHeapSize,(size_t)(6 * LAT_SIZE * LONG_SIZE * TIMESTEPS * sizeof(float))));
//--------------------------Memory Allocation for global arrays containing weather data----------------------------//
float *h_row,*h_col;
float *d_row,*d_col;
float *d_udata,*d_vdata,*d_u10data,*d_v10data,*d_lwData;
float *d_dirData,*d_precipData,*d_pressureData;
uint8_t *h_birdStatus,*d_birdStatus;
dirData = (float*) malloc(LAT_SIZE * LONG_SIZE * sizeof(float));
h_row = (float*) malloc(NumOfBirds * (TIMESTEPS + 1) * sizeof(float));
h_col = (float*) malloc(NumOfBirds * (TIMESTEPS + 1) * sizeof(float));
h_birdStatus = (uint8_t*)malloc(NumOfBirds * sizeof(uint8_t));
udata = (float*)malloc(LAT_SIZE * LONG_SIZE * TIMESTEPS * sizeof(float));
vdata = (float*)malloc(LAT_SIZE * LONG_SIZE * TIMESTEPS * sizeof(float));
u10data = (float*)malloc(LAT_SIZE * LONG_SIZE * TIMESTEPS * sizeof(float));
v10data = (float*)malloc(LAT_SIZE * LONG_SIZE * TIMESTEPS * sizeof(float));
precipData = (float*)malloc(LAT_SIZE * LONG_SIZE * TIMESTEPS * sizeof(float));
pressureData = (float*)malloc(LAT_SIZE * LONG_SIZE * TIMESTEPS * sizeof(float));
lwData = (float*) malloc(LAT_SIZE * LONG_SIZE * sizeof(float));
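/* Editor's note on the layouts assumed by the kernels: h_row/h_col store one trajectory per bird as
   NumOfBirds blocks of (TIMESTEPS + 1) floats, so bird b's position at timestep l sits at index
   b*(TIMESTEPS + 1) + l; the large weather arrays are stored timestep-major, i.e. element
   l*LAT_SIZE*LONG_SIZE + row*LONG_SIZE + col. */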
//------------------------------------------------------------------------------------------------------------------//
/*
HANDLE_ERROR(hipHostMalloc((void**)&udata,LAT_SIZE * LONG_SIZE * TIMESTEPS * sizeof(float)));
HANDLE_ERROR(hipHostMalloc((void**)&vdata,LAT_SIZE * LONG_SIZE * TIMESTEPS * sizeof(float)));
HANDLE_ERROR(hipHostMalloc((void**)&u10data,LAT_SIZE * LONG_SIZE * TIMESTEPS * sizeof(float)));
HANDLE_ERROR(hipHostMalloc((void**)&v10data,LAT_SIZE * LONG_SIZE * TIMESTEPS * sizeof(float)));
HANDLE_ERROR(hipHostMalloc((void**)&precipData,LAT_SIZE * LONG_SIZE * TIMESTEPS * sizeof(float)));
HANDLE_ERROR(hipHostMalloc((void**)&pressureData,LAT_SIZE * LONG_SIZE * TIMESTEPS * sizeof(float)));
HANDLE_ERROR(hipHostMalloc((void**)&lwData,LAT_SIZE * LONG_SIZE * sizeof(float)));
*/
printf("Size of large arrays is %zd\n",sizeof(udata)/sizeof(udata[0]));
printf("Size of large arrays is %ld\n",sizeof(udata)/sizeof(float));
printf("Size of large arrays is %d\n",sizeof(udata)/sizeof(float));
int ii;
for(ii=0;ii<(NumOfBirds * (TIMESTEPS + 1));ii++){
*(h_row + ii) = starting_row;
*(h_col + ii) = starting_col;
}
for(ii=0;ii<NumOfBirds;ii++){
h_birdStatus[ii] = (uint8_t)1;
}
//--------------------------Initializing the structures-------------------------------------------------------------------//
inpStruct[0].fp = vdataTxt;
inpStruct[0].inpVals = vdata;
inpStruct[1].fp = udataTxt;
inpStruct[1].inpVals = udata;
inpStruct[2].fp = v10dataTxt;
inpStruct[2].inpVals = v10data;
inpStruct[3].fp = u10dataTxt;
inpStruct[3].inpVals = u10data;
inpStruct[4].fp = precipTxt;
inpStruct[4].inpVals = precipData;
inpStruct[5].fp = pressureTxt;
inpStruct[5].inpVals = pressureData;
inpStruct[6].fp = lwTxt;
inpStruct[6].inpVals = lwData;
inpStruct[7].fp = dirTxt;
inpStruct[7].inpVals = dirData;
/** Using pthreads to read from the files in parallel**/
pthread_t threads[8];
int i,j;
for(i=0;i<8;i++){
if(pthread_create(&threads[i],NULL,read_dataFiles,(void*)&inpStruct[i]) != 0){
fprintf(stderr,"ERROR: Thread creation using pthreads failed\n");
return -1;
}
}
for(i=0;i<8;i++){
if(pthread_join(threads[i],NULL)!=0){
fprintf(stderr,"ERROR: Thread join failed\n");
return -1;
}
}
printf("End of parallel data read\n");
//-----------------------------------Getting Random Values-------------------------------------------//
int DeviceCount;
float *rand_norm_nums;
hiprandState_t* states;
/** Getting the total number of devices available **/
HANDLE_ERROR(hipGetDeviceCount(&DeviceCount));
HANDLE_ERROR(hipSetDevice(DeviceCount - 1));
HANDLE_ERROR(hipDeviceReset());
HANDLE_ERROR(hipMalloc((void**)&states,NumOfBirds * 2 * TIMESTEPS * sizeof(hiprandState_t)));
HANDLE_ERROR(hipMalloc((void**)&rand_norm_nums,NumOfBirds * 2 * TIMESTEPS * sizeof(float)));
//Making each block have total threads of 32
//GridSize setup such that total y grid is of size NumOfBirds and x grid is of size TIMESTEPS
dim3 blockSize(32,1,1);
dim3 gridSize(((TIMESTEPS * 2) + 31)/32,NumOfBirds,1);
hipLaunchKernelGGL(( setup_kernel), dim3(gridSize),dim3(blockSize), 0, 0, time(NULL),states);
HANDLE_ERROR(hipDeviceSynchronize());
hipLaunchKernelGGL(( generate_kernel), dim3(gridSize),dim3(blockSize), 0, 0, states,rand_norm_nums);
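//Editor's note: rand_norm_nums holds NumOfBirds * 2 * TIMESTEPS pre-generated values; each bird owns a
//contiguous block of 2*TIMESTEPS entries (WrappedNormal reads the first TIMESTEPS of a block as u1 and
//the next TIMESTEPS as u2), which is why the launch grid spans 2*TIMESTEPS threads in x and NumOfBirds in y.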
//Do not need to get them back at all; Will have to send it back to GPU
// hipMemcpy(cpu_nums,rand_norm_nums, LAT_SIZE * LONG_SIZE * sizeof(float),hipMemcpyDeviceToHost);
// hipMemcpy(dir_u,d_u_dirAngle,LAT_SIZE * LONG_SIZE * sizeof(float),hipMemcpyDeviceToHost);
// hipMemcpy(dir_v,d_v_dirAngle,LAT_SIZE * LONG_SIZE * sizeof(float),hipMemcpyDeviceToHost);
/* print them out */
/* for ( j = 0; j < LAT_SIZE; j++) {
for( i = 0;i<LONG_SIZE;i++){
//printf("%f ", cpu_nums[j*LONG_SIZE + i]);
if(i == LONG_SIZE -1) {
printf("%f\n",dir_u[j * LAT_SIZE + i]);
}
else {
printf("%f ",dir_u[j * LAT_SIZE + i]);
}
}
// printf("\n");
}
*/
HANDLE_ERROR(hipDeviceSynchronize());
// free the memory we allocated for the states
HANDLE_ERROR(hipFree(states));
printf("Random number generator is working\n");
//-------------------------------------------------------------------------------------------------------------------------//
HANDLE_ERROR(hipMalloc((void**)&d_row,NumOfBirds * (TIMESTEPS + 1 ) * sizeof(float)));
HANDLE_ERROR(hipMalloc((void**)&d_col,NumOfBirds * (TIMESTEPS + 1 ) * sizeof(float)));
HANDLE_ERROR(hipMalloc((void**)&d_lwData,LAT_SIZE * LONG_SIZE * sizeof(float)));
HANDLE_ERROR(hipMalloc((void**)&d_dirData,LAT_SIZE * LONG_SIZE * sizeof(float)));
HANDLE_ERROR(hipMemcpy(d_row,h_row,NumOfBirds * (TIMESTEPS + 1 ) * sizeof(float),hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_col,h_col,NumOfBirds * (TIMESTEPS + 1 ) * sizeof(float),hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_lwData,lwData,LAT_SIZE * LONG_SIZE * sizeof(float),hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_dirData,dirData,LAT_SIZE * LONG_SIZE * sizeof(float),hipMemcpyHostToDevice));
//-------------------------------------------------------------------------------------------------------------//
size_t MemoryEachVar,DataPerTransfer,SizePerTimestep;
int TimestepsPerTransfer,TimestepsLastTransfer,DaysPerTransfer;
size_t MemoryRemaining,TotalMemory;
HANDLE_ERROR(hipSetDevice(DeviceCount - 1));
// Getting the total remaining memory that the device can allocate
HANDLE_ERROR(hipMemGetInfo(&MemoryRemaining,&TotalMemory));
MemoryRemaining -= 2*NumOfBirds* (TIMESTEPS + 1) * sizeof(float);
MemoryRemaining -= NumOfBirds * sizeof(uint8_t);
//Need to make sure 100MB is free!! For some reason
MemoryRemaining -= 100 * 1000000;
printf("Total mem: %zd,Free mem: %zd\n",TotalMemory,MemoryRemaining);
printf("\n\n\t\t Total Memory remaining is: %zd \n",MemoryRemaining);
//Memory that each variable gets every timestep
MemoryEachVar = MemoryRemaining/NUM_DATA_FILES;
printf("\t\t Memory for each variable is: %zd \n",MemoryEachVar);
// Need to send data per timestep so has to be a multiple of LAT_SIZE *LONG_SIZE* sizeof(float) * 24
//Can also be called as Minimum_Size_Per_Timestep; Sending data so that it is according to days
SizePerTimestep = LAT_SIZE * LONG_SIZE * TIMESTEPS_PER_DAY * sizeof(float);
// To get a number divisible by SizePerTimestep
//DataPerTransfer is the data size to be transferred for each variable
//Example, if 100MB then 100MB for each of the vars is transferred each time
DataPerTransfer = (MemoryEachVar/SizePerTimestep) * SizePerTimestep;
DaysPerTransfer = DataPerTransfer/SizePerTimestep;
TimestepsPerTransfer = DaysPerTransfer * TIMESTEPS_PER_DAY;
printf("\t\tChecking Division: %zd\n",MemoryEachVar/SizePerTimestep);
printf("\t\t Total Timesteps per Transfer of data is: %ld \n",TimestepsPerTransfer);
printf("\t\tData per transfer is %zd\n",DataPerTransfer);
//------------------------------------Getting the size of data needed per transfer---------------------------------------------//
int divisible,Transfers;
long int DataLastTransfer;//Per variable
Transfers = (TOTAL_DAYS * TIMESTEPS_PER_DAY) / TimestepsPerTransfer;
divisible = (TOTAL_DAYS*TIMESTEPS_PER_DAY) % TimestepsPerTransfer;
if(divisible != 0){
Transfers++;
}
printf("Total Transfers required: %ld\n",Transfers);
/** Tota bytes transfered per data transfer**/
const int TotalTransfers = Transfers;
TimestepsLastTransfer = (TOTAL_DAYS*TIMESTEPS_PER_DAY) - (Transfers-1)*TimestepsPerTransfer;
/*
hipStream_t stream[TotalTransfers-1];
for(i=0;i<TotalTransfers-1;i++){
HANDLE_ERROR(hipStreamCreate(&stream[i]));
}
*/
DataLastTransfer = TOTAL_DAYS * TIMESTEPS_PER_DAY * LAT_SIZE * LONG_SIZE * sizeof(float)
- DataPerTransfer * (TotalTransfers-1);
//---------------------------------------Memory allocation per transfer----------------------------------------------------------//
long int start_timestep,cur_timestep,max_timesteps,ptrOffset;
ptrOffset = 0;
//min_timesteps = offset_into_data;
//printf("Current timestep variable is:%ld\n",min_timesteps);
//return 0;
cur_timestep = offset_into_data;
//printf("cur_timestep = offset_into_data; Value in cur_timestep is: %ld\n",cur_timestep);
for(i=0;i<TotalTransfers-1;i++){
HANDLE_ERROR(hipSetDevice(DeviceCount - 1));
HANDLE_ERROR(hipMemGetInfo(&MemoryRemaining,&TotalMemory));
printf("Total mem: %zd,Free mem(Before any allocation): %zd\n",TotalMemory,MemoryRemaining);
HANDLE_ERROR(hipMemGetInfo(&MemoryRemaining,&TotalMemory));
printf("Total mem: %zd,Free mem(After SetDevice): %zd\n",TotalMemory,MemoryRemaining);
//HANDLE_ERROR(hipStreamCreate(&stream[i]));
HANDLE_ERROR(hipMemGetInfo(&MemoryRemaining,&TotalMemory));
printf("Total mem: %zd,Free mem(After Stream Create): %zd\n",TotalMemory,MemoryRemaining);
HANDLE_ERROR(hipMalloc((void**)&d_udata,DataPerTransfer));
HANDLE_ERROR(hipMemGetInfo(&MemoryRemaining,&TotalMemory));
printf("Total mem: %zd,Free mem(After udata allocation): %zd\n",TotalMemory,MemoryRemaining);
HANDLE_ERROR(hipMalloc((void**)&d_vdata,DataPerTransfer));
HANDLE_ERROR(hipMemGetInfo(&MemoryRemaining,&TotalMemory));
printf("Total mem: %zd,Free mem(After vdata allocation): %zd\n",TotalMemory,MemoryRemaining);
HANDLE_ERROR(hipMalloc((void**)&d_u10data,DataPerTransfer));
HANDLE_ERROR(hipMemGetInfo(&MemoryRemaining,&TotalMemory));
printf("Total mem: %zd,Free mem(After u10data allocation): %zd\n",TotalMemory,MemoryRemaining);
HANDLE_ERROR(hipMalloc((void**)&d_v10data,DataPerTransfer));
HANDLE_ERROR(hipMemGetInfo(&MemoryRemaining,&TotalMemory));
printf("Total mem: %zd,Free mem(After v10data allocation): %zd\n",TotalMemory,MemoryRemaining);
HANDLE_ERROR(hipMalloc((void**)&d_precipData,DataPerTransfer));
HANDLE_ERROR(hipMemGetInfo(&MemoryRemaining,&TotalMemory));
printf("Total mem: %zd,Free mem(After precipData allocation): %zd\n",TotalMemory,MemoryRemaining);
HANDLE_ERROR(hipMalloc((void**)&d_pressureData,DataPerTransfer));
HANDLE_ERROR(hipMemGetInfo(&MemoryRemaining,&TotalMemory));
printf("Total mem: %zd,Free mem(After pressureData allocation): %zd\n",TotalMemory,MemoryRemaining);
HANDLE_ERROR(hipMalloc((void**)&d_birdStatus,NumOfBirds * sizeof(uint8_t)));
HANDLE_ERROR(hipMemGetInfo(&MemoryRemaining,&TotalMemory));
printf("Total mem: %zd,Free mem(After birdStatus allocation): %zd\n",TotalMemory,MemoryRemaining);
HANDLE_ERROR(hipDeviceSynchronize());
printf("After all the host allocations %d\n",i);
//-----------------------------------------Initializing gridSize and block Size-------------------------------//
//HANDLE_ERROR(hipSetDevice(DeviceCount - 1));
dim3 gridSize((NumOfBirds + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK,1,1);
dim3 blockSize(THREADS_PER_BLOCK,1,1);
HANDLE_ERROR(hipMemGetInfo(&MemoryRemaining,&TotalMemory));
printf("Total mem: %zd,Free mem(After grid and block init): %zd\n",TotalMemory,MemoryRemaining);
//-----------------------------------------Copying data from CPU to GPU------------------------------------------------//
HANDLE_ERROR(hipSetDevice(DeviceCount - 1));
HANDLE_ERROR(hipMemcpy(d_udata,udata+ptrOffset,DataPerTransfer,hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_vdata,vdata+ptrOffset,DataPerTransfer,hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_u10data,u10data+ptrOffset,DataPerTransfer,hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_v10data,v10data+ptrOffset,DataPerTransfer,hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_precipData,precipData+ptrOffset,DataPerTransfer,hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_pressureData,pressureData+ptrOffset,DataPerTransfer,hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_birdStatus,h_birdStatus,NumOfBirds * sizeof(uint8_t),hipMemcpyHostToDevice));
/*
HANDLE_ERROR(hipMemcpyAsync(d_lwData,lwData,LAT_SIZE * LONG_SIZE * sizeof(float),hipMemcpyHostToDevice,stream[i]));
HANDLE_ERROR(hipMemGetInfo(&MemoryRemaining,&TotalMemory));
printf("Total mem: %zd,Free mem(After grid and block init): %zd\n",TotalMemory,MemoryRemaining);
HANDLE_ERROR(hipMemcpyAsync(d_udata,udata + ptrOffset,DataPerTransfer,hipMemcpyHostToDevice,stream[i]));
HANDLE_ERROR(hipMemcpyAsync(d_vdata,(vdata+ptrOffset),DataPerTransfer,hipMemcpyHostToDevice,stream[i]));
HANDLE_ERROR(hipMemcpyAsync(d_u10data,(u10data+ptrOffset),DataPerTransfer,hipMemcpyHostToDevice,stream[i]));
HANDLE_ERROR(hipMemcpyAsync(d_v10data,(v10data+ptrOffset),DataPerTransfer,hipMemcpyHostToDevice,stream[i]));
HANDLE_ERROR(hipMemcpyAsync(d_precipData,(precipData+ptrOffset),DataPerTransfer,hipMemcpyHostToDevice,stream[i]));
HANDLE_ERROR(hipMemcpyAsync(d_pressureData,(pressureData+ptrOffset),DataPerTransfer,hipMemcpyHostToDevice,stream[i]));
*/
//-----------------------------------------Calling the Kernel-----------------------------------------------------------//
//All of these are inclusive
//If TimeStepsPerTransfer is 9, then they would be: 0-8, 9-17, 18-26,...
max_timesteps = ((i+1) * TimestepsPerTransfer) - 1;
printf("Current timestep variable is:%ld\n",cur_timestep);
printf("Max timestep is: %ld\n",max_timesteps);
printf("Offset into data is:%ld\n",offset_into_data);
/*if((offset_into_data <= max_timesteps) && (i > 0)){
cur_timestep = i * TimestepsPerTransfer;
//cur_timestep = offset_into_data;
}else{
cur_timestep = offset_into_data;
}
*/
start_timestep = i * TimestepsPerTransfer;
if((max_timesteps - offset_into_data) > TimestepsPerTransfer){
cur_timestep = start_timestep;
}else{
cur_timestep = offset_into_data;
}
printf("Current timestep variable after checking if offset less than max_timesteps is:%ld\n",cur_timestep);
hipLaunchKernelGGL(( bird_movement), dim3(gridSize),dim3(blockSize), 0, 0, d_row,d_col,NumOfBirds,start_timestep,cur_timestep,max_timesteps,d_udata,d_vdata,
d_u10data,d_v10data,d_dirData,rand_norm_nums,d_precipData,d_pressureData,d_lwData,d_birdStatus);
//HANDLE_ERROR(hipStreamSynchronize(stream[i]));
HANDLE_ERROR(hipDeviceSynchronize());
//---------------------------------Freeing allocated memory in GPU and pinned memory in CPU-------------------//
printf("Before freeing;Inside the loop\n");
HANDLE_ERROR(hipMemcpy(h_birdStatus,d_birdStatus,NumOfBirds * sizeof(uint8_t),hipMemcpyDeviceToHost));
//HANDLE_ERROR(hipStreamDestroy(stream[i]));
// HANDLE_ERROR(hipFree(d_lwData));
//HANDLE_ERROR(hipFree(d_birdStatus));
HANDLE_ERROR(hipFree(d_udata));
HANDLE_ERROR(hipFree(d_vdata));
HANDLE_ERROR(hipFree(d_u10data));
HANDLE_ERROR(hipFree(d_v10data));
HANDLE_ERROR(hipFree(d_precipData));
HANDLE_ERROR(hipFree(d_pressureData));
//ptrOffset+= DataPerTransfer/sizeof(float);
ptrOffset = (DataPerTransfer/sizeof(float)) * (i + 1);
printf("After all freeing %d\n",i);
}
/*
HANDLE_ERROR(hipMemcpy(h_row,d_row,NumOfBirds * (TIMESTEPS + 1 ) * sizeof(float),hipMemcpyDeviceToHost));
HANDLE_ERROR(hipMemcpy(h_col,d_col,NumOfBirds * (TIMESTEPS + 1 ) * sizeof(float),hipMemcpyDeviceToHost));
for(i = 0;i < NumOfBirds * (TIMESTEPS + 1); i++ ){
printf("%f ",h_row[i]);
if(i == TIMESTEPS){
printf("%f \n",h_row[i]);
}
}
printf("\n\n");
for(i = 0;i < NumOfBirds * (TIMESTEPS + 1); i++ ){
printf("%f ",h_col[i]);
if(i == TIMESTEPS){
printf("%f \n",h_col[i]);
}
}
*/
//-----------------------------------------------------------------------------------------------------------//
//----------------------------------------------------Last Iteration-----------------------------------------//
//-----------------------------------------------------------------------------------------------------------//
// Last iteration where the size might not be the same as others
long int DataRemaining;
DataRemaining = LONG_SIZE * LAT_SIZE * TIMESTEPS * sizeof(float) - (DataPerTransfer * (TotalTransfers-1));
DataRemaining = DataRemaining/NUM_DATA_FILES;
start_timestep = (TotalTransfers - 1) * TimestepsPerTransfer;
max_timesteps = TIMESTEPS;
ptrOffset = (DataPerTransfer/sizeof(float)) * (TotalTransfers - 1);
//----------------------------------------------------------------------------------------//
HANDLE_ERROR(hipSetDevice(DeviceCount - 1));
HANDLE_ERROR(hipMemGetInfo(&MemoryRemaining,&TotalMemory));
printf("Total mem: %zd,Free mem(Before any allocation): %zd\n",TotalMemory,MemoryRemaining);
HANDLE_ERROR(hipMemGetInfo(&MemoryRemaining,&TotalMemory));
printf("Total mem: %zd,Free mem(After SetDevice): %zd\n",TotalMemory,MemoryRemaining);
//HANDLE_ERROR(hipStreamCreate(&stream[i]));
HANDLE_ERROR(hipMemGetInfo(&MemoryRemaining,&TotalMemory));
printf("Total mem: %zd,Free mem(After Stream Create): %zd\n",TotalMemory,MemoryRemaining);
HANDLE_ERROR(hipMalloc((void**)&d_udata,DataRemaining));
HANDLE_ERROR(hipMemGetInfo(&MemoryRemaining,&TotalMemory));
printf("Total mem: %zd,Free mem(After udata allocation): %zd\n",TotalMemory,MemoryRemaining);
HANDLE_ERROR(hipMalloc((void**)&d_vdata,DataRemaining));
HANDLE_ERROR(hipMemGetInfo(&MemoryRemaining,&TotalMemory));
printf("Total mem: %zd,Free mem(After vdata allocation): %zd\n",TotalMemory,MemoryRemaining);
HANDLE_ERROR(hipMalloc((void**)&d_u10data,DataRemaining));
HANDLE_ERROR(hipMemGetInfo(&MemoryRemaining,&TotalMemory));
printf("Total mem: %zd,Free mem(After u10data allocation): %zd\n",TotalMemory,MemoryRemaining);
HANDLE_ERROR(hipMalloc((void**)&d_v10data,DataRemaining));
HANDLE_ERROR(hipMemGetInfo(&MemoryRemaining,&TotalMemory));
printf("Total mem: %zd,Free mem(After v10data allocation): %zd\n",TotalMemory,MemoryRemaining);
HANDLE_ERROR(hipMalloc((void**)&d_precipData,DataRemaining));
HANDLE_ERROR(hipMemGetInfo(&MemoryRemaining,&TotalMemory));
printf("Total mem: %zd,Free mem(After precipData allocation): %zd\n",TotalMemory,MemoryRemaining);
HANDLE_ERROR(hipMalloc((void**)&d_pressureData,DataRemaining));
HANDLE_ERROR(hipMemGetInfo(&MemoryRemaining,&TotalMemory));
printf("Total mem: %zd,Free mem(After pressureData allocation): %zd\n",TotalMemory,MemoryRemaining);
//HANDLE_ERROR(hipMalloc((void**)&d_birdStatus,NumOfBirds * sizeof(uint8_t)));
//HANDLE_ERROR(hipMemGetInfo(&MemoryRemaining,&TotalMemory));
//printf("Total mem: %zd,Free mem(After pressureData allocation): %zd\n",TotalMemory,MemoryRemaining);
HANDLE_ERROR(hipDeviceSynchronize());
printf("After all the host allocations %d\n",i);
//-----------------------------------------Initializing gridSize and block Size-------------------------------//
printf("Before grid and block size allocations\n");
dim3 gridSize2((NumOfBirds + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK,1,1);
dim3 blockSize2(THREADS_PER_BLOCK,1,1);
printf("After grid and block size allocations\n");
//-----------------------------------------Copying data from CPU to GPU----------------------------------------//
HANDLE_ERROR(hipSetDevice(DeviceCount - 1));
HANDLE_ERROR(hipMemcpy(d_udata,udata+ptrOffset,DataRemaining,hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_vdata,vdata+ptrOffset,DataRemaining,hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_u10data,u10data+ptrOffset,DataRemaining,hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_v10data,v10data+ptrOffset,DataRemaining,hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_precipData,precipData+ptrOffset,DataRemaining,hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_pressureData,pressureData+ptrOffset,DataRemaining,hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_birdStatus,h_birdStatus,NumOfBirds * sizeof(uint8_t),hipMemcpyHostToDevice));
//-----------------------------------------Calling the Kernel-------------------------------------------------//
if((max_timesteps - offset_into_data) > TimestepsLastTransfer){
cur_timestep = start_timestep;
}else{
cur_timestep = offset_into_data;
}
printf("Before calling the kernel\n");
hipLaunchKernelGGL(( bird_movement), dim3(gridSize2),dim3(blockSize2), 0, 0, d_row,d_col,NumOfBirds,start_timestep,cur_timestep,max_timesteps,d_udata,d_vdata,
d_u10data,d_v10data,d_dirData,rand_norm_nums,d_precipData,d_pressureData,d_lwData,d_birdStatus);
HANDLE_ERROR(hipDeviceSynchronize());
HANDLE_ERROR(hipMemcpy(h_row,d_row,NumOfBirds * (TIMESTEPS + 1 ) * sizeof(float),hipMemcpyDeviceToHost));
HANDLE_ERROR(hipMemcpy(h_col,d_col,NumOfBirds * (TIMESTEPS + 1 ) * sizeof(float),hipMemcpyDeviceToHost));
for(i = 0;i < NumOfBirds * (TIMESTEPS + 1); i++ ){
printf("%f ",h_row[i]);
if(((i+1) % (TIMESTEPS + 1)) == 0){
printf("%f \n",h_row[i]);
}
}
printf("\n\n");
for(i = 0;i < NumOfBirds * (TIMESTEPS + 1); i++ ){
printf("%f ",h_col[i]);
if(((i+1) % (TIMESTEPS + 1)) == 0){
printf("%f \n",h_col[i]);
}
}
//-----------------------------------------------Freeing allocated memory--------------------------------------//
// HANDLE_ERROR(hipStreamDestroy(stream[0]));
HANDLE_ERROR(hipFree(rand_norm_nums));
HANDLE_ERROR(hipFree(d_birdStatus));
HANDLE_ERROR(hipFree(d_udata));
HANDLE_ERROR(hipFree(d_vdata));
HANDLE_ERROR(hipFree(d_u10data));
HANDLE_ERROR(hipFree(d_v10data));
HANDLE_ERROR(hipFree(d_precipData));
HANDLE_ERROR(hipFree(d_pressureData));
/*
HANDLE_ERROR(hipHostFree(udata));
HANDLE_ERROR(hipHostFree(vdata));
HANDLE_ERROR(hipHostFree(u10data));
HANDLE_ERROR(hipHostFree(v10data));
HANDLE_ERROR(hipHostFree(precipData));
HANDLE_ERROR(hipHostFree(pressureData));
HANDLE_ERROR(hipHostFree(lwData));
*/
free(dirData);
free(udata);
free(vdata);
free(u10data);
free(v10data);
free(precipData);
free(pressureData);
free(lwData);
free(h_birdStatus);
/*
HANDLE_ERROR(hipFree(d_lwData));
HANDLE_ERROR(hipFree(d_u_dirAngle));
HANDLE_ERROR(hipFree(d_v_dirAngle));
printf("After freeing everything\n");
*/
HANDLE_ERROR(hipFree(d_row));
HANDLE_ERROR(hipFree(d_col));
free(h_row);
free(h_col);
//free(lwData);
//free(dirData);
fclose(dirTxt);
fclose(posdataTxt);
fclose(udataTxt);
fclose(vdataTxt);
fclose(v10dataTxt);
fclose(u10dataTxt);
fclose(precipTxt);
fclose(pressureTxt);
fclose(lwTxt);
printf("End\n");
return 0;
}
| f9d2e3eab92a7fda459bd4b68f8b82f9eb1123e5.cu |
//Needs Header Files for the functions; The header file should have both C and CUDA functions
//This file uses hourly data: each day spans TIMESTEPS_PER_DAY (24) timesteps, so skipping a day means
//adding 24 to the counter that counts the timesteps (l).
//The birds start at 00:00 UTC, which is 6pm Central Time, for example when daylight saving time is not in effect
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <pthread.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include <time.h>
#include <sys/time.h>
#include <stdlib.h>
#include <getopt.h>
#include <math.h>
//#include "birds_CUDA.h"
//#define CUDA_API_PER_THREAD_DEFAULT_STREAM
#include <cuda.h>
#include <cuda_runtime.h>
#include <curand_kernel.h>
#define PI 3.14159
#define LONG_SIZE 429
#define LAT_SIZE 429
#define LINESIZE 15*LONG_SIZE+LONG_SIZE - 3
#define TOTAL_DAYS 122
#define TIMESTEPS_PER_DAY 24
#define TIMESTEPS TOTAL_DAYS*TIMESTEPS_PER_DAY
#define SKIP_TIMESTEPS 0
//This is the number of timesteps that the bird will skip in the beginning to get to the desired
//takeoff time. Since the data starts at 7 pm, the birds will skip the first 23 hours to get to
//6pm.
#define INITIAL_SKIP_TIMESTEPS 23
//The maximum lattitude south that the model cares about bird flight. If birds go below
//that lattitude the model stops
//Counted from the North;
#define MAX_LAT_SOUTH 300
//Stopover days; As of now, if 0 then the bird flies without stopping continiously;
//If 1, then the bird waits for 18 hours after successful 6 hours of flight to fly again
#define STOPOVER_DAYS 0
//#define DESIRED_SPEED 3.6 //Birds want to travel at 10m/s, it is 36km/hr(in the grid it is 3.6 units per hour)
#define DESIRED_SPEED 10.5 //Air speed; Desired speed = flightspeed + windspeed ; Only used in windprofit calculation
#define STD_BIRDANGLE 10.0 //Standard deviation * 6 = the total difference from max to min angle possible
//If STD_BIRDANGLE = 10 then the angle can differ +- (10*6)/2 = +- 30 from mean
#define glCompAcc 1e-8 //If the difference is equal to or less than this then equal
#define MIN_PROFIT -10
//Defining the x-variable size, it's sum and
//sum of squares as needed for slope calculation
#define REGRESSION_HRS 6
//Precipitation (mm/hr) below which birds can fly
#define MAX_PRECIP 2
//HRS_SUM = sum(1 to 12) before. Now has to be sum(1 to 6) = 21
#define HRS_SUM 21
#define HRS_SQUARE_SUM 91
#define DENOM_SLOPE (REGRESSION_HRS * HRS_SQUARE_SUM)-(HRS_SUM * HRS_SUM)
// Barometric pressure
// Bird finds the pressure at the time it leaves and compares it with the data from
// the previous day.
//The angle that the bird flies when it is out at sea and needs to get back to land.
//To make the birds head back directly west the angle must be set to 180.
#define BIRD_SEA_ANGLE 180
//The maximum number of hours that the birds can fly continiously
#define BIRD_HRS_LIMIT 72
#define TOTAL_DATA_FILES 9
//Total number of data files or variables bird flight depends on;Does not include direction files and land water data
#define NUM_DATA_FILES 6
#define THREADS_PER_BLOCK 512
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
//------------------------------Notes---------------------------------------------------------------------------------------
/*
Altitude = 850 millibars
Year = 2009
22 Jan 2015 No upper limit to the bird flight speed currently; Birds can fly well above 10m/s
Precipitation = millimeters
*/
//--------------------------------------------------------------------------------------------------------------------------
__global__ void setup_kernel(unsigned int seed,curandState *states);
__global__ void generate_kernel(curandState *states,float* numbers);
__global__ void bird_movement(float* rowArray,float* colArray,int NumOfBirds,long int start_l,long int cur_l,long int max_timesteps,float* udata,float* vdata,float* u10data,
float* v10data,float* d_dirData,float* rand_norm_nums,float* precipData,float* pressureData,float* lwData,uint8_t* birdStatus);
__device__ float bilinear_interpolation_SmallData(float x,float y,float* data_array);
__device__ float bilinear_interpolation_LargeData(float x,float y,float* data_array,long l);
__device__ float WrappedNormal(int id,float MeanAngle,float AngStdDev,long int cur_timestep,float* rand_norm_nums);
__device__ float getProfitValue(float u_val,float v_val,float dirVal,float dir_u,float dir_v);
__device__ long int bird_AtSea(int id,int arrLength,float* rowArray,float* colArray,long int start_l,long int l,float* udata,float* vdata,float* lwData,uint8_t* birdStatus);
static void* write_dataVars(void* arguments);
static void* read_dataFiles(void* arguments);
long int convert_to_month(int month,int day);
static void HandleError( cudaError_t err,const char *file, int line );
long Get_GPU_devices();
//-------------------------------------------------------------------------------------------------------------------------------------
struct file_IO {
FILE *fp;
float* inpVals;
float* streamArray;
size_t dataSize;
}inpStruct[8];
//-------------------------------------------------------------------------------------------------------------------------------------
//Global Variables
float* udata;
float* vdata;
float* u10data;
float* v10data;
float* precipData;
float* pressureData;
float* dir_u;
float* dir_v;
float* lwData;
float* dirData;
//###########################################################################################################################################//
__device__ long int bird_AtSea(int id,int arrLength,float* rowArray,float* colArray,long int start_l,long int l,float* udata,float* vdata,float* lwData,uint8_t* birdStatus)
{
printf("Inside the bird_atSea() function\n");
//long int count_timeSteps = l;
float u_val,v_val,u_dir,v_dir,pos_row,pos_col;
int index = 0;
pos_row = rowArray[id * arrLength + l ];
pos_col = colArray[id * arrLength + l ];
printf("After getting the positions of row and columns\n");
//index = lwData[(int)(rintf(pos_row)) * LONG_SIZE + (int)(rintf(pos_col))];
printf("After getting index\n");
float count_timeSteps = 0;
long int bckp_l;
//int i;
//Does not check the first time?
//while(index != 1){
for(count_timeSteps = 0;count_timeSteps<(BIRD_HRS_LIMIT - 10);count_timeSteps++,l++){
/** Bilinear interpolation for u and v data **/
u_val = bilinear_interpolation_LargeData(pos_col,pos_row,udata,l-start_l);
v_val = bilinear_interpolation_LargeData(pos_col,pos_row,vdata,l-start_l);
u_dir = DESIRED_SPEED * cosf(BIRD_SEA_ANGLE * (PI/180));
v_dir = DESIRED_SPEED * sinf(BIRD_SEA_ANGLE * (PI/180));
/** Desired speed needs to change in the case of column position or the birds
will not fly west **/
pos_row = pos_row + (v_val + v_dir) * 0.36 * -1;
pos_col = pos_col + (u_val + u_dir) * 0.36;
//position[(l-l_start)* PosRowLen + (id *2)] = pos_row ;
//position[(l-l_start)* PosRowLen + (id *2) + 1] = pos_col ;
rowArray[id * arrLength + l + 1] = pos_row;
colArray[id * arrLength + l + 1] = pos_col;
printf("Storing row and column data\n");
index = lwData[__float2int_rd(pos_row * LAT_SIZE + pos_col)];
if(index == 1){
//l--;
bckp_l = l;
//This takes it back to the starting time of the previous day
l = l - (6 + 4 + count_timeSteps);
//Use casting to float to get round up value;Add to l
//Then do, l=l+ roundup((float)((count_timeSteps + 10)/24)) * 24; __float2ull_ru
l = l + __float2ull_ru((count_timeSteps + 10)/24) * 24 + 24 * STOPOVER_DAYS;
for( ;bckp_l <= l;bckp_l++){
rowArray[id * arrLength + bckp_l + 1 ] = pos_row;
colArray[id * arrLength + bckp_l + 1 ] = pos_col;
}
return l;
}
if(pos_row >= MAX_LAT_SOUTH){
printf("Bird reached maximum lattitude; Exiting program\n");
birdStatus[id] = 0;
return -1;
}
}
if(count_timeSteps >= (BIRD_HRS_LIMIT-10)){
printf("Dead Bird! Bird has been flying for 80 hours straight!\n");
birdStatus[id] = 0;
return -1;
}
return l;
}
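/* Editor's note (summary of the routine above): bird_AtSea keeps the bird flying at BIRD_SEA_ANGLE
   (due west) hour by hour until it re-crosses land; on landfall it rounds the clock forward to the
   next take-off time (plus STOPOVER_DAYS) and returns that timestep, while it clears birdStatus[id]
   and returns -1 if the bird passes MAX_LAT_SOUTH or exhausts the continuous-flight limit. */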
//###########################################################################################################################################//
__device__ float getProfitValue(float u_val,float v_val,float dirVal,float dir_u,float dir_v)
{
/** All wind data in m/s **/
float diffAngle,magnitude,magnitude_squared,tailComponent,crossComponent,profit_value;
tailComponent = 0;
magnitude = hypotf(u_val,v_val);
magnitude_squared = magnitude * magnitude;
/** Getting the tail component of the wind; or the component of the wind in the desired direction of flight
From formula of getting the vector projection of wind onto the desired direction **/
tailComponent = (dir_v * v_val + dir_u * u_val);
tailComponent = tailComponent/hypotf(dir_u,dir_v); //normalize by the magnitude of the desired-direction vector
/** DiffAngle is the angle between the desired direction of the bird and the direction of the wind
DiffAngle has to be calculated such that both the vectors are pointing away from where they meet.
Using the formula to get angle between two vectors **/
diffAngle = acosf( (u_val*dir_u + v_val * dir_v)/ (( hypotf(u_val,v_val) * hypotf(dir_u,dir_v) )) ) * 180/PI;
/** Separate profit value methods have to be used if the tail component is less that equal to or greater than the desired speed of the birds **/
if(tailComponent <= DESIRED_SPEED) {
profit_value = (DESIRED_SPEED * DESIRED_SPEED) + magnitude_squared - 2 * DESIRED_SPEED * magnitude * cosf(diffAngle * PI/180);
profit_value = DESIRED_SPEED - sqrtf(profit_value);
}
else {
/** Perpendicular to a vector (x,y) is (y,-x) or (-y,x) Cross component is always positive **/
crossComponent = fabsf((-dir_v*u_val + dir_u*v_val)/hypotf(dir_v,dir_u));
profit_value = tailComponent - crossComponent;
}
return profit_value;
}
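/* Editor's note -- worked example of the first branch: with a 5 m/s wind blowing exactly along the
   desired direction, tailComponent = 5 <= DESIRED_SPEED and diffAngle = 0, so
   profit_value = 10.5 - sqrtf(10.5*10.5 + 5*5 - 2*10.5*5) = 10.5 - 5.5 = 5.0,
   i.e. a pure tail wind is credited at its full speed. */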
//###########################################################################################################################################//
__device__ float bilinear_interpolation_SmallData(float x,float y,float* data_array)
{
float x1,y1,x2,y2;
float Q11,Q12,Q21,Q22,R1,R2,R;
//float val_x1,val_x2,val_y1,val_y2;
x1 = floorf(x);
x2 = ceilf(x);
y1 = floorf(y);
y2 = ceilf(y);
R = 0;
Q11 = data_array[(int)(y1 * LONG_SIZE + x1)];
Q12 = data_array[(int)(y2 * LONG_SIZE + x1)];
Q21 = data_array[(int)(y1 * LONG_SIZE + x2)];
Q22 = data_array[(int)(y2 * LONG_SIZE + x2)];
R1 = Q11 + (x - x1)*(Q21 - Q11);
R2 = Q12 + (x - x1)*(Q22 - Q12);
R = R1 + (y - y1)*(R2 - R1);
//printf("Q11:%f,Q12:%f,Q21:%f,Q22:%f; And Value=%f\n",Q11,Q12,Q21,Q22,value);
return R;
}
//###########################################################################################################################################//
__device__ float bilinear_interpolation_LargeData(float x,float y,float* data_array,long l)
{
float x1,y1,x2,y2;
float Q11,Q12,Q21,Q22,R1,R2,R;
//float val_x1,val_x2,val_y1,val_y2;
x1 = floorf(x);
x2 = ceilf(x);
y1 = floorf(y);
y2 = ceilf(y);
R = 0;
Q11 = data_array[(int)(l * LAT_SIZE * LONG_SIZE + y1 * LONG_SIZE + x1) ];
Q12 = data_array[(int)(l * LAT_SIZE * LONG_SIZE + y2 * LONG_SIZE + x1) ];
Q21 = data_array[(int)(l * LAT_SIZE * LONG_SIZE + y1 * LONG_SIZE + x2) ];
Q22 = data_array[(int)(l * LAT_SIZE * LONG_SIZE + y2 * LONG_SIZE + x2) ];
R1 = Q11 + (x - x1)*(Q21 - Q11);
R2 = Q12 + (x - x1)*(Q22 - Q12);
R = R1 + (y - y1)*(R2 - R1);
//printf("Q11:%f,Q12:%f,Q21:%f,Q22:%f; And Value=%f\n",Q11,Q12,Q21,Q22,value);
return R;
}
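/* Editor's note -- worked example of the interpolation above: for x = 1.25, y = 2.5 with corner values
   Q11 = 10, Q21 = 20, Q12 = 30, Q22 = 40:
   R1 = 10 + 0.25*(20 - 10) = 12.5, R2 = 30 + 0.25*(40 - 30) = 32.5,
   R  = 12.5 + 0.5*(32.5 - 12.5) = 22.5. */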
//###########################################################################################################################################//
__global__ void setup_kernel(unsigned int seed,curandState *states)
{
//Thread indices
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
//Flattened index over the 2-D launch grid: x spans 2*TIMESTEPS samples, y spans NumOfBirds
int id = y * (gridDim.x * blockDim.x) + x;
curand_init(seed,id,0,&states[id]);
}
//###########################################################################################################################################//
__global__ void generate_kernel(curandState *states,float* numbers)
{
//Thread indices
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
//Same flattened index as setup_kernel so every state maps to one (bird,sample) slot
int id = y * (gridDim.x * blockDim.x) + x;
numbers[id] = curand_normal(&states[id]);
}
//###########################################################################################################################################//
__device__ float WrappedNormal(int id,float MeanAngle,float AngStdDev,long int cur_timestep,float* rand_norm_nums)
{
float z,x,y,u1,u2;
//Each bird owns a contiguous block of 2*TIMESTEPS pre-generated values: first half u1, second half u2
u1 = rand_norm_nums[id * TIMESTEPS * 2 + cur_timestep];
u2 = rand_norm_nums[id * TIMESTEPS * 2 + TIMESTEPS + cur_timestep];
while(1){
z = 1.715538 * (u1 - 0.5)/u2;
x = 0.25 * z * z;
if((x - (1- u2)) < glCompAcc){
break;
}else if(x -(-logf(u2)) < glCompAcc){
break;
}
}
y = AngStdDev * z + MeanAngle;
if((y - 360) > (-glCompAcc)){
y = y - 360;
}
if(y < 0){
y = 360 + y;
}
return y;
}
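/* Editor's note: with AngStdDev = STD_BIRDANGLE (10 degrees) the sampled heading stays within roughly
   +/- 30 degrees (3 standard deviations) of MeanAngle, and the final checks wrap the result into [0,360). */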
//###########################################################################################################################################//
//###########################################################################################################################################//
//###########################################################################################################################################//
__global__ void bird_movement(float* rowArray,float* colArray,int NumOfBirds,long int start_l,long int cur_l,long int max_timesteps,float* udata,float* vdata,float* u10data,float* v10data,
float* dirData,float* rand_norm_nums,float* precipData,float* pressureData,float* lwData,uint8_t* birdStatus)
{
//Thread indices
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int id = x; //one thread per bird: bird_movement is launched with a 1-D grid over NumOfBirds
//printf("Inside the kernel\n");
if(id > (NumOfBirds -1)||(birdStatus[id]==0)||(cur_l > max_timesteps)){
return;
}
else{
//Making a local copy of the timstep variable
long int l;
long l_old;
float profit_value,actualAngle,wrappedAngle;
float last_pressure,pressure_sum,pressure_MultSum,slope;
float u_ten,v_ten,u_val,v_val,uDir_value,vDir_value,precip_val;
int k,i;
float pos_row,pos_col;
int arrLength;//Length of the row and column array for each bird
int index;
l = cur_l;
arrLength = (TIMESTEPS + 1);
index = (int)(id * (TIMESTEPS + 1) + l);
slope = 0;
printf("Value of l is %ld\n",l);
// pos_row = id * arrLength + (l - l_start);
printf("Array length per bird is %d\n",arrLength);
printf("id is %d\n",id);
//printf("Current l is: %d\n",current_l);
printf("id * arrayLength is:%d\n",id*arrLength);
printf("Calculated array index value is: %d\n",index);
//return;
//while(l < (TOTAL_DAYS * TIMESTEPS_PER_DAY - 24)){
while(l < max_timesteps){
//current_l = (int)(l -l_start);
printf("Inside the while loop\n");
//printf("Index here is %d\n",id * arrLength + current_l);
//printf("Before printing pos_row and pos_col\n");
printf("Starting pos_row is %f , pos_col is: %f\n",*(rowArray + id * arrLength + l),*(colArray + id * arrLength + l));
printf("After printing pos_row and pos_col\n");
printf("Before any computation; Timestep #: %ld\n",l);
pos_row = rowArray[id * arrLength + l ];
pos_col = colArray[id * arrLength + l];
if((pos_row > LAT_SIZE) || (pos_col >LONG_SIZE)||(pos_row < 0)||(pos_col < 0 )){
birdStatus[id] = 0;
return;
}
//printf("After position calculations\n");
actualAngle = dirData[__float2int_rd(pos_row * LAT_SIZE + pos_col)];
wrappedAngle = WrappedNormal(id,actualAngle,STD_BIRDANGLE,l,rand_norm_nums);
uDir_value = DESIRED_SPEED * cosf(wrappedAngle * (PI/180));
vDir_value = DESIRED_SPEED * sinf(wrappedAngle * (PI/180));
//##################Accesing should be relative; The l values should be adjusted when accesing as the arrays brought in
//start index at 0(?)
printf("Current l is: %ld\n",l);
u_ten = bilinear_interpolation_LargeData(pos_col,pos_row,u10data,l-start_l);
v_ten = bilinear_interpolation_LargeData(pos_col,pos_row,v10data,l-start_l);
profit_value = getProfitValue(u_ten,v_ten,wrappedAngle,uDir_value,vDir_value);
if((profit_value >= MIN_PROFIT) && ((last_pressure>=1009)||(slope >-1))){
//printf("Profit value greater than MIN_PROFIT\n");
for(k=0;k<6 && l<max_timesteps;k++,l++) {
//l = (int)(l -l_start);
u_val = bilinear_interpolation_LargeData(pos_col,pos_row,udata,l-start_l);
v_val = bilinear_interpolation_LargeData(pos_col,pos_row,vdata,l-start_l);
precip_val = bilinear_interpolation_LargeData(pos_col,pos_row,precipData,l-start_l);
//printf("End of bilinear interp for precip\n");
//Getting new position values for row and column
pos_row = rowArray[id * arrLength + l ];
pos_col = colArray[id * arrLength + l ];
//printf("Calculating row and col values\n");
if((pos_row > LAT_SIZE) || (pos_col >LONG_SIZE)||(pos_row < 0)||(pos_col < 0 )){
birdStatus[id] = 0;
return;
}
//Storing the new values
rowArray[id * arrLength + l + 1] = pos_row + (v_val + vDir_value ) * 0.36 * -1;
colArray[id * arrLength + l + 1] = pos_col + (u_val + uDir_value) * 0.36;
//printf("Storing row and col values\n");
printf("6 Hour Flight\tRow: %f,Col:%f\n",rowArray[id * arrLength + l + 1],colArray[id * arrLength + l + 1]);
printf("6 hour flight;Timestep #: %ld\n",l);
}
printf("After 6 hour flight over\n");
pos_row = rowArray[id * arrLength + l];
pos_col = colArray[id * arrLength + l];
printf("After getting row and col values\n");
//printf("End of 6 hour flight\n");
// If the bird is at sea after the first 6 hours of flight
if(lwData[__float2int_rd(pos_row * LAT_SIZE + pos_col)] != 1){
printf("Birds at sea after 6 hours\n");
for(k=6;k<10 && l<max_timesteps;k++,l++){
printf("Timestep # (+4 Hours): %ld\n",l);
uDir_value = DESIRED_SPEED * cosf(wrappedAngle * (PI/180));
vDir_value = DESIRED_SPEED * sinf(wrappedAngle * (PI/180));
u_val = bilinear_interpolation_LargeData(pos_col,pos_row,udata,l-start_l);
v_val = bilinear_interpolation_LargeData(pos_col,pos_row,vdata,l-start_l);
//Getting new position values for row and column and storing it
pos_row += (v_val + vDir_value ) * 0.36 * -1;
pos_col += (u_val + uDir_value) * 0.36;
if((pos_row > LAT_SIZE) || (pos_col >LONG_SIZE)||(pos_row < 0)||(pos_col < 0 )){
return;
}
rowArray[id * arrLength + l + 1] = pos_row;
colArray[id * arrLength + l + 1] = pos_col;
printf("+4 Hour Flight\tRow: %f,Col:%f\n",rowArray[id * arrLength + l + 1],colArray[id * arrLength + l + 1]);
}
// If at sea even after the 4 hours
if(lwData[__float2int_rd(pos_row * LAT_SIZE + pos_col)] != 1){
printf("Birds were at sea even after 10 hours \n");
l = bird_AtSea(id,arrLength,rowArray,colArray,start_l,l,udata,vdata,lwData,birdStatus);
if( l == -1){
return;
}
//printf("After the function bird_AtSea() \n");
}
//printf("End of +4 hours of flight at sea\n");
}else{
for(i=6;i<24;i++,l++){
printf("Timestep # (Not at sea after 6 hours): %ld\n",l);
rowArray[id * arrLength + l + 1] = pos_row;
colArray[id * arrLength + l + 1] = pos_col;
}
}
}
else{
//l += 24;
//l = (int)(l -l_start);
for(i=0;i<18;i++,l++){
printf("Timestep #: %ld\n",l);
rowArray[id * arrLength + l + 1] = pos_row;
colArray[id * arrLength + l + 1] = pos_col;
}
}
l_old = l - REGRESSION_HRS;
pressure_sum = 0;
pressure_MultSum = 0;
//Taking the pressure from 6 hours earlier of the location where the bird landed
for(k=1; (l_old < l) && (k<=REGRESSION_HRS) && (l_old<max_timesteps); l_old++,k++){
pressure_sum += bilinear_interpolation_LargeData(pos_col,pos_row,pressureData,l_old-start_l); //<----------------ERROR HERE
pressure_MultSum += k * bilinear_interpolation_LargeData(pos_col,pos_row,pressureData,l_old-start_l);
//last_pressure is the last day or the day of flight
if(k == REGRESSION_HRS) {
last_pressure = bilinear_interpolation_LargeData(pos_col,pos_row,pressureData,l_old-start_l);
}
}
slope = ((REGRESSION_HRS * pressure_MultSum) - (pressure_sum * HRS_SUM))/(DENOM_SLOPE);
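/* Editor's note: the slope above is the ordinary least-squares slope of the last REGRESSION_HRS hourly
   pressure samples against x = 1..6: slope = (n*sum(x*y) - sum(x)*sum(y)) / (n*sum(x^2) - sum(x)^2),
   with HRS_SUM = 21, HRS_SQUARE_SUM = 91 and DENOM_SLOPE = 6*91 - 21*21 = 105. */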
}
}
}
//###########################################################################################################################################//
long Get_GPU_devices()
{
cudaDeviceProp prop;
int whichDevice,DeviceCount;
long deviceMemory;
HANDLE_ERROR(cudaGetDevice(&whichDevice));
HANDLE_ERROR(cudaGetDeviceProperties(&prop,whichDevice));
if(!prop.deviceOverlap){
printf("Device does not handle overlaps so streams are not possible\n");
return 0;
}
DeviceCount = 0;
HANDLE_ERROR(cudaGetDeviceCount(&DeviceCount));
if(DeviceCount > 0){
printf("%d Devices Found\n",DeviceCount);
}else{
printf("No devices found or error in reading the number of devices\n");
return 0;
}
int i = 0;
//for(int i = 0;i<DeviceCount;i++){
cudaDeviceProp properties;
HANDLE_ERROR(cudaGetDeviceProperties(&properties,i));
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", properties.name);
printf(" Device Global Memory size: %zd MB \n",properties.totalGlobalMem/1000000);
printf("\n");
deviceMemory = properties.totalGlobalMem;
//}
return deviceMemory;
}
//###########################################################################################################################################//
static void* read_dataFiles(void* arguments)
{
struct file_IO *inputArgs;
inputArgs = (struct file_IO *)arguments;
FILE* textFile;
float* dataArray;
textFile = inputArgs->fp;
dataArray = inputArgs->inpVals;
char line[LINESIZE];
memset(line,'\0',sizeof(line));
char tempVal[15];
memset(tempVal,'\0',sizeof(tempVal));
char* startPtr,*endPtr;
long j;
int i;
float Value;
i=0;
j=0;
while(fgets(line,LINESIZE,textFile)!=NULL){
startPtr = line;
for(i=0;i<LONG_SIZE;i++){
Value = 0;
memset(tempVal,'\0',sizeof(tempVal));
if(i != (LONG_SIZE - 1)) {
endPtr = strchr(startPtr,',');
strncpy(tempVal,startPtr,endPtr-startPtr);
//printf("%s ",tempVal);
if(strcmp("NaN",tempVal)==0) {
Value = 0.0;
}
else{
Value = atof(tempVal);
}
dataArray[j * LAT_SIZE + i] = Value;
endPtr = endPtr + 1;
startPtr = endPtr;
//printf("%d,%f ",i,Value);
}
else if(i == (LONG_SIZE - 1)){
strcpy(tempVal,startPtr);
if(strcmp("NaN\n",tempVal)==0) {
Value = 0.0;
}
else{
Value = atof(tempVal);
}
dataArray[j * LAT_SIZE + i] = Value;
}
}
j++;
}
return NULL;
}
//###########################################################################################################################################//
static void* write_dataVars(void* arguments)
{
struct file_IO *inputArgs;
inputArgs = (struct file_IO *)arguments;
float* dataArray,*destArray;
size_t totalSize;
long int i;
dataArray = inputArgs->inpVals;
destArray = inputArgs->streamArray;
totalSize = inputArgs->dataSize;
for(i=0;i<totalSize;i++){
destArray[i] = *(dataArray + i);
}
return NULL;
}
//###########################################################################################################################################//
long int convert_to_month(int month,int day)
{
long int index,offset;
if(month == 8){
index = 1; //The data starts in august
}
else if(month == 9){
index = 32; //The data for september starts after 31 days of august
}
else if(month == 10){
index = 62; //The data for october starts after 31+30 days of august and september respectively.
}
else if(month == 11){
index = 93; //The data for november starts after 31+30+31 days of august, september and october respectively.
}
else{
printf("\n\t\tIncorrect month used\n\t\tUse between August-November inclusive; Only use numbers ; August = 8\n");
return -1;
}
//If 1st or 2nd of August, start at timestep 23 (after 23 hours)
if(((month == 8) && (day == 1))||((month == 8) && (day == 2))){
offset = 23;
//If in August; Gives correct result for starting timestep
}else if (month == 8){
offset = 23 + (day - 1) * TIMESTEPS_PER_DAY ;
//23 added because 1st day only has 23 hours
}else{
offset = 23 + (index - 2) * TIMESTEPS_PER_DAY + (day - 1) * TIMESTEPS_PER_DAY;
}
return offset;
}
//###########################################################################################################################################//
static void HandleError( cudaError_t err,const char *file, int line ) {
if (err != cudaSuccess) {
printf( "%s in %s at line %d\n", cudaGetErrorString( err ),file, line );
// cout << cudaGetErrorString(err) << "in" << file << "at line" << line << "\n";
exit( EXIT_FAILURE );
}
}
//###########################################################################################################################################//
//###########################################################################################################################################//
//###########################################################################################################################################//
int main(int argc,char* argv[])
{
//--------------------------Checking for input arguments------------------------------//
char baseFileName[] = "../../Birds_Full/Birds_data/InterpolatedData/";
char yearFileName[80];
char fullFileName[80];
char start_date[12];
char yearStr[4],monthStr[2],dayStr[2];
float starting_row,starting_col;
long int offset_into_data = 0;
int NumOfBirds,year,day,month;
int option;
while ((option = getopt(argc, argv,"y:m:d:r:c:N:")) != -1) {
switch (option) {
case 'y' : year = atoi(optarg);
break;
case 'm' : month = atoi(optarg);
break;
case 'd' : day = atoi(optarg);
break;
case 'r' : starting_row = atof(optarg);
break;
case 'c' : starting_col = atof(optarg);
break;
// case 't' : breadth = atoi(optarg);
// break;
case 'N' : NumOfBirds = atoi(optarg);
break;
default: printf("\nUsage: birds -y Year -m Month(Number) -d DayOfTheMonth -r StartingRow -c StartingCol -N NumberOfBirds\n");
exit(EXIT_FAILURE);
}
}
/** If starting row is greater than or equal the row that we are interested in; Below a particular row we are not interested in the flight of the birds**/
if(starting_row >= MAX_LAT_SOUTH){
printf("\t\tProvided starting row is below the southern most lattitude at which the model is set to stop\n");
printf("\t\tEither change the starting row location and/or MAX_LAT upto which the birds can fly\n");
return -1;
}
//-----------------------------------------------Day-----------------------------------------//
/** Making sure random date is not provided **/
if((day>0) && (day<32)){
sprintf(dayStr,"%d",day);
}else{
printf("\t\t Invalid date provided; Date should be greater than 0 and less than 32\n");
return -1;
}
//-----------------------------------------------Month-----------------------------------------//
/** Making sure month provided is between August and November inclusive **/
if((month < 12) && (month > 7)){
sprintf(monthStr,"%d",month);
}else{
printf("\t\t Invalid month provided; Use between 8 and 11 inclusive\n");
return -1;
}
/** Converting month and day information into number of timesteps; Special case of AUG 1st is also taken care of
Instead of AUG 1 it starts at August 2 (because data starts at 7pm but birds fly at 6pm) **/
offset_into_data = convert_to_month(month,day);
printf("Offset into data is: %ld\n",offset_into_data);
//-----------------------------------------------Year-----------------------------------------//
/** Checking if correct year specified **/
if((year>= 2008) && (year<=2013)){
//Add file location here
sprintf(yearStr,"%d",year);
strcpy(yearFileName,baseFileName);
strcat(yearFileName,yearStr);
strcat(yearFileName,"/");
}
else{
printf("\n\tInvalid year specified\n\tSpecified %d; Use years from 2008 to 2013 in its full format\n",year);
printf("\t\tUsage: birds -y Year -m Month(Number) -d DayOfTheMonth -r StartingRow -c StartingCol -N NumberOfBirds\n");
return -1;
}
strcpy(start_date,yearStr);
strcat(start_date,"/");
strcat(start_date,monthStr);
strcat(start_date,"/");
sprintf(dayStr,"%d",day);
strcat(start_date,dayStr);
//------------Opening position data file where lat and long data will be stored----------------//
FILE *posdataTxt,*vdataTxt,*udataTxt,*v10dataTxt,*u10dataTxt,*precipTxt,*pressureTxt,*lwTxt,*dirTxt;
posdataTxt = fopen("posdata.txt","a");
if(posdataTxt == NULL) {
perror("Cannot open position data file\n");
return -1;
}
//----------------------Opening U850 data file----------------------------//
memset(fullFileName,0,strlen(fullFileName));
strcpy(fullFileName,yearFileName);
strcat(fullFileName,"U850.txt");
printf("U50 filename is %s \n",fullFileName);
udataTxt = fopen(fullFileName,"r");
if(udataTxt == NULL) {
perror("Cannot open file with U850 data\n");
return -1;
}
//------------------------Opening V850 data file--------------------------//
memset(fullFileName,0,strlen(fullFileName));
strcpy(fullFileName,yearFileName);
strcat(fullFileName,"V850.txt");
vdataTxt = fopen(fullFileName,"r");
if(vdataTxt == NULL) {
perror("Cannot open file with V850 data\n");
return -1;
}
//-----------------------Opening U10 data file---------------------------//
//Birds check the wind at the surface, so the u and v components
//at 10m are required as well
memset(fullFileName,0,strlen(fullFileName));
strcpy(fullFileName,yearFileName);
strcat(fullFileName,"U10.txt");
u10dataTxt = fopen(fullFileName,"r");
if(u10dataTxt == NULL) {
perror("Cannot open file with U10 data\n");
return -1;
}
//-----------------------Opening V10 data file---------------------------//
memset(fullFileName,0,strlen(fullFileName));
strcpy(fullFileName,yearFileName);
strcat(fullFileName,"V10.txt");
v10dataTxt = fopen(fullFileName,"r");
if(v10dataTxt == NULL) {
perror("Cannot open file with V10 data\n");
return -1;
}
//--------------------Opening PRCP data file------------------------------//
memset(fullFileName,0,strlen(fullFileName));
strcpy(fullFileName,yearFileName);
strcat(fullFileName,"PRCP.txt");
precipTxt = fopen(fullFileName,"r");
if(precipTxt == NULL) {
perror("Cannot open file with PRCP data\n");
return -1;
}
//------------------------Opening MSLP data file--------------------------//
memset(fullFileName,0,strlen(fullFileName));
strcpy(fullFileName,yearFileName);
strcat(fullFileName,"MSLP.txt");
pressureTxt = fopen(fullFileName,"r");
if(pressureTxt == NULL) {
perror("Cannot open file with pressure data!\n");
return -1;
}
//--------------------------Opening Land vs Water File---------------------//
lwTxt = fopen("./Lw_and_Dir/land_water_detail.txt","r");
if(lwTxt == NULL) {
perror("Cannot open file with direction data\n");
return -1;
}
//--------------------------Opening Direction file
//--------------------(Example: ext_crop.txt or extP_crop.txt)-------------//
dirTxt = fopen("./Lw_and_Dir/ext_Final_NewCoordSystem.txt","r");
//dirTxt = fopen("ext_crop.txt","r");
if(dirTxt == NULL) {
perror("Cannot open file with direction data\n");
return -1;
}
//-----------------------------Setting Heap Size,printf buffer size etc--------------------------------------------//
size_t limit;
HANDLE_ERROR(cudaDeviceSetLimit(cudaLimitPrintfFifoSize, 500 * 1024 * 1024));
cudaDeviceGetLimit(&limit,cudaLimitPrintfFifoSize);
HANDLE_ERROR(cudaDeviceSetLimit(cudaLimitMallocHeapSize,(size_t)(6 * LAT_SIZE * LONG_SIZE * TIMESTEPS * sizeof(float))));
//--------------------------Memory Allocation for global arrays containing weather data----------------------------//
float *h_row,*h_col;
float *d_row,*d_col;
float *d_udata,*d_vdata,*d_u10data,*d_v10data,*d_lwData;
float *d_dirData,*d_precipData,*d_pressureData;
uint8_t *h_birdStatus,*d_birdStatus;
dirData = (float*) malloc(LAT_SIZE * LONG_SIZE * sizeof(float));
h_row = (float*) malloc(NumOfBirds * (TIMESTEPS + 1) * sizeof(float));
h_col = (float*) malloc(NumOfBirds * (TIMESTEPS + 1) * sizeof(float));
h_birdStatus = (uint8_t*)malloc(NumOfBirds * sizeof(uint8_t));
udata = (float*)malloc(LAT_SIZE * LONG_SIZE * TIMESTEPS * sizeof(float));
vdata = (float*)malloc(LAT_SIZE * LONG_SIZE * TIMESTEPS * sizeof(float));
u10data = (float*)malloc(LAT_SIZE * LONG_SIZE * TIMESTEPS * sizeof(float));
v10data = (float*)malloc(LAT_SIZE * LONG_SIZE * TIMESTEPS * sizeof(float));
precipData = (float*)malloc(LAT_SIZE * LONG_SIZE * TIMESTEPS * sizeof(float));
pressureData = (float*)malloc(LAT_SIZE * LONG_SIZE * TIMESTEPS * sizeof(float));
lwData = (float*) malloc(LAT_SIZE * LONG_SIZE * sizeof(float));
//------------------------------------------------------------------------------------------------------------------//
/*
HANDLE_ERROR(cudaMallocHost((void**)&udata,LAT_SIZE * LONG_SIZE * TIMESTEPS * sizeof(float)));
HANDLE_ERROR(cudaMallocHost((void**)&vdata,LAT_SIZE * LONG_SIZE * TIMESTEPS * sizeof(float)));
HANDLE_ERROR(cudaMallocHost((void**)&u10data,LAT_SIZE * LONG_SIZE * TIMESTEPS * sizeof(float)));
HANDLE_ERROR(cudaMallocHost((void**)&v10data,LAT_SIZE * LONG_SIZE * TIMESTEPS * sizeof(float)));
HANDLE_ERROR(cudaMallocHost((void**)&precipData,LAT_SIZE * LONG_SIZE * TIMESTEPS * sizeof(float)));
HANDLE_ERROR(cudaMallocHost((void**)&pressureData,LAT_SIZE * LONG_SIZE * TIMESTEPS * sizeof(float)));
HANDLE_ERROR(cudaMallocHost((void**)&lwData,LAT_SIZE * LONG_SIZE * sizeof(float)));
*/
printf("Size of large arrays is %zd\n",sizeof(udata)/sizeof(udata[0]));
printf("Size of large arrays is %ld\n",sizeof(udata)/sizeof(float));
printf("Size of large arrays is %d\n",sizeof(udata)/sizeof(float));
int ii;
for(ii=0;ii<(NumOfBirds * (TIMESTEPS + 1));ii++){
*(h_row + ii) = starting_row;
*(h_col + ii) = starting_col;
}
for(ii=0;ii<NumOfBirds;ii++){
h_birdStatus[ii] = (uint8_t)1;
}
//--------------------------Initializing the structures-------------------------------------------------------------------//
inpStruct[0].fp = vdataTxt;
inpStruct[0].inpVals = vdata;
inpStruct[1].fp = udataTxt;
inpStruct[1].inpVals = udata;
inpStruct[2].fp = v10dataTxt;
inpStruct[2].inpVals = v10data;
inpStruct[3].fp = u10dataTxt;
inpStruct[3].inpVals = u10data;
inpStruct[4].fp = precipTxt;
inpStruct[4].inpVals = precipData;
inpStruct[5].fp = pressureTxt;
inpStruct[5].inpVals = pressureData;
inpStruct[6].fp = lwTxt;
inpStruct[6].inpVals = lwData;
inpStruct[7].fp = dirTxt;
inpStruct[7].inpVals = dirData;
/** Using pthreads to read from the files in parallel**/
pthread_t threads[8];
int i,j;
for(i=0;i<8;i++){
if(pthread_create(&threads[i],NULL,read_dataFiles,(void*)&inpStruct[i]) != 0){
fprintf(stderr,"ERROR: Thread creation using pthreads failed\n");
return -1;
}
}
for(i=0;i<8;i++){
if(pthread_join(threads[i],NULL)!=0){
fprintf(stderr,"ERROR: Thread join failed\n");
return -1;
}
}
printf("End of parallel data read\n");
//-----------------------------------Getting Random Values-------------------------------------------//
int DeviceCount;
float *rand_norm_nums;
curandState_t* states;
/** Getting the total number of devices available **/
HANDLE_ERROR(cudaGetDeviceCount(&DeviceCount));
HANDLE_ERROR(cudaSetDevice(DeviceCount - 1));
HANDLE_ERROR(cudaDeviceReset());
HANDLE_ERROR(cudaMalloc((void**)&states,NumOfBirds * 2 * TIMESTEPS * sizeof(curandState_t)));
HANDLE_ERROR(cudaMalloc((void**)&rand_norm_nums,NumOfBirds * 2 * TIMESTEPS * sizeof(float)));
//Making each block have total threads of 32
//GridSize setup such that total y grid is of size NumOfBirds and x grid is of size TIMESTEPS
dim3 blockSize(32,1,1);
dim3 gridSize(((TIMESTEPS * 2) + 31)/32,NumOfBirds,1);
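//For instance (illustrative values only): TIMESTEPS = 720 and NumOfBirds = 100 would give
//gridSize = ((1440+31)/32, 100, 1) = (45, 100, 1), i.e. 4500 blocks of 32 threads,
//one thread per random draw (two draws per bird per timestep).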
setup_kernel<<<gridSize,blockSize>>>(time(NULL),states);
HANDLE_ERROR(cudaDeviceSynchronize());
generate_kernel<<<gridSize,blockSize>>>(states,rand_norm_nums);
//Do not need to get them back at all; Will have to send it back to GPU
// cudaMemcpy(cpu_nums,rand_norm_nums, LAT_SIZE * LONG_SIZE * sizeof(float),cudaMemcpyDeviceToHost);
// cudaMemcpy(dir_u,d_u_dirAngle,LAT_SIZE * LONG_SIZE * sizeof(float),cudaMemcpyDeviceToHost);
// cudaMemcpy(dir_v,d_v_dirAngle,LAT_SIZE * LONG_SIZE * sizeof(float),cudaMemcpyDeviceToHost);
/* print them out */
/* for ( j = 0; j < LAT_SIZE; j++) {
for( i = 0;i<LONG_SIZE;i++){
//printf("%f ", cpu_nums[j*LONG_SIZE + i]);
if(i == LONG_SIZE -1) {
printf("%f\n",dir_u[j * LAT_SIZE + i]);
}
else {
printf("%f ",dir_u[j * LAT_SIZE + i]);
}
}
// printf("\n");
}
*/
HANDLE_ERROR(cudaDeviceSynchronize());
// free the memory we allocated for the states
HANDLE_ERROR(cudaFree(states));
printf("Random number generator is working\n");
//-------------------------------------------------------------------------------------------------------------------------//
HANDLE_ERROR(cudaMalloc((void**)&d_row,NumOfBirds * (TIMESTEPS + 1 ) * sizeof(float)));
HANDLE_ERROR(cudaMalloc((void**)&d_col,NumOfBirds * (TIMESTEPS + 1 ) * sizeof(float)));
HANDLE_ERROR(cudaMalloc((void**)&d_lwData,LAT_SIZE * LONG_SIZE * sizeof(float)));
HANDLE_ERROR(cudaMalloc((void**)&d_dirData,LAT_SIZE * LONG_SIZE * sizeof(float)));
HANDLE_ERROR(cudaMemcpy(d_row,h_row,NumOfBirds * (TIMESTEPS + 1 ) * sizeof(float),cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_col,h_col,NumOfBirds * (TIMESTEPS + 1 ) * sizeof(float),cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_lwData,lwData,LAT_SIZE * LONG_SIZE * sizeof(float),cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_dirData,dirData,LAT_SIZE * LONG_SIZE * sizeof(float),cudaMemcpyHostToDevice));
//-------------------------------------------------------------------------------------------------------------//
size_t MemoryEachVar,DataPerTransfer,SizePerTimestep;
int TimestepsPerTransfer,TimestepsLastTransfer,DaysPerTransfer;
size_t MemoryRemaining,TotalMemory;
HANDLE_ERROR(cudaSetDevice(DeviceCount - 1));
// Getting the total remaining memory that the device can allocate
HANDLE_ERROR(cudaMemGetInfo(&MemoryRemaining,&TotalMemory));
MemoryRemaining -= 2*NumOfBirds* (TIMESTEPS + 1) * sizeof(float);
MemoryRemaining -= NumOfBirds * sizeof(uint8_t);
//Leave roughly 100MB of free device memory as headroom
MemoryRemaining -= 100 * 1000000;
printf("Total mem: %zd,Free mem: %zd\n",TotalMemory,MemoryRemaining);
printf("\n\n\t\t Total Memory remaining is: %zd \n",MemoryRemaining);
//Memory that each variable gets every timestep
MemoryEachVar = MemoryRemaining/NUM_DATA_FILES;
printf("\t\t Memory for each variable is: %zd \n",MemoryEachVar);
// Need to send data per timestep so has to be a multiple of LAT_SIZE *LONG_SIZE* sizeof(float) * 24
//Can also be called as Minimum_Size_Per_Timestep; Sending data so that it is according to days
SizePerTimestep = LAT_SIZE * LONG_SIZE * TIMESTEPS_PER_DAY * sizeof(float);
// To get a number divisible by SizePerTimestep
//DataPerTransfer is the data size to be transferred for each variable
//Example, if 100MB then 100MB for each of the vars is transferred each time
DataPerTransfer = (MemoryEachVar/SizePerTimestep) * SizePerTimestep;
DaysPerTransfer = DataPerTransfer/SizePerTimestep;
TimestepsPerTransfer = DaysPerTransfer * TIMESTEPS_PER_DAY;
printf("\t\tChecking Division: %zd\n",MemoryEachVar/SizePerTimestep);
printf("\t\t Total Timesteps per Transfer of data is: %ld \n",TimestepsPerTransfer);
printf("\t\tData per transfer is %zd\n",DataPerTransfer);
//------------------------------------Getting the size of data needed per transfer---------------------------------------------//
int divisible,Transfers;
long int DataLastTransfer;//Per variable
Transfers = (TOTAL_DAYS * TIMESTEPS_PER_DAY) / TimestepsPerTransfer;
divisible = (TOTAL_DAYS*TIMESTEPS_PER_DAY) % TimestepsPerTransfer;
if(divisible != 0){
Transfers++;
}
printf("Total Transfers required: %ld\n",Transfers);
/** Total number of data transfers required **/
const int TotalTransfers = Transfers;
TimestepsLastTransfer = (TOTAL_DAYS*TIMESTEPS_PER_DAY) - (Transfers-1)*TimestepsPerTransfer;
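//For example (hypothetical numbers): with TOTAL_DAYS*TIMESTEPS_PER_DAY = 2208 and
//TimestepsPerTransfer = 312, Transfers = 8 (7 full transfers plus a remainder) and the
//last transfer covers 2208 - 7*312 = 24 timesteps.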
/*
cudaStream_t stream[TotalTransfers-1];
for(i=0;i<TotalTransfers-1;i++){
HANDLE_ERROR(cudaStreamCreate(&stream[i]));
}
*/
DataLastTransfer = TOTAL_DAYS * TIMESTEPS_PER_DAY * LAT_SIZE * LONG_SIZE * sizeof(float)
- DataPerTransfer * (TotalTransfers-1);
//---------------------------------------Memory allocation per transfer----------------------------------------------------------//
long int start_timestep,cur_timestep,max_timesteps,ptrOffset;
ptrOffset = 0;
//min_timesteps = offset_into_data;
//printf("Current timestep variable is:%ld\n",min_timesteps);
//return 0;
cur_timestep = offset_into_data;
//printf("cur_timestep = offset_into_data; Value in cur_timestep is: %ld\n",cur_timestep);
for(i=0;i<TotalTransfers-1;i++){
HANDLE_ERROR(cudaSetDevice(DeviceCount - 1));
HANDLE_ERROR(cudaMemGetInfo(&MemoryRemaining,&TotalMemory));
printf("Total mem: %zd,Free mem(Before any allocation): %zd\n",TotalMemory,MemoryRemaining);
HANDLE_ERROR(cudaMemGetInfo(&MemoryRemaining,&TotalMemory));
printf("Total mem: %zd,Free mem(After SetDevice): %zd\n",TotalMemory,MemoryRemaining);
//HANDLE_ERROR(cudaStreamCreate(&stream[i]));
HANDLE_ERROR(cudaMemGetInfo(&MemoryRemaining,&TotalMemory));
printf("Total mem: %zd,Free mem(After Stream Create): %zd\n",TotalMemory,MemoryRemaining);
HANDLE_ERROR(cudaMalloc((void**)&d_udata,DataPerTransfer));
HANDLE_ERROR(cudaMemGetInfo(&MemoryRemaining,&TotalMemory));
printf("Total mem: %zd,Free mem(After udata allocation): %zd\n",TotalMemory,MemoryRemaining);
HANDLE_ERROR(cudaMalloc((void**)&d_vdata,DataPerTransfer));
HANDLE_ERROR(cudaMemGetInfo(&MemoryRemaining,&TotalMemory));
printf("Total mem: %zd,Free mem(After vdata allocation): %zd\n",TotalMemory,MemoryRemaining);
HANDLE_ERROR(cudaMalloc((void**)&d_u10data,DataPerTransfer));
HANDLE_ERROR(cudaMemGetInfo(&MemoryRemaining,&TotalMemory));
printf("Total mem: %zd,Free mem(After u10data allocation): %zd\n",TotalMemory,MemoryRemaining);
HANDLE_ERROR(cudaMalloc((void**)&d_v10data,DataPerTransfer));
HANDLE_ERROR(cudaMemGetInfo(&MemoryRemaining,&TotalMemory));
printf("Total mem: %zd,Free mem(After v10data allocation): %zd\n",TotalMemory,MemoryRemaining);
HANDLE_ERROR(cudaMalloc((void**)&d_precipData,DataPerTransfer));
HANDLE_ERROR(cudaMemGetInfo(&MemoryRemaining,&TotalMemory));
printf("Total mem: %zd,Free mem(After precipData allocation): %zd\n",TotalMemory,MemoryRemaining);
HANDLE_ERROR(cudaMalloc((void**)&d_pressureData,DataPerTransfer));
HANDLE_ERROR(cudaMemGetInfo(&MemoryRemaining,&TotalMemory));
printf("Total mem: %zd,Free mem(After pressureData allocation): %zd\n",TotalMemory,MemoryRemaining);
HANDLE_ERROR(cudaMalloc((void**)&d_birdStatus,NumOfBirds * sizeof(uint8_t)));
HANDLE_ERROR(cudaMemGetInfo(&MemoryRemaining,&TotalMemory));
printf("Total mem: %zd,Free mem(After birdStatus allocation): %zd\n",TotalMemory,MemoryRemaining);
HANDLE_ERROR(cudaDeviceSynchronize());
printf("After all the host allocations %d\n",i);
//-----------------------------------------Initializing gridSize and block Size-------------------------------//
//HANDLE_ERROR(cudaSetDevice(DeviceCount - 1));
dim3 gridSize((NumOfBirds + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK,1,1);
dim3 blockSize(THREADS_PER_BLOCK,1,1);
HANDLE_ERROR(cudaMemGetInfo(&MemoryRemaining,&TotalMemory));
printf("Total mem: %zd,Free mem(After grid and block init): %zd\n",TotalMemory,MemoryRemaining);
//-----------------------------------------Copying data from CPU to GPU------------------------------------------------//
HANDLE_ERROR(cudaSetDevice(DeviceCount - 1));
HANDLE_ERROR(cudaMemcpy(d_udata,udata+ptrOffset,DataPerTransfer,cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_vdata,vdata+ptrOffset,DataPerTransfer,cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_u10data,u10data+ptrOffset,DataPerTransfer,cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_v10data,v10data+ptrOffset,DataPerTransfer,cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_precipData,precipData+ptrOffset,DataPerTransfer,cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_pressureData,pressureData+ptrOffset,DataPerTransfer,cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_birdStatus,h_birdStatus,NumOfBirds * sizeof(uint8_t),cudaMemcpyHostToDevice));
/*
HANDLE_ERROR(cudaMemcpyAsync(d_lwData,lwData,LAT_SIZE * LONG_SIZE * sizeof(float),cudaMemcpyHostToDevice,stream[i]));
HANDLE_ERROR(cudaMemGetInfo(&MemoryRemaining,&TotalMemory));
printf("Total mem: %zd,Free mem(After grid and block init): %zd\n",TotalMemory,MemoryRemaining);
HANDLE_ERROR(cudaMemcpyAsync(d_udata,udata + ptrOffset,DataPerTransfer,cudaMemcpyHostToDevice,stream[i]));
HANDLE_ERROR(cudaMemcpyAsync(d_vdata,(vdata+ptrOffset),DataPerTransfer,cudaMemcpyHostToDevice,stream[i]));
HANDLE_ERROR(cudaMemcpyAsync(d_u10data,(u10data+ptrOffset),DataPerTransfer,cudaMemcpyHostToDevice,stream[i]));
HANDLE_ERROR(cudaMemcpyAsync(d_v10data,(v10data+ptrOffset),DataPerTransfer,cudaMemcpyHostToDevice,stream[i]));
HANDLE_ERROR(cudaMemcpyAsync(d_precipData,(precipData+ptrOffset),DataPerTransfer,cudaMemcpyHostToDevice,stream[i]));
HANDLE_ERROR(cudaMemcpyAsync(d_pressureData,(pressureData+ptrOffset),DataPerTransfer,cudaMemcpyHostToDevice,stream[i]));
*/
//-----------------------------------------Calling the Kernel-----------------------------------------------------------//
//All of these are inclusive
//If TimeStepsPerTransfer is 9, then they would be: 0-8, 9-17, 18-26,...
max_timesteps = ((i+1) * TimestepsPerTransfer) - 1;
printf("Current timestep variable is:%ld\n",cur_timestep);
printf("Max timestep is: %ld\n",max_timesteps);
printf("Offset into data is:%ld\n",offset_into_data);
/*if((offset_into_data <= max_timesteps) && (i > 0)){
cur_timestep = i * TimestepsPerTransfer;
//cur_timestep = offset_into_data;
}else{
cur_timestep = offset_into_data;
}
*/
start_timestep = i * TimestepsPerTransfer;
if((max_timesteps - offset_into_data) > TimestepsPerTransfer){
cur_timestep = start_timestep;
}else{
cur_timestep = offset_into_data;
}
printf("Current timestep variable after checking if offset less than max_timesteps is:%ld\n",cur_timestep);
bird_movement<<<gridSize,blockSize>>>(d_row,d_col,NumOfBirds,start_timestep,cur_timestep,max_timesteps,d_udata,d_vdata,
d_u10data,d_v10data,d_dirData,rand_norm_nums,d_precipData,d_pressureData,d_lwData,d_birdStatus);
//HANDLE_ERROR(cudaStreamSynchronize(stream[i]));
HANDLE_ERROR(cudaDeviceSynchronize());
//---------------------------------Freeing allocated memory in GPU and pinned memory in CPU-------------------//
printf("Before freeing;Inside the loop\n");
HANDLE_ERROR(cudaMemcpy(h_birdStatus,d_birdStatus,NumOfBirds * sizeof(uint8_t),cudaMemcpyDeviceToHost));
//HANDLE_ERROR(cudaStreamDestroy(stream[i]));
// HANDLE_ERROR(cudaFree(d_lwData));
//HANDLE_ERROR(cudaFree(d_birdStatus));
HANDLE_ERROR(cudaFree(d_udata));
HANDLE_ERROR(cudaFree(d_vdata));
HANDLE_ERROR(cudaFree(d_u10data));
HANDLE_ERROR(cudaFree(d_v10data));
HANDLE_ERROR(cudaFree(d_precipData));
HANDLE_ERROR(cudaFree(d_pressureData));
//ptrOffset+= DataPerTransfer/sizeof(float);
ptrOffset = (DataPerTransfer/sizeof(float)) * (i + 1);
printf("After all freeing %d\n",i);
}
/*
HANDLE_ERROR(cudaMemcpy(h_row,d_row,NumOfBirds * (TIMESTEPS + 1 ) * sizeof(float),cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaMemcpy(h_col,d_col,NumOfBirds * (TIMESTEPS + 1 ) * sizeof(float),cudaMemcpyDeviceToHost));
for(i = 0;i < NumOfBirds * (TIMESTEPS + 1); i++ ){
printf("%f ",h_row[i]);
if(i == TIMESTEPS){
printf("%f \n",h_row[i]);
}
}
printf("\n\n");
for(i = 0;i < NumOfBirds * (TIMESTEPS + 1); i++ ){
printf("%f ",h_col[i]);
if(i == TIMESTEPS){
printf("%f \n",h_col[i]);
}
}
*/
//-----------------------------------------------------------------------------------------------------------//
//----------------------------------------------------Last Iteration-----------------------------------------//
//-----------------------------------------------------------------------------------------------------------//
// Last iteration where the size might not be the same as others
long int DataRemaining;//Per variable; DataPerTransfer is already a per-variable size, so no further division is needed
DataRemaining = LONG_SIZE * LAT_SIZE * TIMESTEPS * sizeof(float) - (DataPerTransfer * (TotalTransfers-1));
start_timestep = (TotalTransfers - 1) * TimestepsPerTransfer;
max_timesteps = TIMESTEPS;
ptrOffset = (DataPerTransfer/sizeof(float)) * (TotalTransfers - 1);
//----------------------------------------------------------------------------------------//
HANDLE_ERROR(cudaSetDevice(DeviceCount - 1));
HANDLE_ERROR(cudaMemGetInfo(&MemoryRemaining,&TotalMemory));
printf("Total mem: %zd,Free mem(Before any allocation): %zd\n",TotalMemory,MemoryRemaining);
HANDLE_ERROR(cudaMemGetInfo(&MemoryRemaining,&TotalMemory));
printf("Total mem: %zd,Free mem(After SetDevice): %zd\n",TotalMemory,MemoryRemaining);
//HANDLE_ERROR(cudaStreamCreate(&stream[i]));
HANDLE_ERROR(cudaMemGetInfo(&MemoryRemaining,&TotalMemory));
printf("Total mem: %zd,Free mem(After Stream Create): %zd\n",TotalMemory,MemoryRemaining);
HANDLE_ERROR(cudaMalloc((void**)&d_udata,DataRemaining));
HANDLE_ERROR(cudaMemGetInfo(&MemoryRemaining,&TotalMemory));
printf("Total mem: %zd,Free mem(After udata allocation): %zd\n",TotalMemory,MemoryRemaining);
HANDLE_ERROR(cudaMalloc((void**)&d_vdata,DataRemaining));
HANDLE_ERROR(cudaMemGetInfo(&MemoryRemaining,&TotalMemory));
printf("Total mem: %zd,Free mem(After vdata allocation): %zd\n",TotalMemory,MemoryRemaining);
HANDLE_ERROR(cudaMalloc((void**)&d_u10data,DataRemaining));
HANDLE_ERROR(cudaMemGetInfo(&MemoryRemaining,&TotalMemory));
printf("Total mem: %zd,Free mem(After u10data allocation): %zd\n",TotalMemory,MemoryRemaining);
HANDLE_ERROR(cudaMalloc((void**)&d_v10data,DataRemaining));
HANDLE_ERROR(cudaMemGetInfo(&MemoryRemaining,&TotalMemory));
printf("Total mem: %zd,Free mem(After v10data allocation): %zd\n",TotalMemory,MemoryRemaining);
HANDLE_ERROR(cudaMalloc((void**)&d_precipData,DataRemaining));
HANDLE_ERROR(cudaMemGetInfo(&MemoryRemaining,&TotalMemory));
printf("Total mem: %zd,Free mem(After precipData allocation): %zd\n",TotalMemory,MemoryRemaining);
HANDLE_ERROR(cudaMalloc((void**)&d_pressureData,DataRemaining));
HANDLE_ERROR(cudaMemGetInfo(&MemoryRemaining,&TotalMemory));
printf("Total mem: %zd,Free mem(After pressureData allocation): %zd\n",TotalMemory,MemoryRemaining);
//HANDLE_ERROR(cudaMalloc((void**)&d_birdStatus,NumOfBirds * sizeof(uint8_t)));
//HANDLE_ERROR(cudaMemGetInfo(&MemoryRemaining,&TotalMemory));
//printf("Total mem: %zd,Free mem(After pressureData allocation): %zd\n",TotalMemory,MemoryRemaining);
HANDLE_ERROR(cudaDeviceSynchronize());
printf("After all the host allocations %d\n",i);
//-----------------------------------------Initializing gridSize and block Size-------------------------------//
printf("Before grid and block size allocations\n");
//dim3 gridSize2((NumOfBirds + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK,1,1);
//dim3 blockSize2(THREADS_PER_BLOCK,1,1);
printf("After grid and block size allocations\n");
//-----------------------------------------Copying data from CPU to GPU----------------------------------------//
HANDLE_ERROR(cudaSetDevice(DeviceCount - 1));
HANDLE_ERROR(cudaMemcpy(d_udata,udata+ptrOffset,DataRemaining,cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_vdata,vdata+ptrOffset,DataRemaining,cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_u10data,u10data+ptrOffset,DataRemaining,cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_v10data,v10data+ptrOffset,DataRemaining,cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_precipData,precipData+ptrOffset,DataRemaining,cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_pressureData,pressureData+ptrOffset,DataRemaining,cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_birdStatus,h_birdStatus,NumOfBirds * sizeof(uint8_t),cudaMemcpyHostToDevice));
//-----------------------------------------Calling the Kernel-------------------------------------------------//
if((max_timesteps - offset_into_data) > TimestepsLastTransfer){
cur_timestep = start_timestep;
}else{
cur_timestep = offset_into_data;
}
printf("Before calling the kernel\n");
bird_movement<<<gridSize2,blockSize2>>>(d_row,d_col,NumOfBirds,start_timestep,cur_timestep,max_timesteps,d_udata,d_vdata,
d_u10data,d_v10data,d_dirData,rand_norm_nums,d_precipData,d_pressureData,d_lwData,d_birdStatus);
HANDLE_ERROR(cudaDeviceSynchronize());
HANDLE_ERROR(cudaMemcpy(h_row,d_row,NumOfBirds * (TIMESTEPS + 1 ) * sizeof(float),cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaMemcpy(h_col,d_col,NumOfBirds * (TIMESTEPS + 1 ) * sizeof(float),cudaMemcpyDeviceToHost));
for(i = 0;i < NumOfBirds * (TIMESTEPS + 1); i++ ){
printf("%f ",h_row[i]);
if(((i+1) % (TIMESTEPS + 1)) == 0){
printf("%f \n",h_row[i]);
}
}
printf("\n\n");
for(i = 0;i < NumOfBirds * (TIMESTEPS + 1); i++ ){
printf("%f ",h_col[i]);
if(((i+1) % (TIMESTEPS + 1)) == 0){
printf("%f \n",h_col[i]);
}
}
//-----------------------------------------------Freeing allocated memory--------------------------------------//
// HANDLE_ERROR(cudaStreamDestroy(stream[0]));
HANDLE_ERROR(cudaFree(rand_norm_nums));
HANDLE_ERROR(cudaFree(d_birdStatus));
HANDLE_ERROR(cudaFree(d_udata));
HANDLE_ERROR(cudaFree(d_vdata));
HANDLE_ERROR(cudaFree(d_u10data));
HANDLE_ERROR(cudaFree(d_v10data));
HANDLE_ERROR(cudaFree(d_precipData));
HANDLE_ERROR(cudaFree(d_pressureData));
/*
HANDLE_ERROR(cudaFreeHost(udata));
HANDLE_ERROR(cudaFreeHost(vdata));
HANDLE_ERROR(cudaFreeHost(u10data));
HANDLE_ERROR(cudaFreeHost(v10data));
HANDLE_ERROR(cudaFreeHost(precipData));
HANDLE_ERROR(cudaFreeHost(pressureData));
HANDLE_ERROR(cudaFreeHost(lwData));
*/
free(dirData);
free(udata);
free(vdata);
free(u10data);
free(v10data);
free(precipData);
free(pressureData);
free(lwData);
free(h_birdStatus);
/*
HANDLE_ERROR(cudaFree(d_lwData));
HANDLE_ERROR(cudaFree(d_u_dirAngle));
HANDLE_ERROR(cudaFree(d_v_dirAngle));
printf("After freeing everything\n");
*/
HANDLE_ERROR(cudaFree(d_row));
HANDLE_ERROR(cudaFree(d_col));
free(h_row);
free(h_col);
//free(lwData);
//free(dirData);
fclose(dirTxt);
fclose(posdataTxt);
fclose(udataTxt);
fclose(vdataTxt);
fclose(v10dataTxt);
fclose(u10dataTxt);
fclose(precipTxt);
fclose(pressureTxt);
fclose(lwTxt);
printf("End\n");
return 0;
}
|
8c38f534e17dece055dcea268e85d799430b4860.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* CUDA Kernels for Expectation Maximization with Gaussian Mixture Models
*
* Author: Andrew Pangborn
*
* Department of Computer Engineering
* Rochester Institute of Technology
*/
#ifndef _TEMPLATE_KERNEL_H_
#define _TEMPLATE_KERNEL_H_
#include "gaussian.h"
/*
* Compute the multivariate mean of the FCS data
*/
__device__ void mvtmeans(float* fcs_data, int num_dimensions, int num_events, float* means) {
int tid = threadIdx.x;
if(tid < num_dimensions) {
means[tid] = 0.0;
// Sum up all the values for each dimension
for(int i = 0; i < num_events; i++) {
means[tid] += fcs_data[i*num_dimensions+tid];
}
// Divide by the # of elements to get the average
means[tid] /= (float) num_events;
}
}
__device__ void averageVariance(float* fcs_data, float* means, int num_dimensions, int num_events, float* avgvar) {
int tid = threadIdx.x;
__shared__ float variances[NUM_DIMENSIONS];
__shared__ float total_variance;
// Compute average variance for each dimension
if(tid < num_dimensions) {
variances[tid] = 0.0;
// Sum up all the variance
for(int i = 0; i < num_events; i++) {
// accumulate E[x^2]; Var(x) = E[x^2] - mean^2, and the mean^2 term is subtracted below
variances[tid] += (fcs_data[i*num_dimensions + tid])*(fcs_data[i*num_dimensions + tid]);
}
variances[tid] /= (float) num_events;
variances[tid] -= means[tid]*means[tid];
}
__syncthreads();
if(tid == 0) {
total_variance = 0.0;
for(int i=0; i<num_dimensions;i++) {
total_variance += variances[i];
}
*avgvar = total_variance / (float) num_dimensions;
}
}
// Inverts an NxN matrix 'data' stored as a 1D array in-place
// 'actualsize' is N
// Computes the log of the determinant of the original matrix in the process
__device__ void invert(float* data, int actualsize, float* log_determinant) {
int maxsize = actualsize;
int n = actualsize;
if(threadIdx.x == 0) {
*log_determinant = 0.0;
// sanity check
if (actualsize == 1) {
*log_determinant = logf(data[0]);
data[0] = 1.0 / data[0];
} else {
for (int i=1; i < actualsize; i++) data[i] /= data[0]; // normalize row 0
for (int i=1; i < actualsize; i++) {
for (int j=i; j < actualsize; j++) { // do a column of L
float sum = 0.0;
for (int k = 0; k < i; k++)
sum += data[j*maxsize+k] * data[k*maxsize+i];
data[j*maxsize+i] -= sum;
}
if (i == actualsize-1) continue;
for (int j=i+1; j < actualsize; j++) { // do a row of U
float sum = 0.0;
for (int k = 0; k < i; k++)
sum += data[i*maxsize+k]*data[k*maxsize+j];
data[i*maxsize+j] =
(data[i*maxsize+j]-sum) / data[i*maxsize+i];
}
}
for(int i=0; i<actualsize; i++) {
*log_determinant += logf(fabs(data[i*n+i]));
}
for ( int i = 0; i < actualsize; i++ ) // invert L
for ( int j = i; j < actualsize; j++ ) {
float x = 1.0;
if ( i != j ) {
x = 0.0;
for ( int k = i; k < j; k++ )
x -= data[j*maxsize+k]*data[k*maxsize+i];
}
data[j*maxsize+i] = x / data[j*maxsize+j];
}
for ( int i = 0; i < actualsize; i++ ) // invert U
for ( int j = i; j < actualsize; j++ ) {
if ( i == j ) continue;
float sum = 0.0;
for ( int k = i; k < j; k++ )
sum += data[k*maxsize+j]*( (i==k) ? 1.0 : data[i*maxsize+k] );
data[i*maxsize+j] = -sum;
}
for ( int i = 0; i < actualsize; i++ ) // final inversion
for ( int j = 0; j < actualsize; j++ ) {
float sum = 0.0;
for ( int k = ((i>j)?i:j); k < actualsize; k++ )
sum += ((j==k)?1.0:data[j*maxsize+k])*data[k*maxsize+i];
data[j*maxsize+i] = sum;
}
}
}
}
__device__ void compute_pi(clusters_t* clusters, int num_clusters) {
__shared__ float sum;
if(threadIdx.x == 0) {
sum = 0.0;
for(int i=0; i<num_clusters; i++) {
sum += clusters->N[i];
}
}
__syncthreads();
for(int c = threadIdx.x; c < num_clusters; c += blockDim.x) {
if(clusters->N[c] < 0.5f) {
clusters->pi[c] = 1e-10;
} else {
clusters->pi[c] = clusters->N[c] / sum;
}
}
__syncthreads();
}
__device__ void compute_constants(clusters_t* clusters, int num_clusters, int num_dimensions) {
int tid = threadIdx.x;
int num_threads = blockDim.x;
int num_elements = num_dimensions*num_dimensions;
__shared__ float determinant_arg; // only one thread computes the inverse so we need a shared argument
float log_determinant;
__shared__ float matrix[NUM_DIMENSIONS*NUM_DIMENSIONS];
// Invert the matrix for every cluster
int c = blockIdx.x;
// Copy the R matrix into shared memory for doing the matrix inversion
for(int i=tid; i<num_elements; i+= num_threads ) {
matrix[i] = clusters->R[c*num_dimensions*num_dimensions+i];
}
__syncthreads();
#if DIAG_ONLY
if(tid == 0) {
determinant_arg = 1.0f;
for(int i=0; i < num_dimensions; i++) {
determinant_arg *= matrix[i*num_dimensions+i];
matrix[i*num_dimensions+i] = 1.0f / matrix[i*num_dimensions+i];
}
determinant_arg = logf(determinant_arg);
}
#else
invert(matrix,num_dimensions,&determinant_arg);
#endif
__syncthreads();
log_determinant = determinant_arg;
// Copy the matrix from shared memory back into the cluster memory
for(int i=tid; i<num_elements; i+= num_threads) {
clusters->Rinv[c*num_dimensions*num_dimensions+i] = matrix[i];
}
__syncthreads();
// Compute the constant
// Equivalent to: log(1/((2*PI)^(M/2)*det(R)^(1/2)))
// This constant is used in all E-step likelihood calculations
if(tid == 0) {
clusters->constant[c] = -num_dimensions*0.5f*logf(2.0f*PI) - 0.5f*log_determinant;
}
}
/*
* Computes the constant, pi, Rinv for each cluster
*
* Needs to be launched with the number of blocks = number of clusters
*/
__global__ void
constants_kernel(clusters_t* clusters, int num_clusters, int num_dimensions) {
// compute_constants(clusters,num_clusters,num_dimensions);
int tid = threadIdx.x;
int bid = blockIdx.x;
int num_threads = blockDim.x;
int num_elements = num_dimensions*num_dimensions;
__shared__ float determinant_arg; // only one thread computes the inverse so we need a shared argument
__shared__ float sum;
__shared__ float matrix[NUM_DIMENSIONS*NUM_DIMENSIONS];
float log_determinant;
// Invert the matrix for every cluster
// Copy the R matrix into shared memory for doing the matrix inversion
for(int i=tid; i<num_elements; i+= num_threads ) {
matrix[i] = clusters->R[bid*num_dimensions*num_dimensions+i];
}
__syncthreads();
#if DIAG_ONLY
if(tid == 0) {
determinant_arg = 1.0f;
for(int i=0; i < num_dimensions; i++) {
determinant_arg *= matrix[i*num_dimensions+i];
matrix[i*num_dimensions+i] = 1.0f / matrix[i*num_dimensions+i];
}
determinant_arg = logf(determinant_arg);
}
#else
invert(matrix,num_dimensions,&determinant_arg);
#endif
__syncthreads();
log_determinant = determinant_arg;
// Copy the matrix from shared memory back into the cluster memory
for(int i=tid; i<num_elements; i+= num_threads) {
clusters->Rinv[bid*num_dimensions*num_dimensions+i] = matrix[i];
}
__syncthreads();
// Compute the constant
// Equivalent to: log(1/((2*PI)^(M/2)*det(R)^(1/2)))
// This constant is used in all E-step likelihood calculations
if(tid == 0) {
clusters->constant[bid] = -num_dimensions*0.5f*logf(2.0f*PI) - 0.5f*log_determinant;
}
__syncthreads();
if(bid == 0) {
// compute_pi(clusters,num_clusters);
if(tid == 0) {
sum = 0.0;
for(int i=0; i<num_clusters; i++) {
sum += clusters->N[i];
}
}
__syncthreads();
for(int i = tid; i < num_clusters; i += num_threads) {
if(clusters->N[i] < 0.5f) {
clusters->pi[i] = 1e-10;
} else {
clusters->pi[i] = clusters->N[i] / sum;
}
}
}
}
////////////////////////////////////////////////////////////////////////////////
//! @param fcs_data FCS data: [num_events]
//! @param clusters Clusters: [num_clusters]
//! @param num_dimensions number of dimensions in an FCS event
//! @param num_events number of FCS events
////////////////////////////////////////////////////////////////////////////////
__global__ void
seed_clusters_kernel( const float* fcs_data,
clusters_t* clusters,
const int num_dimensions,
const int num_clusters,
const int num_events)
{
int tid = threadIdx.x;
int num_threads = blockDim.x;
int row, col;
float seed;
// Number of elements in the covariance matrix
int num_elements = num_dimensions*num_dimensions;
// shared memory
__shared__ float means[NUM_DIMENSIONS];
__shared__ float avgvar;
__shared__ float variances[NUM_DIMENSIONS];
__shared__ float total_variance;
// Compute the means
// mvtmeans(fcs_data, num_dimensions, num_events, means);
if(tid < num_dimensions) {
means[tid] = 0.0;
// Sum up all the values for each dimension
for(int i = 0; i < num_events; i++) {
means[tid] += fcs_data[i*num_dimensions+tid];
}
// Divide by the # of elements to get the average
means[tid] /= (float) num_events;
}
__syncthreads();
// Compute the average variance
// averageVariance(fcs_data, means, num_dimensions, num_events, &avgvar);
// Compute average variance for each dimension
if(tid < num_dimensions) {
variances[tid] = 0.0;
// Sum up all the variance
for(int i = 0; i < num_events; i++) {
// accumulate E[x^2]; Var(x) = E[x^2] - mean^2, and the mean^2 term is subtracted below
variances[tid] += (fcs_data[i*num_dimensions + tid])*(fcs_data[i*num_dimensions + tid]);
}
variances[tid] /= (float) num_events;
variances[tid] -= means[tid]*means[tid];
}
__syncthreads();
if(tid == 0) {
total_variance = 0.0;
for(int i=0; i<num_dimensions;i++) {
total_variance += variances[i];
}
avgvar = total_variance / (float) num_dimensions;
}
__syncthreads();
if(num_clusters > 1) {
seed = (num_events-1.0f)/(num_clusters-1.0f);
} else {
seed = 0.0;
}
// Seed the pi, means, and covariances for every cluster
for(int c=0; c < num_clusters; c++) {
if(tid < num_dimensions) {
clusters->means[c*num_dimensions+tid] = fcs_data[((int)(c*seed))*num_dimensions+tid];
}
for(int i=tid; i < num_elements; i+= num_threads) {
// Add the average variance divided by a constant, this keeps the cov matrix from becoming singular
row = (i) / num_dimensions;
col = (i) % num_dimensions;
if(row == col) {
clusters->R[c*num_dimensions*num_dimensions+i] = 1.0f;
} else {
clusters->R[c*num_dimensions*num_dimensions+i] = 0.0f;
}
}
if(tid == 0) {
clusters->pi[c] = 1.0f/((float)num_clusters);
clusters->N[c] = ((float) num_events) / ((float)num_clusters);
clusters->avgvar[c] = avgvar / COVARIANCE_DYNAMIC_RANGE;
}
}
}
__device__ float parallelSum(float* data, const unsigned int ndata) {
const unsigned int tid = threadIdx.x;
float t;
__syncthreads();
// Butterfly sum. ndata MUST be a power of 2.
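// Worked example: with ndata = 8, bit takes the values 4, 2, 1; thread 3 pairs with
// threads 7, 1 and 2 in turn, so after log2(8) = 3 rounds every thread holds the full sum.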
for(unsigned int bit = ndata >> 1; bit > 0; bit >>= 1) {
t = data[tid] + data[tid^bit]; __syncthreads();
data[tid] = t; __syncthreads();
}
return data[tid];
}
__device__ void compute_indices(int num_events, int* start, int* stop) {
// Break up the events evenly between the blocks
int num_pixels_per_block = num_events / NUM_BLOCKS;
// Make sure the events being accessed by the block are aligned to a multiple of 16
num_pixels_per_block = num_pixels_per_block - (num_pixels_per_block % 16);
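// e.g. with num_events = 1000 and NUM_BLOCKS = 3 (hypothetical values), 1000/3 = 333 rounds
// down to 320, so blocks 0 and 1 cover events [0,320) and [320,640) while the last block
// picks up the remainder [640,1000).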
*start = blockIdx.y * num_pixels_per_block + threadIdx.x;
// Last block will handle the leftover events
if(blockIdx.y == gridDim.y-1) {
*stop = num_events;
} else {
*stop = (blockIdx.y+1) * num_pixels_per_block;
}
}
__global__ void
estep1(float* data, clusters_t* clusters, int num_dimensions, int num_events) {
// Cached cluster parameters
__shared__ float means[NUM_DIMENSIONS];
__shared__ float Rinv[NUM_DIMENSIONS*NUM_DIMENSIONS];
float cluster_pi;
float constant;
const unsigned int tid = threadIdx.x;
int start_index;
int end_index;
int c = blockIdx.x;
compute_indices(num_events,&start_index,&end_index);
float like;
// This loop computes the expectation of every event into every cluster
//
// P(k|n) = L(x_n|mu_k,R_k)*P(k) / P(x_n)
//
// Compute log-likelihood for every cluster for each event
// L = constant*exp(-0.5*(x-mu)*Rinv*(x-mu))
// log_L = log_constant - 0.5*(x-u)*Rinv*(x-mu)
// the constant stored in clusters[c].constant is already the log of the constant
// copy the means for this cluster into shared memory
if(tid < num_dimensions) {
means[tid] = clusters->means[c*num_dimensions+tid];
}
// copy the covariance inverse into shared memory
for(int i=tid; i < num_dimensions*num_dimensions; i+= NUM_THREADS_ESTEP) {
Rinv[i] = clusters->Rinv[c*num_dimensions*num_dimensions+i];
}
cluster_pi = clusters->pi[c];
constant = clusters->constant[c];
// Sync to wait for all params to be loaded to shared memory
__syncthreads();
for(int event=start_index; event<end_index; event += NUM_THREADS_ESTEP) {
like = 0.0f;
// this does the loglikelihood calculation
#if DIAG_ONLY
for(int j=0; j<num_dimensions; j++) {
like += (data[j*num_events+event]-means[j]) * (data[j*num_events+event]-means[j]) * Rinv[j*num_dimensions+j];
}
#else
for(int i=0; i<num_dimensions; i++) {
for(int j=0; j<num_dimensions; j++) {
like += (data[i*num_events+event]-means[i]) * (data[j*num_events+event]-means[j]) * Rinv[i*num_dimensions+j];
}
}
#endif
// numerator of the E-step probability computation
clusters->memberships[c*num_events+event] = -0.5f * like + constant + logf(cluster_pi);
}
}
__global__ void
estep2(float* fcs_data, clusters_t* clusters, int num_dimensions, int num_clusters, int num_events, float* likelihood) {
float temp;
float thread_likelihood = 0.0f;
__shared__ float total_likelihoods[NUM_THREADS_ESTEP];
float max_likelihood;
float denominator_sum;
// Break up the events evenly between the blocks
int num_pixels_per_block = num_events / gridDim.x;
// Make sure the events being accessed by the block are aligned to a multiple of 16
num_pixels_per_block = num_pixels_per_block - (num_pixels_per_block % 16);
int tid = threadIdx.x;
int start_index;
int end_index;
start_index = blockIdx.x * num_pixels_per_block + tid;
// Last block will handle the leftover events
if(blockIdx.x == gridDim.x-1) {
end_index = num_events;
} else {
end_index = (blockIdx.x+1) * num_pixels_per_block;
}
total_likelihoods[tid] = 0.0;
// P(x_n) = sum of likelihoods weighted by P(k) (their probability, cluster[c].pi)
// log(a+b) != log(a) + log(b) so we need to do the log of the sum of the exponentials
// For the sake of numerical stability, we first find the max and scale the values
// That way, the maximum value ever going into the exp function is 0 and we avoid overflow
// log-sum-exp formula:
// log(sum(exp(x_i)) = max(z) + log(sum(exp(z_i-max(z))))
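// e.g. expf(89.0f) already overflows a 32-bit float, but with max(z) = 89 every shifted
// term expf(z_i - 89) lies in (0,1], so the sum (and its log) stays finite.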
for(int pixel=start_index; pixel<end_index; pixel += NUM_THREADS_ESTEP) {
// find the maximum likelihood for this event
max_likelihood = clusters->memberships[pixel];
for(int c=1; c<num_clusters; c++) {
max_likelihood = fmaxf(max_likelihood,clusters->memberships[c*num_events+pixel]);
}
// Compute P(x_n), the denominator of the probability (sum of weighted likelihoods)
denominator_sum = 0.0;
for(int c=0; c<num_clusters; c++) {
temp = expf(clusters->memberships[c*num_events+pixel]-max_likelihood);
denominator_sum += temp;
}
denominator_sum = max_likelihood + logf(denominator_sum);
thread_likelihood += denominator_sum;
// Divide by denominator, also effectively normalize probabilities
// exp(log(p) - log(denom)) == p / denom
for(int c=0; c<num_clusters; c++) {
clusters->memberships[c*num_events+pixel] = expf(clusters->memberships[c*num_events+pixel] - denominator_sum);
//printf("Probability that pixel #%d is in cluster #%d: %f\n",pixel,c,clusters->memberships[c*num_events+pixel]);
}
}
total_likelihoods[tid] = thread_likelihood;
__syncthreads();
temp = parallelSum(total_likelihoods,NUM_THREADS_ESTEP);
if(tid == 0) {
likelihood[blockIdx.x] = temp;
}
}
/*
* Means kernel
* MultiGPU version, sums up all of the elements, but does not divide by N.
* This task is left for the host after combing results from multiple GPUs
*
* Should be launched with [M x D] grid
*/
__global__ void
mstep_means(float* fcs_data, clusters_t* clusters, int num_dimensions, int num_clusters, int num_events) {
// One block per cluster, per dimension: (M x D) grid of blocks
int tid = threadIdx.x;
int num_threads = blockDim.x;
int c = blockIdx.x; // cluster number
int d = blockIdx.y; // dimension number
__shared__ float temp_sum[NUM_THREADS_MSTEP];
float sum = 0.0f;
for(int event=tid; event < num_events; event+= num_threads) {
sum += fcs_data[d*num_events+event]*clusters->memberships[c*num_events+event];
}
temp_sum[tid] = sum;
__syncthreads();
// Reduce partial sums
sum = parallelSum(temp_sum,NUM_THREADS_MSTEP);
if(tid == 0) {
clusters->means[c*num_dimensions+d] = sum;
}
/*if(tid == 0) {
for(int i=1; i < num_threads; i++) {
temp_sum[0] += temp_sum[i];
}
clusters->means[c*num_dimensions+d] = temp_sum[0];
//clusters->means[c*num_dimensions+d] = temp_sum[0] / clusters->N[c];
}*/
}
/*
* Computes the size of each cluster, N
* Should be launched with M blocks (where M = number of clusters)
*/
__global__ void
mstep_N(clusters_t* clusters, int num_dimensions, int num_clusters, int num_events) {
int tid = threadIdx.x;
int num_threads = blockDim.x;
int c = blockIdx.x;
// Need to store the sum computed by each thread so in the end
// a single thread can reduce to get the final sum
__shared__ float temp_sums[NUM_THREADS_MSTEP];
// Compute new N
float sum = 0.0f;
// Break all the events across the threads, add up probabilities
for(int event=tid; event < num_events; event += num_threads) {
sum += clusters->memberships[c*num_events+event];
}
temp_sums[tid] = sum;
__syncthreads();
sum = parallelSum(temp_sums,NUM_THREADS_MSTEP);
if(tid == 0) {
clusters->N[c] = sum;
clusters->pi[c] = sum;
}
// Let the first thread add up all the intermediate sums
// Could do a parallel reduction...doubt it's really worth it for so few elements though
/*if(tid == 0) {
clusters->N[c] = 0.0;
for(int j=0; j<num_threads; j++) {
clusters->N[c] += temp_sums[j];
}
//printf("clusters[%d].N = %f\n",c,clusters[c].N);
// Set PI to the # of expected items, and then normalize it later
clusters->pi[c] = clusters->N[c];
}*/
}
/*
* Computes the row and col of a square matrix based on the index into
* a lower triangular (with diagonal) matrix
*
* Used to determine what row/col should be computed for covariance
* based on a block index.
*/
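// Example: for n = 3 the n*(n+1)/2 = 6 blocks in the y-dimension map to
// (row,col) = (0,0),(1,0),(1,1),(2,0),(2,1),(2,2).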
__device__ void compute_row_col(int n, int* row, int* col) {
int i = 0;
for(int r=0; r < n; r++) {
for(int c=0; c <= r; c++) {
if(i == blockIdx.y) {
*row = r;
*col = c;
return;
}
i++;
}
}
}
/*
* Computes the covariance matrices of the data (R matrix)
* Must be launched with a M x D*D grid of blocks:
* i.e. dim3 gridDim(num_clusters,num_dimensions*num_dimensions)
*/
__global__ void
mstep_covariance1(float* fcs_data, clusters_t* clusters, int num_dimensions, int num_clusters, int num_events) {
int tid = threadIdx.x; // easier variable name for our thread ID
// Determine what row,col this matrix is handling, also handles the symmetric element
int row,col,c;
compute_row_col(num_dimensions, &row, &col);
//row = blockIdx.y / num_dimensions;
//col = blockIdx.y % num_dimensions;
__syncthreads();
c = blockIdx.x; // Determines what cluster this block is handling
int matrix_index = row * num_dimensions + col;
#if DIAG_ONLY
if(row != col) {
clusters->R[c*num_dimensions*num_dimensions+matrix_index] = 0.0;
matrix_index = col*num_dimensions+row;
clusters->R[c*num_dimensions*num_dimensions+matrix_index] = 0.0;
return;
}
#endif
// Store the means in shared memory to speed up the covariance computations
__shared__ float means[NUM_DIMENSIONS];
// copy the means for this cluster into shared memory
if(tid < num_dimensions) {
means[tid] = clusters->means[c*num_dimensions+tid];
}
// Sync to wait for all params to be loaded to shared memory
__syncthreads();
__shared__ float temp_sums[NUM_THREADS_MSTEP];
float cov_sum = 0.0;
for(int event=tid; event < num_events; event+=NUM_THREADS_MSTEP) {
cov_sum += (fcs_data[row*num_events+event]-means[row])*
(fcs_data[col*num_events+event]-means[col])*clusters->memberships[c*num_events+event];
}
temp_sums[tid] = cov_sum;
__syncthreads();
cov_sum = parallelSum(temp_sums,NUM_THREADS_MSTEP);
if(tid == 0) {
clusters->R[c*num_dimensions*num_dimensions+matrix_index] = cov_sum;
// Set the symmetric value
matrix_index = col*num_dimensions+row;
clusters->R[c*num_dimensions*num_dimensions+matrix_index] = cov_sum;
// Regularize matrix - adds some variance to the diagonal elements
// Helps keep covariance matrix non-singular (so it can be inverted)
// The amount added is scaled down based on COVARIANCE_DYNAMIC_RANGE constant defined at top of this file
if(row == col) {
clusters->R[c*num_dimensions*num_dimensions+matrix_index] += clusters->avgvar[c];
}
}
}
__global__ void
mstep_covariance2(float* fcs_data, clusters_t* clusters, int num_dimensions, int num_clusters, int num_events) {
int tid = threadIdx.x; // easier variable name for our thread ID
// Determine what row,col this matrix is handling, also handles the symmetric element
int row,col,c1;
compute_row_col(num_dimensions, &row, &col);
__syncthreads();
c1 = blockIdx.x * NUM_CLUSTERS_PER_BLOCK; // Determines what cluster this block is handling
#if DIAG_ONLY
if(row != col) {
// Zero this off-diagonal element (and its symmetric partner) for every cluster handled by this block
for(int c = c1; c < c1+NUM_CLUSTERS_PER_BLOCK && c < num_clusters; c++) {
clusters->R[c*num_dimensions*num_dimensions+row*num_dimensions+col] = 0.0f;
clusters->R[c*num_dimensions*num_dimensions+col*num_dimensions+row] = 0.0f;
}
return;
}
#endif
// Store the means in shared memory to speed up the covariance computations
__shared__ float means_row[NUM_CLUSTERS_PER_BLOCK];
__shared__ float means_col[NUM_CLUSTERS_PER_BLOCK];
//if(tid < NUM_CLUSTERS_PER_BLOCK) {
if ( (tid < min(num_clusters, NUM_CLUSTERS_PER_BLOCK)) // c1 = 0
&& (c1+tid < num_clusters)) {
means_row[tid] = clusters->means[(c1+tid)*num_dimensions+row];
means_col[tid] = clusters->means[(c1+tid)*num_dimensions+col];
}
// Sync to wait for all params to be loaded to shared memory
__syncthreads();
// 256 * 6
__shared__ float temp_sums[NUM_THREADS_MSTEP*NUM_CLUSTERS_PER_BLOCK];
float cov_sum1 = 0.0f;
float cov_sum2 = 0.0f;
float cov_sum3 = 0.0f;
float cov_sum4 = 0.0f;
float cov_sum5 = 0.0f;
float cov_sum6 = 0.0f;
float val1,val2;
for(int c=0; c < NUM_CLUSTERS_PER_BLOCK; c++) {
temp_sums[c*NUM_THREADS_MSTEP+tid] = 0.0;
}
for(int event=tid; event < num_events; event+=NUM_THREADS_MSTEP) {
val1 = fcs_data[row*num_events+event];
val2 = fcs_data[col*num_events+event];
cov_sum1 += (val1-means_row[0])*(val2-means_col[0])*clusters->memberships[c1*num_events+event];
cov_sum2 += (val1-means_row[1])*(val2-means_col[1])*clusters->memberships[(c1+1)*num_events+event];
cov_sum3 += (val1-means_row[2])*(val2-means_col[2])*clusters->memberships[(c1+2)*num_events+event];
cov_sum4 += (val1-means_row[3])*(val2-means_col[3])*clusters->memberships[(c1+3)*num_events+event];
cov_sum5 += (val1-means_row[4])*(val2-means_col[4])*clusters->memberships[(c1+4)*num_events+event];
cov_sum6 += (val1-means_row[5])*(val2-means_col[5])*clusters->memberships[(c1+5)*num_events+event];
}
temp_sums[0*NUM_THREADS_MSTEP+tid] = cov_sum1;
temp_sums[1*NUM_THREADS_MSTEP+tid] = cov_sum2;
temp_sums[2*NUM_THREADS_MSTEP+tid] = cov_sum3;
temp_sums[3*NUM_THREADS_MSTEP+tid] = cov_sum4;
temp_sums[4*NUM_THREADS_MSTEP+tid] = cov_sum5;
temp_sums[5*NUM_THREADS_MSTEP+tid] = cov_sum6;
__syncthreads();
for(int c=0; c < NUM_CLUSTERS_PER_BLOCK; c++) {
temp_sums[c*NUM_THREADS_MSTEP+tid] = parallelSum(&temp_sums[c*NUM_THREADS_MSTEP],NUM_THREADS_MSTEP);
__syncthreads();
}
if(tid == 0) {
for(int c=0; c < NUM_CLUSTERS_PER_BLOCK && (c+c1) < num_clusters; c++) {
int offset = (c+c1)*num_dimensions*num_dimensions;
cov_sum1 = temp_sums[c*NUM_THREADS_MSTEP];
clusters->R[offset+row*num_dimensions+col] = cov_sum1;
// Set the symmetric value
clusters->R[offset+col*num_dimensions+row] = cov_sum1;
// Regularize matrix - adds some variance to the diagonal elements
// Helps keep covariance matrix non-singular (so it can be inverted)
// The amount added is scaled down based on COVARIANCE_DYNAMIC_RANGE constant defined in gaussian.h
if(row == col) {
clusters->R[offset+row*num_dimensions+col] += clusters->avgvar[c+c1];
}
}
}
}
#endif // #ifndef _TEMPLATE_KERNEL_H_
| 8c38f534e17dece055dcea268e85d799430b4860.cu | /*
* CUDA Kernels for Expectation Maximization with Gaussian Mixture Models
*
* Author: Andrew Pangborn
*
* Department of Computer Engineering
* Rochester Institute of Technology
*/
#ifndef _TEMPLATE_KERNEL_H_
#define _TEMPLATE_KERNEL_H_
#include "gaussian.h"
/*
* Compute the multivariate mean of the FCS data
*/
__device__ void mvtmeans(float* fcs_data, int num_dimensions, int num_events, float* means) {
int tid = threadIdx.x;
if(tid < num_dimensions) {
means[tid] = 0.0;
// Sum up all the values for each dimension
for(int i = 0; i < num_events; i++) {
means[tid] += fcs_data[i*num_dimensions+tid];
}
// Divide by the # of elements to get the average
means[tid] /= (float) num_events;
}
}
__device__ void averageVariance(float* fcs_data, float* means, int num_dimensions, int num_events, float* avgvar) {
int tid = threadIdx.x;
__shared__ float variances[NUM_DIMENSIONS];
__shared__ float total_variance;
// Compute average variance for each dimension
if(tid < num_dimensions) {
variances[tid] = 0.0;
// Sum up all the variance
for(int i = 0; i < num_events; i++) {
// accumulate E[x^2]; Var(x) = E[x^2] - mean^2, and the mean^2 term is subtracted below
variances[tid] += (fcs_data[i*num_dimensions + tid])*(fcs_data[i*num_dimensions + tid]);
}
variances[tid] /= (float) num_events;
variances[tid] -= means[tid]*means[tid];
}
__syncthreads();
if(tid == 0) {
total_variance = 0.0;
for(int i=0; i<num_dimensions;i++) {
total_variance += variances[i];
}
*avgvar = total_variance / (float) num_dimensions;
}
}
// Inverts an NxN matrix 'data' stored as a 1D array in-place
// 'actualsize' is N
// Computes the log of the determinant of the original matrix in the process
__device__ void invert(float* data, int actualsize, float* log_determinant) {
int maxsize = actualsize;
int n = actualsize;
if(threadIdx.x == 0) {
*log_determinant = 0.0;
// sanity check
if (actualsize == 1) {
*log_determinant = logf(data[0]);
data[0] = 1.0 / data[0];
} else {
for (int i=1; i < actualsize; i++) data[i] /= data[0]; // normalize row 0
for (int i=1; i < actualsize; i++) {
for (int j=i; j < actualsize; j++) { // do a column of L
float sum = 0.0;
for (int k = 0; k < i; k++)
sum += data[j*maxsize+k] * data[k*maxsize+i];
data[j*maxsize+i] -= sum;
}
if (i == actualsize-1) continue;
for (int j=i+1; j < actualsize; j++) { // do a row of U
float sum = 0.0;
for (int k = 0; k < i; k++)
sum += data[i*maxsize+k]*data[k*maxsize+j];
data[i*maxsize+j] =
(data[i*maxsize+j]-sum) / data[i*maxsize+i];
}
}
for(int i=0; i<actualsize; i++) {
*log_determinant += logf(fabs(data[i*n+i]));
}
for ( int i = 0; i < actualsize; i++ ) // invert L
for ( int j = i; j < actualsize; j++ ) {
float x = 1.0;
if ( i != j ) {
x = 0.0;
for ( int k = i; k < j; k++ )
x -= data[j*maxsize+k]*data[k*maxsize+i];
}
data[j*maxsize+i] = x / data[j*maxsize+j];
}
for ( int i = 0; i < actualsize; i++ ) // invert U
for ( int j = i; j < actualsize; j++ ) {
if ( i == j ) continue;
float sum = 0.0;
for ( int k = i; k < j; k++ )
sum += data[k*maxsize+j]*( (i==k) ? 1.0 : data[i*maxsize+k] );
data[i*maxsize+j] = -sum;
}
for ( int i = 0; i < actualsize; i++ ) // final inversion
for ( int j = 0; j < actualsize; j++ ) {
float sum = 0.0;
for ( int k = ((i>j)?i:j); k < actualsize; k++ )
sum += ((j==k)?1.0:data[j*maxsize+k])*data[k*maxsize+i];
data[j*maxsize+i] = sum;
}
}
}
}
__device__ void compute_pi(clusters_t* clusters, int num_clusters) {
__shared__ float sum;
if(threadIdx.x == 0) {
sum = 0.0;
for(int i=0; i<num_clusters; i++) {
sum += clusters->N[i];
}
}
__syncthreads();
for(int c = threadIdx.x; c < num_clusters; c += blockDim.x) {
if(clusters->N[c] < 0.5f) {
clusters->pi[c] = 1e-10;
} else {
clusters->pi[c] = clusters->N[c] / sum;
}
}
__syncthreads();
}
__device__ void compute_constants(clusters_t* clusters, int num_clusters, int num_dimensions) {
int tid = threadIdx.x;
int num_threads = blockDim.x;
int num_elements = num_dimensions*num_dimensions;
__shared__ float determinant_arg; // only one thread computes the inverse so we need a shared argument
float log_determinant;
__shared__ float matrix[NUM_DIMENSIONS*NUM_DIMENSIONS];
// Invert the matrix for every cluster
int c = blockIdx.x;
// Copy the R matrix into shared memory for doing the matrix inversion
for(int i=tid; i<num_elements; i+= num_threads ) {
matrix[i] = clusters->R[c*num_dimensions*num_dimensions+i];
}
__syncthreads();
#if DIAG_ONLY
if(tid == 0) {
determinant_arg = 1.0f;
for(int i=0; i < num_dimensions; i++) {
determinant_arg *= matrix[i*num_dimensions+i];
matrix[i*num_dimensions+i] = 1.0f / matrix[i*num_dimensions+i];
}
determinant_arg = logf(determinant_arg);
}
#else
invert(matrix,num_dimensions,&determinant_arg);
#endif
__syncthreads();
log_determinant = determinant_arg;
// Copy the matrix from shared memory back into the cluster memory
for(int i=tid; i<num_elements; i+= num_threads) {
clusters->Rinv[c*num_dimensions*num_dimensions+i] = matrix[i];
}
__syncthreads();
// Compute the constant
// Equivalent to: log(1/((2*PI)^(M/2)*det(R)^(1/2)))
// This constant is used in all E-step likelihood calculations
if(tid == 0) {
clusters->constant[c] = -num_dimensions*0.5f*logf(2.0f*PI) - 0.5f*log_determinant;
}
}
/*
* Computes the constant, pi, Rinv for each cluster
*
* Needs to be launched with the number of blocks = number of clusters
*/
__global__ void
constants_kernel(clusters_t* clusters, int num_clusters, int num_dimensions) {
// compute_constants(clusters,num_clusters,num_dimensions);
int tid = threadIdx.x;
int bid = blockIdx.x;
int num_threads = blockDim.x;
int num_elements = num_dimensions*num_dimensions;
__shared__ float determinant_arg; // only one thread computes the inverse so we need a shared argument
__shared__ float sum;
__shared__ float matrix[NUM_DIMENSIONS*NUM_DIMENSIONS];
float log_determinant;
// Invert the matrix for every cluster
// Copy the R matrix into shared memory for doing the matrix inversion
for(int i=tid; i<num_elements; i+= num_threads ) {
matrix[i] = clusters->R[bid*num_dimensions*num_dimensions+i];
}
__syncthreads();
#if DIAG_ONLY
if(tid == 0) {
determinant_arg = 1.0f;
for(int i=0; i < num_dimensions; i++) {
determinant_arg *= matrix[i*num_dimensions+i];
matrix[i*num_dimensions+i] = 1.0f / matrix[i*num_dimensions+i];
}
determinant_arg = logf(determinant_arg);
}
#else
invert(matrix,num_dimensions,&determinant_arg);
#endif
__syncthreads();
log_determinant = determinant_arg;
// Copy the matrix from shared memory back into the cluster memory
for(int i=tid; i<num_elements; i+= num_threads) {
clusters->Rinv[bid*num_dimensions*num_dimensions+i] = matrix[i];
}
__syncthreads();
// Compute the constant
// Equivalent to: log(1/((2*PI)^(M/2)*det(R)^(1/2)))
// This constant is used in all E-step likelihood calculations
if(tid == 0) {
clusters->constant[bid] = -num_dimensions*0.5f*logf(2.0f*PI) - 0.5f*log_determinant;
}
__syncthreads();
if(bid == 0) {
// compute_pi(clusters,num_clusters);
if(tid == 0) {
sum = 0.0;
for(int i=0; i<num_clusters; i++) {
sum += clusters->N[i];
}
}
__syncthreads();
for(int i = tid; i < num_clusters; i += num_threads) {
if(clusters->N[i] < 0.5f) {
clusters->pi[i] = 1e-10;
} else {
clusters->pi[i] = clusters->N[i] / sum;
}
}
}
}
////////////////////////////////////////////////////////////////////////////////
//! @param fcs_data FCS data: [num_events]
//! @param clusters Clusters: [num_clusters]
//! @param num_dimensions number of dimensions in an FCS event
//! @param num_events number of FCS events
////////////////////////////////////////////////////////////////////////////////
__global__ void
seed_clusters_kernel( const float* fcs_data,
clusters_t* clusters,
const int num_dimensions,
const int num_clusters,
const int num_events)
{
int tid = threadIdx.x;
int num_threads = blockDim.x;
int row, col;
float seed;
// Number of elements in the covariance matrix
int num_elements = num_dimensions*num_dimensions;
// shared memory
__shared__ float means[NUM_DIMENSIONS];
__shared__ float avgvar;
__shared__ float variances[NUM_DIMENSIONS];
__shared__ float total_variance;
// Compute the means
// mvtmeans(fcs_data, num_dimensions, num_events, means);
if(tid < num_dimensions) {
means[tid] = 0.0;
// Sum up all the values for each dimension
for(int i = 0; i < num_events; i++) {
means[tid] += fcs_data[i*num_dimensions+tid];
}
// Divide by the # of elements to get the average
means[tid] /= (float) num_events;
}
__syncthreads();
// Compute the average variance
// averageVariance(fcs_data, means, num_dimensions, num_events, &avgvar);
// Compute average variance for each dimension
if(tid < num_dimensions) {
variances[tid] = 0.0;
// Sum up all the variance
for(int i = 0; i < num_events; i++) {
// accumulate E[x^2]; the variance is formed below as E[x^2] - mean^2
variances[tid] += (fcs_data[i*num_dimensions + tid])*(fcs_data[i*num_dimensions + tid]);
}
variances[tid] /= (float) num_events;
variances[tid] -= means[tid]*means[tid];
}
__syncthreads();
if(tid == 0) {
total_variance = 0.0;
for(int i=0; i<num_dimensions;i++) {
total_variance += variances[i];
}
avgvar = total_variance / (float) num_dimensions;
}
__syncthreads();
if(num_clusters > 1) {
seed = (num_events-1.0f)/(num_clusters-1.0f);
} else {
seed = 0.0;
}
// Seed the pi, means, and covariances for every cluster
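// Means are sampled from evenly spaced events (stride 'seed'), covariances start as the identity matrix, and pi/N start uniform across clusters.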
for(int c=0; c < num_clusters; c++) {
if(tid < num_dimensions) {
clusters->means[c*num_dimensions+tid] = fcs_data[((int)(c*seed))*num_dimensions+tid];
}
for(int i=tid; i < num_elements; i+= num_threads) {
// Initialize the covariance to the identity matrix; the average variance scaled by COVARIANCE_DYNAMIC_RANGE (stored in avgvar below) is added to the diagonal during the M-step, which keeps the covariance matrix from becoming singular
row = (i) / num_dimensions;
col = (i) % num_dimensions;
if(row == col) {
clusters->R[c*num_dimensions*num_dimensions+i] = 1.0f;
} else {
clusters->R[c*num_dimensions*num_dimensions+i] = 0.0f;
}
}
if(tid == 0) {
clusters->pi[c] = 1.0f/((float)num_clusters);
clusters->N[c] = ((float) num_events) / ((float)num_clusters);
clusters->avgvar[c] = avgvar / COVARIANCE_DYNAMIC_RANGE;
}
}
}
__device__ float parallelSum(float* data, const unsigned int ndata) {
const unsigned int tid = threadIdx.x;
float t;
__syncthreads();
// Butterfly sum. ndata MUST be a power of 2.
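// Each round pairs thread tid with partner tid^bit, so after log2(ndata) rounds every slot of data[] holds the full sum and each thread can return its own entry.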
for(unsigned int bit = ndata >> 1; bit > 0; bit >>= 1) {
t = data[tid] + data[tid^bit]; __syncthreads();
data[tid] = t; __syncthreads();
}
return data[tid];
}
__device__ void compute_indices(int num_events, int* start, int* stop) {
// Break up the events evenly between the blocks
int num_pixels_per_block = num_events / NUM_BLOCKS;
// Make sure the events being accessed by the block are aligned to a multiple of 16
num_pixels_per_block = num_pixels_per_block - (num_pixels_per_block % 16);
*start = blockIdx.y * num_pixels_per_block + threadIdx.x;
// Last block will handle the leftover events
if(blockIdx.y == gridDim.y-1) {
*stop = num_events;
} else {
*stop = (blockIdx.y+1) * num_pixels_per_block;
}
}
__global__ void
estep1(float* data, clusters_t* clusters, int num_dimensions, int num_events) {
// Cached cluster parameters
__shared__ float means[NUM_DIMENSIONS];
__shared__ float Rinv[NUM_DIMENSIONS*NUM_DIMENSIONS];
float cluster_pi;
float constant;
const unsigned int tid = threadIdx.x;
int start_index;
int end_index;
int c = blockIdx.x;
compute_indices(num_events,&start_index,&end_index);
float like;
// This loop computes the expectation of every event into every cluster
//
// P(k|n) = L(x_n|mu_k,R_k)*P(k) / P(x_n)
//
// Compute log-likelihood for every cluster for each event
// L = constant*exp(-0.5*(x-mu)*Rinv*(x-mu))
// log_L = log_constant - 0.5*(x-mu)*Rinv*(x-mu)
// the constant stored in clusters[c].constant is already the log of the constant
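// Each block handles one cluster (c = blockIdx.x) and a slice of the events (via blockIdx.y in compute_indices); the values written to clusters->memberships here are log-space numerators, which estep2 later exponentiates and normalizes into probabilities.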
// copy the means for this cluster into shared memory
if(tid < num_dimensions) {
means[tid] = clusters->means[c*num_dimensions+tid];
}
// copy the covariance inverse into shared memory
for(int i=tid; i < num_dimensions*num_dimensions; i+= NUM_THREADS_ESTEP) {
Rinv[i] = clusters->Rinv[c*num_dimensions*num_dimensions+i];
}
cluster_pi = clusters->pi[c];
constant = clusters->constant[c];
// Sync to wait for all params to be loaded to shared memory
__syncthreads();
for(int event=start_index; event<end_index; event += NUM_THREADS_ESTEP) {
like = 0.0f;
// this does the loglikelihood calculation
#if DIAG_ONLY
for(int j=0; j<num_dimensions; j++) {
like += (data[j*num_events+event]-means[j]) * (data[j*num_events+event]-means[j]) * Rinv[j*num_dimensions+j];
}
#else
for(int i=0; i<num_dimensions; i++) {
for(int j=0; j<num_dimensions; j++) {
like += (data[i*num_events+event]-means[i]) * (data[j*num_events+event]-means[j]) * Rinv[i*num_dimensions+j];
}
}
#endif
// numerator of the E-step probability computation
clusters->memberships[c*num_events+event] = -0.5f * like + constant + logf(cluster_pi);
}
}
__global__ void
estep2(float* fcs_data, clusters_t* clusters, int num_dimensions, int num_clusters, int num_events, float* likelihood) {
float temp;
float thread_likelihood = 0.0f;
__shared__ float total_likelihoods[NUM_THREADS_ESTEP];
float max_likelihood;
float denominator_sum;
// Break up the events evenly between the blocks
int num_pixels_per_block = num_events / gridDim.x;
// Make sure the events being accessed by the block are aligned to a multiple of 16
num_pixels_per_block = num_pixels_per_block - (num_pixels_per_block % 16);
int tid = threadIdx.x;
int start_index;
int end_index;
start_index = blockIdx.x * num_pixels_per_block + tid;
// Last block will handle the leftover events
if(blockIdx.x == gridDim.x-1) {
end_index = num_events;
} else {
end_index = (blockIdx.x+1) * num_pixels_per_block;
}
total_likelihoods[tid] = 0.0;
// P(x_n) = sum of likelihoods weighted by P(k) (their probability, cluster[c].pi)
// log(a+b) != log(a) + log(b) so we need to do the log of the sum of the exponentials
// For the sake of numerical stability, we first find the max and scale the values
// That way, the maximum value ever going into the exp function is 0 and we avoid overflow
// log-sum-exp formula:
// log(sum(exp(x_i))) = max(x) + log(sum(exp(x_i - max(x))))
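// e.g. log(exp(1000) + exp(999)) overflows if evaluated directly, but equals 1000 + log(1 + exp(-1)) ~= 1000.31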
for(int pixel=start_index; pixel<end_index; pixel += NUM_THREADS_ESTEP) {
// find the maximum likelihood for this event
max_likelihood = clusters->memberships[pixel];
for(int c=1; c<num_clusters; c++) {
max_likelihood = fmaxf(max_likelihood,clusters->memberships[c*num_events+pixel]);
}
// Compute P(x_n), the denominator of the probability (sum of weighted likelihoods)
denominator_sum = 0.0;
for(int c=0; c<num_clusters; c++) {
temp = expf(clusters->memberships[c*num_events+pixel]-max_likelihood);
denominator_sum += temp;
}
denominator_sum = max_likelihood + logf(denominator_sum);
thread_likelihood += denominator_sum;
// Divide by denominator, also effectively normalize probabilities
// exp(log(p) - log(denom)) == p / denom
for(int c=0; c<num_clusters; c++) {
clusters->memberships[c*num_events+pixel] = expf(clusters->memberships[c*num_events+pixel] - denominator_sum);
//printf("Probability that pixel #%d is in cluster #%d: %f\n",pixel,c,clusters->memberships[c*num_events+pixel]);
}
}
total_likelihoods[tid] = thread_likelihood;
__syncthreads();
temp = parallelSum(total_likelihoods,NUM_THREADS_ESTEP);
if(tid == 0) {
likelihood[blockIdx.x] = temp;
}
}
/*
* Means kernel
* MultiGPU version, sums up all of the elements, but does not divide by N.
* This task is left for the host after combining results from multiple GPUs
*
* Should be launched with [M x D] grid
*/
__global__ void
mstep_means(float* fcs_data, clusters_t* clusters, int num_dimensions, int num_clusters, int num_events) {
// One block per cluster, per dimension: (M x D) grid of blocks
int tid = threadIdx.x;
int num_threads = blockDim.x;
int c = blockIdx.x; // cluster number
int d = blockIdx.y; // dimension number
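// fcs_data is stored dimension-major ([dimension*num_events + event]), so consecutive threads read consecutive events and the loads below are coalesced.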
__shared__ float temp_sum[NUM_THREADS_MSTEP];
float sum = 0.0f;
for(int event=tid; event < num_events; event+= num_threads) {
sum += fcs_data[d*num_events+event]*clusters->memberships[c*num_events+event];
}
temp_sum[tid] = sum;
__syncthreads();
// Reduce partial sums
sum = parallelSum(temp_sum,NUM_THREADS_MSTEP);
if(tid == 0) {
clusters->means[c*num_dimensions+d] = sum;
}
/*if(tid == 0) {
for(int i=1; i < num_threads; i++) {
temp_sum[0] += temp_sum[i];
}
clusters->means[c*num_dimensions+d] = temp_sum[0];
//clusters->means[c*num_dimensions+d] = temp_sum[0] / clusters->N[c];
}*/
}
/*
* Computes the size of each cluster, N
* Should be launched with M blocks (where M = number of clusters)
*/
__global__ void
mstep_N(clusters_t* clusters, int num_dimensions, int num_clusters, int num_events) {
int tid = threadIdx.x;
int num_threads = blockDim.x;
int c = blockIdx.x;
// Need to store the sum computed by each thread so in the end
// a single thread can reduce to get the final sum
__shared__ float temp_sums[NUM_THREADS_MSTEP];
// Compute new N
float sum = 0.0f;
// Break all the events across the threads, add up probabilities
for(int event=tid; event < num_events; event += num_threads) {
sum += clusters->memberships[c*num_events+event];
}
temp_sums[tid] = sum;
__syncthreads();
sum = parallelSum(temp_sums,NUM_THREADS_MSTEP);
if(tid == 0) {
clusters->N[c] = sum;
clusters->pi[c] = sum;
}
// Let the first thread add up all the intermediate sums
// Could do a parallel reduction...doubt it's really worth it for so few elements though
/*if(tid == 0) {
clusters->N[c] = 0.0;
for(int j=0; j<num_threads; j++) {
clusters->N[c] += temp_sums[j];
}
//printf("clusters[%d].N = %f\n",c,clusters[c].N);
// Set PI to the # of expected items, and then normalize it later
clusters->pi[c] = clusters->N[c];
}*/
}
/*
* Computes the row and col of a square matrix based on the index into
* a lower triangular (with diagonal) matrix
*
* Used to determine what row/col should be computed for covariance
* based on a block index.
*/
__device__ void compute_row_col(int n, int* row, int* col) {
int i = 0;
for(int r=0; r < n; r++) {
for(int c=0; c <= r; c++) {
if(i == blockIdx.y) {
*row = r;
*col = c;
return;
}
i++;
}
}
}
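// Note: this enumerates only the lower triangle (row >= col), i.e. n*(n+1)/2 distinct (row,col) pairs; a blockIdx.y outside that range would leave row/col unset, so the covariance kernels are presumably launched with num_dimensions*(num_dimensions+1)/2 blocks in y rather than the full D*D.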
/*
* Computes the covariance matrices of the data (R matrix)
* Must be launched with a M x D*D grid of blocks:
* i.e. dim3 gridDim(num_clusters,num_dimensions*num_dimensions)
*/
__global__ void
mstep_covariance1(float* fcs_data, clusters_t* clusters, int num_dimensions, int num_clusters, int num_events) {
int tid = threadIdx.x; // easier variable name for our thread ID
// Determine what row,col this matrix is handling, also handles the symmetric element
int row,col,c;
compute_row_col(num_dimensions, &row, &col);
//row = blockIdx.y / num_dimensions;
//col = blockIdx.y % num_dimensions;
__syncthreads();
c = blockIdx.x; // Determines what cluster this block is handling
int matrix_index = row * num_dimensions + col;
#if DIAG_ONLY
if(row != col) {
clusters->R[c*num_dimensions*num_dimensions+matrix_index] = 0.0;
matrix_index = col*num_dimensions+row;
clusters->R[c*num_dimensions*num_dimensions+matrix_index] = 0.0;
return;
}
#endif
// Store the means in shared memory to speed up the covariance computations
__shared__ float means[NUM_DIMENSIONS];
// copy the means for this cluster into shared memory
if(tid < num_dimensions) {
means[tid] = clusters->means[c*num_dimensions+tid];
}
// Sync to wait for all params to be loaded to shared memory
__syncthreads();
__shared__ float temp_sums[NUM_THREADS_MSTEP];
float cov_sum = 0.0;
for(int event=tid; event < num_events; event+=NUM_THREADS_MSTEP) {
cov_sum += (fcs_data[row*num_events+event]-means[row])*
(fcs_data[col*num_events+event]-means[col])*clusters->memberships[c*num_events+event];
}
temp_sums[tid] = cov_sum;
__syncthreads();
cov_sum = parallelSum(temp_sums,NUM_THREADS_MSTEP);
if(tid == 0) {
clusters->R[c*num_dimensions*num_dimensions+matrix_index] = cov_sum;
// Set the symmetric value
matrix_index = col*num_dimensions+row;
clusters->R[c*num_dimensions*num_dimensions+matrix_index] = cov_sum;
// Regularize matrix - adds some variance to the diagonal elements
// Helps keep covariance matrix non-singular (so it can be inverted)
// The amount added is scaled down based on COVARIANCE_DYNAMIC_RANGE constant defined at top of this file
if(row == col) {
clusters->R[c*num_dimensions*num_dimensions+matrix_index] += clusters->avgvar[c];
}
}
}
__global__ void
mstep_covariance2(float* fcs_data, clusters_t* clusters, int num_dimensions, int num_clusters, int num_events) {
int tid = threadIdx.x; // easier variable name for our thread ID
// Determine what row,col this matrix is handling, also handles the symmetric element
int row,col,c1;
compute_row_col(num_dimensions, &row, &col);
__syncthreads();
c1 = blockIdx.x * NUM_CLUSTERS_PER_BLOCK; // Determines what cluster this block is handling
#if DIAG_ONLY
if(row != col) {
clusters->R[c1*num_dimensions*num_dimensions+row*num_dimensions+col] = 0.0f;
clusters->R[c1*num_dimensions*num_dimensions+col*num_dimensions+row] = 0.0f;
return;
}
#endif
// Store the means in shared memory to speed up the covariance computations
__shared__ float means_row[NUM_CLUSTERS_PER_BLOCK];
__shared__ float means_col[NUM_CLUSTERS_PER_BLOCK];
//if(tid < NUM_CLUSTERS_PER_BLOCK) {
if ( (tid < min(num_clusters, NUM_CLUSTERS_PER_BLOCK)) // c1 = 0
&& (c1+tid < num_clusters)) {
means_row[tid] = clusters->means[(c1+tid)*num_dimensions+row];
means_col[tid] = clusters->means[(c1+tid)*num_dimensions+col];
}
// Sync to wait for all params to be loaded to shared memory
__syncthreads();
// 256 * 6
__shared__ float temp_sums[NUM_THREADS_MSTEP*NUM_CLUSTERS_PER_BLOCK];
float cov_sum1 = 0.0f;
float cov_sum2 = 0.0f;
float cov_sum3 = 0.0f;
float cov_sum4 = 0.0f;
float cov_sum5 = 0.0f;
float cov_sum6 = 0.0f;
float val1,val2;
for(int c=0; c < NUM_CLUSTERS_PER_BLOCK; c++) {
temp_sums[c*NUM_THREADS_MSTEP+tid] = 0.0;
}
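// The six cov_sum accumulators below hard-code NUM_CLUSTERS_PER_BLOCK == 6; if that constant changes, this unrolled loop and the temp_sums writes must change with it.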
for(int event=tid; event < num_events; event+=NUM_THREADS_MSTEP) {
val1 = fcs_data[row*num_events+event];
val2 = fcs_data[col*num_events+event];
cov_sum1 += (val1-means_row[0])*(val2-means_col[0])*clusters->memberships[c1*num_events+event];
cov_sum2 += (val1-means_row[1])*(val2-means_col[1])*clusters->memberships[(c1+1)*num_events+event];
cov_sum3 += (val1-means_row[2])*(val2-means_col[2])*clusters->memberships[(c1+2)*num_events+event];
cov_sum4 += (val1-means_row[3])*(val2-means_col[3])*clusters->memberships[(c1+3)*num_events+event];
cov_sum5 += (val1-means_row[4])*(val2-means_col[4])*clusters->memberships[(c1+4)*num_events+event];
cov_sum6 += (val1-means_row[5])*(val2-means_col[5])*clusters->memberships[(c1+5)*num_events+event];
}
temp_sums[0*NUM_THREADS_MSTEP+tid] = cov_sum1;
temp_sums[1*NUM_THREADS_MSTEP+tid] = cov_sum2;
temp_sums[2*NUM_THREADS_MSTEP+tid] = cov_sum3;
temp_sums[3*NUM_THREADS_MSTEP+tid] = cov_sum4;
temp_sums[4*NUM_THREADS_MSTEP+tid] = cov_sum5;
temp_sums[5*NUM_THREADS_MSTEP+tid] = cov_sum6;
__syncthreads();
for(int c=0; c < NUM_CLUSTERS_PER_BLOCK; c++) {
temp_sums[c*NUM_THREADS_MSTEP+tid] = parallelSum(&temp_sums[c*NUM_THREADS_MSTEP],NUM_THREADS_MSTEP);
__syncthreads();
}
if(tid == 0) {
for(int c=0; c < NUM_CLUSTERS_PER_BLOCK && (c+c1) < num_clusters; c++) {
int offset = (c+c1)*num_dimensions*num_dimensions;
cov_sum1 = temp_sums[c*NUM_THREADS_MSTEP];
clusters->R[offset+row*num_dimensions+col] = cov_sum1;
// Set the symmetric value
clusters->R[offset+col*num_dimensions+row] = cov_sum1;
// Regularize matrix - adds some variance to the diagonal elements
// Helps keep covariance matrix non-singular (so it can be inverted)
// The amount added is scaled down based on COVARIANCE_DYNAMIC_RANGE constant defined in gaussian.h
if(row == col) {
clusters->R[offset+row*num_dimensions+col] += clusters->avgvar[c+c1];
}
}
}
}
#endif // #ifndef _TEMPLATE_KERNEL_H_
|
39dc1d4940213237a167ea2a78eeb8dfb076f3dd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* University of Illinois Open Source License
* Copyright 2010 Luthey-Schulten Group,
* All rights reserved.
*
* Developed by: Luthey-Schulten Group
* University of Illinois at Urbana-Champaign
* http://www.scs.uiuc.edu/~schulten
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the Software), to deal with
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is furnished to
* do so, subject to the following conditions:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimers.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimers in the documentation
* and/or other materials provided with the distribution.
*
* - Neither the names of the Luthey-Schulten Group, University of Illinois at
* Urbana-Champaign, nor the names of its contributors may be used to endorse or
* promote products derived from this Software without specific prior written
* permission.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS WITH THE SOFTWARE.
*
* Author(s): Elijah Roberts
*/
#include "lm/Cuda.h"
#define LS_WORDS_PER_SITE 2
#define LS_APRON_SIZE 2
#define LS_X_BLOCK_MAX_X_SIZE 128
#define LS_Y_BLOCK_X_SIZE 16
#define LS_Y_BLOCK_Y_SIZE 8
#define LS_Z_BLOCK_X_SIZE 16
#define LS_Z_BLOCK_Z_SIZE 8
#define LS_PACKED_SITES
#define LS_PACKED_LAST_OBJECT_MASK 0xFF000000
#include "lm/rdme/dev/lattice_sim_1d_dev.cu"
__global__ void cu_PackedSiteSkipX_kernel(const unsigned int* inLattice, unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeXYZSize);
__global__ void cu_PackedSiteSkipY_kernel(const unsigned int* inLattice, unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeXYZSize);
__global__ void cu_PackedSiteSkipZ_kernel(const unsigned int* inLattice, unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeZSize);
void cu_PackedSiteSkipX(unsigned int * host_inLattice, unsigned int * host_outLattice, unsigned int latticeXSize, unsigned int latticeYSize, unsigned int latticeZSize)
{
int major, minor;
lm::CUDA::getComputeCapabilities(0, &major, &minor);
if (major == 1 && minor < 2) throw lm::Exception("This test is only supported on compute capability >= 1.2.");
void* inLattice;
void* outLattice;
CUDA_EXCEPTION_CHECK(hipMalloc(&inLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int)));
CUDA_EXCEPTION_CHECK(hipMalloc(&outLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int)));
CUDA_EXCEPTION_CHECK(hipMemcpy(inLattice, host_inLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int), hipMemcpyHostToDevice));
CUDA_EXCEPTION_CHECK(hipMemset(outLattice, 0xFF, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int)));
unsigned int xBlockXSize = min(LS_X_BLOCK_MAX_X_SIZE,latticeXSize);
unsigned int gridXSize = latticeXSize/xBlockXSize;
unsigned int gridYSize = latticeYSize;
unsigned int gridZSize = latticeZSize;
dim3 gridSize(gridXSize*gridYSize, gridZSize);
dim3 threadBlockSize(xBlockXSize, 1, 1);
CUDA_EXCEPTION_EXECUTE((hipLaunchKernelGGL(cu_PackedSiteSkipX_kernel, dim3(gridSize), dim3(threadBlockSize), 0, 0, (unsigned int*)inLattice, (unsigned int*)outLattice, gridXSize, latticeXSize, latticeYSize, latticeXSize*latticeYSize*latticeZSize)));
CUDA_EXCEPTION_CHECK(hipStreamSynchronize(0));
CUDA_EXCEPTION_CHECK(hipMemcpy(host_outLattice, outLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int), hipMemcpyDeviceToHost));
CUDA_EXCEPTION_CHECK(hipFree(outLattice));
CUDA_EXCEPTION_CHECK(hipFree(inLattice));
}
__global__ void cu_PackedSiteSkipX_kernel(const unsigned int* inLattice, unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeXYZSize)
{
const unsigned int x = threadIdx.x;
__shared__ unsigned int bx, by, bz;
if (x == 0)
{
by = blockIdx.x/gridXSize;
bx = blockIdx.x-gridXSize*by;
bz = blockIdx.y;
}
__syncthreads();
// Figure out the offset of this thread in the lattice and the lattice segment.
unsigned int latticeXIndex = (bx*blockDim.x) + x;
unsigned int latticeIndex = (bz*latticeXSize*latticeYSize) + (by*latticeXSize) + latticeXIndex;
unsigned int windowIndex = x+LS_APRON_SIZE;
///////////////////////////////////////////
// Load the lattice into shared memory. //
///////////////////////////////////////////
// Shared memory to store the lattice segment. Each lattice site has four particles, eight bits for each particle.
__shared__ unsigned int window[LS_X_WINDOW_SIZE*LS_WORDS_PER_SITE];
// Copy the x window from device memory into shared memory.
copyXWindowFromLattice(bx, inLattice, window, latticeIndex, latticeXIndex, latticeXSize, latticeXYZSize, windowIndex);
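// Each lattice site spans LS_WORDS_PER_SITE words stored as separate planes: word 0 is written at latticeIndex, word 1 at latticeIndex + latticeXYZSize.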
outLattice[latticeIndex] = window[windowIndex];
outLattice[latticeIndex+latticeXYZSize] = window[windowIndex+LS_X_WINDOW_SIZE];
}
void cu_PackedSiteSkipY(unsigned int * host_inLattice, unsigned int * host_outLattice, unsigned int latticeXSize, unsigned int latticeYSize, unsigned int latticeZSize)
{
int major, minor;
lm::CUDA::getComputeCapabilities(0, &major, &minor);
if (major == 1 && minor < 2) throw lm::Exception("This test is only supported on compute capability >= 1.2.");
void* inLattice;
void* outLattice;
CUDA_EXCEPTION_CHECK(hipMalloc(&inLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int)));
CUDA_EXCEPTION_CHECK(hipMalloc(&outLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int)));
CUDA_EXCEPTION_CHECK(hipMemcpy(inLattice, host_inLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int), hipMemcpyHostToDevice));
CUDA_EXCEPTION_CHECK(hipMemset(outLattice, 0xFF, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int)));
unsigned int gridXSize = latticeXSize/LS_Y_BLOCK_X_SIZE;
unsigned int gridYSize = latticeYSize/LS_Y_BLOCK_Y_SIZE;
unsigned int gridZSize = latticeZSize;
dim3 gridSize(gridXSize*gridYSize, gridZSize);
dim3 threadBlockSize(LS_Y_BLOCK_X_SIZE, LS_Y_BLOCK_Y_SIZE, 1);
CUDA_EXCEPTION_EXECUTE((hipLaunchKernelGGL(cu_PackedSiteSkipY_kernel, dim3(gridSize), dim3(threadBlockSize), 0, 0, (unsigned int*)inLattice, (unsigned int*)outLattice, gridXSize, latticeXSize, latticeYSize, latticeXSize*latticeYSize*latticeZSize)));
CUDA_EXCEPTION_CHECK(hipStreamSynchronize(0));
CUDA_EXCEPTION_CHECK(hipMemcpy(host_outLattice, outLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int), hipMemcpyDeviceToHost));
CUDA_EXCEPTION_CHECK(hipFree(outLattice));
CUDA_EXCEPTION_CHECK(hipFree(inLattice));
}
__global__ void cu_PackedSiteSkipY_kernel(const unsigned int* inLattice, unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeXYZSize)
{
__shared__ unsigned int bx, by, bz;
calculateBlockPosition(&bx, &by, &bz, gridXSize);
// Figure out the offset of this thread in the lattice and the lattice segment.
unsigned int latticeYIndex = (by*blockDim.y) + threadIdx.y;
unsigned int latticeIndex = (bz*latticeXSize*latticeYSize) + (latticeYIndex*latticeXSize) + (bx*blockDim.x) + threadIdx.x;
unsigned int windowYIndex = threadIdx.y+LS_APRON_SIZE;
unsigned int windowIndex = (windowYIndex*blockDim.x) + threadIdx.x;
///////////////////////////////////////////
// Load the lattice into shared memory. //
///////////////////////////////////////////
// Shared memory to store the lattice segment. Each lattice site has four particles, eight bits for each particle.
__shared__ unsigned int window[LS_Y_WINDOW_SIZE*LS_WORDS_PER_SITE];
// Copy the x window from device memory into shared memory.
copyYWindowFromLattice(inLattice, window, latticeIndex, latticeYIndex, latticeXSize, latticeYSize, latticeXYZSize, windowIndex, windowYIndex);
outLattice[latticeIndex] = window[windowIndex];
outLattice[latticeIndex+latticeXYZSize] = window[windowIndex+LS_Y_WINDOW_SIZE];
}
void cu_PackedSiteSkipZ(unsigned int * host_inLattice, unsigned int * host_outLattice, unsigned int latticeXSize, unsigned int latticeYSize, unsigned int latticeZSize)
{
int major, minor;
lm::CUDA::getComputeCapabilities(0, &major, &minor);
if (major == 1 && minor < 2) throw lm::Exception("This test is only supported on compute capability >= 1.2.");
void* inLattice;
void* outLattice;
CUDA_EXCEPTION_CHECK(hipMalloc(&inLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int)));
CUDA_EXCEPTION_CHECK(hipMalloc(&outLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int)));
CUDA_EXCEPTION_CHECK(hipMemcpy(inLattice, host_inLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int), hipMemcpyHostToDevice));
CUDA_EXCEPTION_CHECK(hipMemset(outLattice, 0xFF, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int)));
unsigned int gridXSize = latticeXSize/LS_Z_BLOCK_X_SIZE;
unsigned int gridYSize = latticeYSize;
unsigned int gridZSize = latticeZSize/LS_Z_BLOCK_Z_SIZE;
dim3 gridSize(gridXSize*gridYSize, gridZSize);
dim3 threadBlockSize(LS_Z_BLOCK_X_SIZE, 1, LS_Z_BLOCK_Z_SIZE);
CUDA_EXCEPTION_EXECUTE((hipLaunchKernelGGL(cu_PackedSiteSkipZ_kernel, dim3(gridSize), dim3(threadBlockSize), 0, 0, (unsigned int*)inLattice, (unsigned int*)outLattice, gridXSize, latticeXSize, latticeYSize, latticeZSize)));
CUDA_EXCEPTION_CHECK(hipStreamSynchronize(0));
CUDA_EXCEPTION_CHECK(hipMemcpy(host_outLattice, outLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int), hipMemcpyDeviceToHost));
CUDA_EXCEPTION_CHECK(hipFree(outLattice));
CUDA_EXCEPTION_CHECK(hipFree(inLattice));
}
__global__ void cu_PackedSiteSkipZ_kernel(const unsigned int* inLattice, unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeZSize)
{
unsigned int latticeXYSize = latticeXSize*latticeYSize;
unsigned int latticeXYZSize = latticeXSize*latticeYSize*latticeZSize;
__shared__ unsigned int bx, by, bz;
calculateBlockPosition(&bx, &by, &bz, gridXSize);
// Figure out the offset of this thread in the lattice and the lattice segment.
unsigned int latticeZIndex = (bz*blockDim.z) + threadIdx.z;
unsigned int latticeIndex = (latticeZIndex*latticeXYSize) + (by*latticeXSize) + (bx*blockDim.x) + threadIdx.x;
unsigned int windowZIndex = threadIdx.z+LS_APRON_SIZE;
unsigned int windowIndex = (windowZIndex*blockDim.x) + threadIdx.x;
///////////////////////////////////////////
// Load the lattice into shared memory. //
///////////////////////////////////////////
// Shared memory to store the lattice segment. Each lattice site has four particles, eight bits for each particle.
__shared__ unsigned int window[LS_Z_WINDOW_SIZE*LS_WORDS_PER_SITE];
// Copy the x window from device memory into shared memory.
copyZWindowFromLattice(inLattice, window, latticeIndex, latticeZIndex, latticeZSize, latticeXYSize, latticeXYZSize, windowIndex, windowZIndex);
outLattice[latticeIndex] = window[windowIndex];
outLattice[latticeIndex+latticeXYZSize] = window[windowIndex+LS_Z_WINDOW_SIZE];
}
| 39dc1d4940213237a167ea2a78eeb8dfb076f3dd.cu | /*
* University of Illinois Open Source License
* Copyright 2010 Luthey-Schulten Group,
* All rights reserved.
*
* Developed by: Luthey-Schulten Group
* University of Illinois at Urbana-Champaign
* http://www.scs.uiuc.edu/~schulten
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the Software), to deal with
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is furnished to
* do so, subject to the following conditions:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimers.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimers in the documentation
* and/or other materials provided with the distribution.
*
* - Neither the names of the Luthey-Schulten Group, University of Illinois at
* Urbana-Champaign, nor the names of its contributors may be used to endorse or
* promote products derived from this Software without specific prior written
* permission.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS WITH THE SOFTWARE.
*
* Author(s): Elijah Roberts
*/
#include "lm/Cuda.h"
#define LS_WORDS_PER_SITE 2
#define LS_APRON_SIZE 2
#define LS_X_BLOCK_MAX_X_SIZE 128
#define LS_Y_BLOCK_X_SIZE 16
#define LS_Y_BLOCK_Y_SIZE 8
#define LS_Z_BLOCK_X_SIZE 16
#define LS_Z_BLOCK_Z_SIZE 8
#define LS_PACKED_SITES
#define LS_PACKED_LAST_OBJECT_MASK 0xFF000000
#include "lm/rdme/dev/lattice_sim_1d_dev.cu"
__global__ void cu_PackedSiteSkipX_kernel(const unsigned int* inLattice, unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeXYZSize);
__global__ void cu_PackedSiteSkipY_kernel(const unsigned int* inLattice, unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeXYZSize);
__global__ void cu_PackedSiteSkipZ_kernel(const unsigned int* inLattice, unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeZSize);
void cu_PackedSiteSkipX(unsigned int * host_inLattice, unsigned int * host_outLattice, unsigned int latticeXSize, unsigned int latticeYSize, unsigned int latticeZSize)
{
int major, minor;
lm::CUDA::getComputeCapabilities(0, &major, &minor);
if (major == 1 && minor < 2) throw lm::Exception("This test is only supported on compute capability >= 1.2.");
void* inLattice;
void* outLattice;
CUDA_EXCEPTION_CHECK(cudaMalloc(&inLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int)));
CUDA_EXCEPTION_CHECK(cudaMalloc(&outLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int)));
CUDA_EXCEPTION_CHECK(cudaMemcpy(inLattice, host_inLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int), cudaMemcpyHostToDevice));
CUDA_EXCEPTION_CHECK(cudaMemset(outLattice, 0xFF, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int)));
unsigned int xBlockXSize = min(LS_X_BLOCK_MAX_X_SIZE,latticeXSize);
unsigned int gridXSize = latticeXSize/xBlockXSize;
unsigned int gridYSize = latticeYSize;
unsigned int gridZSize = latticeZSize;
dim3 gridSize(gridXSize*gridYSize, gridZSize);
dim3 threadBlockSize(xBlockXSize, 1, 1);
CUDA_EXCEPTION_EXECUTE((cu_PackedSiteSkipX_kernel<<<gridSize,threadBlockSize>>>((unsigned int*)inLattice, (unsigned int*)outLattice, gridXSize, latticeXSize, latticeYSize, latticeXSize*latticeYSize*latticeZSize)));
CUDA_EXCEPTION_CHECK(cudaStreamSynchronize(0));
CUDA_EXCEPTION_CHECK(cudaMemcpy(host_outLattice, outLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int), cudaMemcpyDeviceToHost));
CUDA_EXCEPTION_CHECK(cudaFree(outLattice));
CUDA_EXCEPTION_CHECK(cudaFree(inLattice));
}
__global__ void cu_PackedSiteSkipX_kernel(const unsigned int* inLattice, unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeXYZSize)
{
const unsigned int x = threadIdx.x;
__shared__ unsigned int bx, by, bz;
if (x == 0)
{
by = blockIdx.x/gridXSize;
bx = blockIdx.x-gridXSize*by;
bz = blockIdx.y;
}
__syncthreads();
// Figure out the offset of this thread in the lattice and the lattice segment.
unsigned int latticeXIndex = (bx*blockDim.x) + x;
unsigned int latticeIndex = (bz*latticeXSize*latticeYSize) + (by*latticeXSize) + latticeXIndex;
unsigned int windowIndex = x+LS_APRON_SIZE;
///////////////////////////////////////////
// Load the lattice into shared memory. //
///////////////////////////////////////////
// Shared memory to store the lattice segment. Each lattice site has four particles, eight bits for each particle.
__shared__ unsigned int window[LS_X_WINDOW_SIZE*LS_WORDS_PER_SITE];
// Copy the x window from device memory into shared memory.
copyXWindowFromLattice(bx, inLattice, window, latticeIndex, latticeXIndex, latticeXSize, latticeXYZSize, windowIndex);
outLattice[latticeIndex] = window[windowIndex];
outLattice[latticeIndex+latticeXYZSize] = window[windowIndex+LS_X_WINDOW_SIZE];
}
void cu_PackedSiteSkipY(unsigned int * host_inLattice, unsigned int * host_outLattice, unsigned int latticeXSize, unsigned int latticeYSize, unsigned int latticeZSize)
{
int major, minor;
lm::CUDA::getComputeCapabilities(0, &major, &minor);
if (major == 1 && minor < 2) throw lm::Exception("This test is only supported on compute capability >= 1.2.");
void* inLattice;
void* outLattice;
CUDA_EXCEPTION_CHECK(cudaMalloc(&inLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int)));
CUDA_EXCEPTION_CHECK(cudaMalloc(&outLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int)));
CUDA_EXCEPTION_CHECK(cudaMemcpy(inLattice, host_inLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int), cudaMemcpyHostToDevice));
CUDA_EXCEPTION_CHECK(cudaMemset(outLattice, 0xFF, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int)));
unsigned int gridXSize = latticeXSize/LS_Y_BLOCK_X_SIZE;
unsigned int gridYSize = latticeYSize/LS_Y_BLOCK_Y_SIZE;
unsigned int gridZSize = latticeZSize;
dim3 gridSize(gridXSize*gridYSize, gridZSize);
dim3 threadBlockSize(LS_Y_BLOCK_X_SIZE, LS_Y_BLOCK_Y_SIZE, 1);
CUDA_EXCEPTION_EXECUTE((cu_PackedSiteSkipY_kernel<<<gridSize,threadBlockSize>>>((unsigned int*)inLattice, (unsigned int*)outLattice, gridXSize, latticeXSize, latticeYSize, latticeXSize*latticeYSize*latticeZSize)));
CUDA_EXCEPTION_CHECK(cudaStreamSynchronize(0));
CUDA_EXCEPTION_CHECK(cudaMemcpy(host_outLattice, outLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int), cudaMemcpyDeviceToHost));
CUDA_EXCEPTION_CHECK(cudaFree(outLattice));
CUDA_EXCEPTION_CHECK(cudaFree(inLattice));
}
__global__ void cu_PackedSiteSkipY_kernel(const unsigned int* inLattice, unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeXYZSize)
{
__shared__ unsigned int bx, by, bz;
calculateBlockPosition(&bx, &by, &bz, gridXSize);
// Figure out the offset of this thread in the lattice and the lattice segment.
unsigned int latticeYIndex = (by*blockDim.y) + threadIdx.y;
unsigned int latticeIndex = (bz*latticeXSize*latticeYSize) + (latticeYIndex*latticeXSize) + (bx*blockDim.x) + threadIdx.x;
unsigned int windowYIndex = threadIdx.y+LS_APRON_SIZE;
unsigned int windowIndex = (windowYIndex*blockDim.x) + threadIdx.x;
///////////////////////////////////////////
// Load the lattice into shared memory. //
///////////////////////////////////////////
// Shared memory to store the lattice segment. Each lattice site has four particles, eight bits for each particle.
__shared__ unsigned int window[LS_Y_WINDOW_SIZE*LS_WORDS_PER_SITE];
// Copy the x window from device memory into shared memory.
copyYWindowFromLattice(inLattice, window, latticeIndex, latticeYIndex, latticeXSize, latticeYSize, latticeXYZSize, windowIndex, windowYIndex);
outLattice[latticeIndex] = window[windowIndex];
outLattice[latticeIndex+latticeXYZSize] = window[windowIndex+LS_Y_WINDOW_SIZE];
}
void cu_PackedSiteSkipZ(unsigned int * host_inLattice, unsigned int * host_outLattice, unsigned int latticeXSize, unsigned int latticeYSize, unsigned int latticeZSize)
{
int major, minor;
lm::CUDA::getComputeCapabilities(0, &major, &minor);
if (major == 1 && minor < 2) throw lm::Exception("This test is only supported on compute capability >= 1.2.");
void* inLattice;
void* outLattice;
CUDA_EXCEPTION_CHECK(cudaMalloc(&inLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int)));
CUDA_EXCEPTION_CHECK(cudaMalloc(&outLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int)));
CUDA_EXCEPTION_CHECK(cudaMemcpy(inLattice, host_inLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int), cudaMemcpyHostToDevice));
CUDA_EXCEPTION_CHECK(cudaMemset(outLattice, 0xFF, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int)));
unsigned int gridXSize = latticeXSize/LS_Z_BLOCK_X_SIZE;
unsigned int gridYSize = latticeYSize;
unsigned int gridZSize = latticeZSize/LS_Z_BLOCK_Z_SIZE;
dim3 gridSize(gridXSize*gridYSize, gridZSize);
dim3 threadBlockSize(LS_Z_BLOCK_X_SIZE, 1, LS_Z_BLOCK_Z_SIZE);
CUDA_EXCEPTION_EXECUTE((cu_PackedSiteSkipZ_kernel<<<gridSize,threadBlockSize>>>((unsigned int*)inLattice, (unsigned int*)outLattice, gridXSize, latticeXSize, latticeYSize, latticeZSize)));
CUDA_EXCEPTION_CHECK(cudaStreamSynchronize(0));
CUDA_EXCEPTION_CHECK(cudaMemcpy(host_outLattice, outLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int), cudaMemcpyDeviceToHost));
CUDA_EXCEPTION_CHECK(cudaFree(outLattice));
CUDA_EXCEPTION_CHECK(cudaFree(inLattice));
}
__global__ void cu_PackedSiteSkipZ_kernel(const unsigned int* inLattice, unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeZSize)
{
unsigned int latticeXYSize = latticeXSize*latticeYSize;
unsigned int latticeXYZSize = latticeXSize*latticeYSize*latticeZSize;
__shared__ unsigned int bx, by, bz;
calculateBlockPosition(&bx, &by, &bz, gridXSize);
// Figure out the offset of this thread in the lattice and the lattice segment.
unsigned int latticeZIndex = (bz*blockDim.z) + threadIdx.z;
unsigned int latticeIndex = (latticeZIndex*latticeXYSize) + (by*latticeXSize) + (bx*blockDim.x) + threadIdx.x;
unsigned int windowZIndex = threadIdx.z+LS_APRON_SIZE;
unsigned int windowIndex = (windowZIndex*blockDim.x) + threadIdx.x;
///////////////////////////////////////////
// Load the lattice into shared memory. //
///////////////////////////////////////////
// Shared memory to store the lattice segment. Each lattice site has four particles, eight bits for each particle.
__shared__ unsigned int window[LS_Z_WINDOW_SIZE*LS_WORDS_PER_SITE];
// Copy the x window from device memory into shared memory.
copyZWindowFromLattice(inLattice, window, latticeIndex, latticeZIndex, latticeZSize, latticeXYSize, latticeXYZSize, windowIndex, windowZIndex);
outLattice[latticeIndex] = window[windowIndex];
outLattice[latticeIndex+latticeXYZSize] = window[windowIndex+LS_Z_WINDOW_SIZE];
}
|
349d315504ac85fbd210f4fbf66ebf2a3236b5a1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "InterpExecution.hpp"
namespace MNN {
namespace CUDA {
#define CUDA_KERNEL_LOOP(i, n) for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x)
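// Standard grid-stride loop: each thread starts at its global index and strides by the total number of launched threads, so any n is covered regardless of grid size.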
template<typename T>
__global__ void INTERP(const int n, const int ih, const int iw, const int oh, const int ow,
const float scaleh, const float scalew, const float offseth, const float offsetw, const T* in, T* out) {
CUDA_KERNEL_LOOP(index, n) {
int x = index % ow;
int tmp = index / ow;
int y = tmp % oh;
int z = tmp / oh;
int ix = min(max(0, (int)floor((float)x*scalew+offsetw)), iw-1);
int iy = min(max(0, (int)floor((float)y*scaleh+offseth)), ih-1);
out[z*oh*ow + y*ow + x] = in[z*ih*iw + iy*iw + ix];
}
}
template<typename T>
__global__ void INTERP_BILINEAR(const int n, const int ih, const int iw, const int oh, const int ow,
const float scaleh, const float scalew, const float offseth, const float offsetw, const T* in, T* out) {
CUDA_KERNEL_LOOP(index, n) {
int x = index % ow;
int tmp = index / ow;
int y = tmp % oh;
int z = tmp / oh;
float fx = x*scalew+offsetw;
int ix_0 = min(max(0, (int)floor(fx)), iw-1);
int ix_1 = min((int)ceil(fx), iw-1);
float fy = y*scaleh+offseth;
int iy_0 = min(max(0, (int)floor(fy)), ih-1);
int iy_1 = min((int)ceil(fy), ih-1);
int index_00 = z*ih*iw + iy_0*iw + ix_0;
int index_01 = z*ih*iw + iy_0*iw + ix_1;
int index_10 = z*ih*iw + iy_1*iw + ix_0;
int index_11 = z*ih*iw + iy_1*iw + ix_1;
float factor_x = fx-ix_0;
float factor_y = fy-iy_0;
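// Bilinear blend: the four neighbouring texels are weighted by (1-fx)(1-fy), fx(1-fy), (1-fx)fy and fx*fy, where factor_x/factor_y are the fractional offsets computed above.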
out[z*oh*ow + y*ow + x] = (1.0-factor_x)*(1.0-factor_y)*in[index_00] + factor_x*(1.0-factor_y)*in[index_01] +
(1.0-factor_x)*factor_y*in[index_10] + factor_x*factor_y*in[index_11];
}
}
InterpExecution::InterpExecution(const Interp* interp, Backend *backend) : Execution(backend) {
mWidthOffset = interp->widthOffset();
mHeightOffset = interp->heightOffset();
mResizeType = interp->resizeType();
mScaleWidth = interp->widthScale();
mScaleHeight = interp->heightScale();
}
InterpExecution::~InterpExecution() {
//do nothing
}
ErrorCode InterpExecution::onResize(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) {
//MNN_ASSERT(inputs.size() == 1);
MNN_ASSERT(outputs.size() == 1);
auto input = inputs[0];
auto output = outputs[0];
mChannel = input->channel();
mBatch = input->batch();
mInputHeight = input->height();
mInputWidth = input->width();
mOutputHeight = output->height();
mOutputWidth = output->width();
mCount = mBatch*mChannel*mOutputHeight*mOutputWidth;
//printf("mBatch:%d-mChannel:%d-mInputHeight:%d- mInputWidth:%d- mOutputHeight:%d- mOutputWidth:%d, mScaleHeight:%f- mScaleWidth:%f %f %f\n", mBatch, mChannel, mInputHeight,mInputWidth,mOutputHeight, mOutputWidth, mScaleHeight, mScaleWidth, mWidthOffset, mHeightOffset);
return NO_ERROR;
}
ErrorCode InterpExecution::onExecute(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) {
auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime();
int block_num = runtime->blocks_num(mCount);
int threads_num = runtime->threads_num();
auto input_addr = (void*)inputs[0]->deviceId();
auto output_addr = (void*)outputs[0]->deviceId();
if(mResizeType == 1){
hipLaunchKernelGGL(( INTERP), dim3(block_num), dim3(threads_num), 0, 0, mCount, mInputHeight, mInputWidth, mOutputHeight, mOutputWidth,
mScaleHeight, mScaleWidth, mHeightOffset, mWidthOffset, (const float *)input_addr, (float *)output_addr);
} else if(mResizeType == 2) {
hipLaunchKernelGGL(( INTERP_BILINEAR), dim3(block_num), dim3(threads_num), 0, 0, mCount, mInputHeight, mInputWidth, mOutputHeight, mOutputWidth,
mScaleHeight, mScaleWidth, mHeightOffset, mWidthOffset, (const float *)input_addr, (float *)output_addr);
}
return NO_ERROR;
}
class InterpCreator : public CUDABackend::Creator {
public:
virtual Execution* onCreate(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs,
const MNN::Op* op, Backend* backend) const override {
auto param = op->main_as_Interp();
if(param->resizeType() != 1 && param->resizeType() != 2) {
MNN_PRINT("CUDA interp resize type:%d not support, back to CPU\n", param->resizeType());
return nullptr;
}
return new InterpExecution(param, backend);
}
};
static CUDACreatorRegister<InterpCreator> __init(OpType_Interp);
}
}
| 349d315504ac85fbd210f4fbf66ebf2a3236b5a1.cu | #include "InterpExecution.hpp"
namespace MNN {
namespace CUDA {
#define CUDA_KERNEL_LOOP(i, n) for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x)
template<typename T>
__global__ void INTERP(const int n, const int ih, const int iw, const int oh, const int ow,
const float scaleh, const float scalew, const float offseth, const float offsetw, const T* in, T* out) {
CUDA_KERNEL_LOOP(index, n) {
int x = index % ow;
int tmp = index / ow;
int y = tmp % oh;
int z = tmp / oh;
int ix = min(max(0, (int)floor((float)x*scalew+offsetw)), iw-1);
int iy = min(max(0, (int)floor((float)y*scaleh+offseth)), ih-1);
out[z*oh*ow + y*ow + x] = in[z*ih*iw + iy*iw + ix];
}
}
template<typename T>
__global__ void INTERP_BILINEAR(const int n, const int ih, const int iw, const int oh, const int ow,
const float scaleh, const float scalew, const float offseth, const float offsetw, const T* in, T* out) {
CUDA_KERNEL_LOOP(index, n) {
int x = index % ow;
int tmp = index / ow;
int y = tmp % oh;
int z = tmp / oh;
float fx = x*scalew+offsetw;
int ix_0 = min(max(0, (int)floor(fx)), iw-1);
int ix_1 = min((int)ceil(fx), iw-1);
float fy = y*scaleh+offseth;
int iy_0 = min(max(0, (int)floor(fy)), ih-1);
int iy_1 = min((int)ceil(fy), ih-1);
int index_00 = z*ih*iw + iy_0*iw + ix_0;
int index_01 = z*ih*iw + iy_0*iw + ix_1;
int index_10 = z*ih*iw + iy_1*iw + ix_0;
int index_11 = z*ih*iw + iy_1*iw + ix_1;
float factor_x = fx-ix_0;
float factor_y = fy-iy_0;
out[z*oh*ow + y*ow + x] = (1.0-factor_x)*(1.0-factor_y)*in[index_00] + factor_x*(1.0-factor_y)*in[index_01] +
(1.0-factor_x)*factor_y*in[index_10] + factor_x*factor_y*in[index_11];
}
}
InterpExecution::InterpExecution(const Interp* interp, Backend *backend) : Execution(backend) {
mWidthOffset = interp->widthOffset();
mHeightOffset = interp->heightOffset();
mResizeType = interp->resizeType();
mScaleWidth = interp->widthScale();
mScaleHeight = interp->heightScale();
}
InterpExecution::~InterpExecution() {
//do nothing
}
ErrorCode InterpExecution::onResize(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) {
//MNN_ASSERT(inputs.size() == 1);
MNN_ASSERT(outputs.size() == 1);
auto input = inputs[0];
auto output = outputs[0];
mChannel = input->channel();
mBatch = input->batch();
mInputHeight = input->height();
mInputWidth = input->width();
mOutputHeight = output->height();
mOutputWidth = output->width();
mCount = mBatch*mChannel*mOutputHeight*mOutputWidth;
//printf("mBatch:%d-mChannel:%d-mInputHeight:%d- mInputWidth:%d- mOutputHeight:%d- mOutputWidth:%d, mScaleHeight:%f- mScaleWidth:%f %f %f\n", mBatch, mChannel, mInputHeight,mInputWidth,mOutputHeight, mOutputWidth, mScaleHeight, mScaleWidth, mWidthOffset, mHeightOffset);
return NO_ERROR;
}
ErrorCode InterpExecution::onExecute(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) {
auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime();
int block_num = runtime->blocks_num(mCount);
int threads_num = runtime->threads_num();
auto input_addr = (void*)inputs[0]->deviceId();
auto output_addr = (void*)outputs[0]->deviceId();
if(mResizeType == 1){
INTERP<<<block_num, threads_num>>>(mCount, mInputHeight, mInputWidth, mOutputHeight, mOutputWidth,
mScaleHeight, mScaleWidth, mHeightOffset, mWidthOffset, (const float *)input_addr, (float *)output_addr);
} else if(mResizeType == 2) {
INTERP_BILINEAR<<<block_num, threads_num>>>(mCount, mInputHeight, mInputWidth, mOutputHeight, mOutputWidth,
mScaleHeight, mScaleWidth, mHeightOffset, mWidthOffset, (const float *)input_addr, (float *)output_addr);
}
return NO_ERROR;
}
class InterpCreator : public CUDABackend::Creator {
public:
virtual Execution* onCreate(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs,
const MNN::Op* op, Backend* backend) const override {
auto param = op->main_as_Interp();
if(param->resizeType() != 1 && param->resizeType() != 2) {
MNN_PRINT("CUDA interp resize type:%d not support, back to CPU\n", param->resizeType());
return nullptr;
}
return new InterpExecution(param, backend);
}
};
static CUDACreatorRegister<InterpCreator> __init(OpType_Interp);
}
}
|
4ecd8a1025b032ad1e4d1be699f62f7d4aefa260.hip | // !!! This is a file automatically generated by hipify!!!
#define GRID_SIZE (1LL << 24)
#define BLOCK_SIZE 512
#define CHUNK_SIZE (GRID_SIZE / BLOCK_SIZE)
#define RNG_MUL 25214903917ULL
#define RNG_ADD 11ULL
#define RNG_MASK ((1ULL << 48) - 1)
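// These are the java.util.Random LCG parameters: multiplier 0x5DEECE66D, increment 11, 48-bit state.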
#ifndef CACTUS_HEIGHT
#define CACTUS_HEIGHT 7
#endif
#include <chrono>
#include <cstdint>
#include <mutex>
#include <thread>
#include <hip/hip_runtime.h>
#ifdef BOINC
#include "boinc_api.h"
#if defined _WIN32 || defined _WIN64
#include "boinc_win.h"
#endif
#endif
__device__ unsigned long long block_add_gpu[BLOCK_SIZE + 1];
__device__ unsigned long long block_mul_gpu[BLOCK_SIZE + 1];
__device__ unsigned long long chunk_add_gpu[CHUNK_SIZE + 1];
__device__ unsigned long long chunk_mul_gpu[CHUNK_SIZE + 1];
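// block_*/chunk_* hold precomputed LCG skip-ahead coefficients (filled in by the host via hipMemcpyToSymbol), letting each thread/block jump its RNG state forward in O(1) instead of stepping one call at a time.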
__device__ int32_t FLOOR_LEVEL;
__device__ inline int32_t next(uint32_t *random, uint32_t *index, int bits)
{
return (random[(*index)++] >> (32 - bits));
}
__device__ inline int32_t next_int(uint32_t *random, uint32_t *index, int32_t bound)
{
int32_t bits, value;
do {
bits = next(random, index, 31);
value = bits % bound;
} while (bits - value + (bound - 1) < 0);
return value;
}
__device__ inline int32_t next_int_unknown(uint32_t *random, uint32_t *index, int32_t bound)
{
if ((bound & -bound) == bound) {
return (int32_t) ((bound * (unsigned long long) next(random, index, 31)) >> 31);
} else {
return next_int(random, index, bound);
}
}
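// The 32x32 heightmap is packed 8 sites per 32-bit word, 4 bits per site (128 words total); extract()/increase() read and bump a column's height relative to FLOOR_LEVEL.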
__device__ inline uint8_t extract(const uint32_t *heightmap, uint16_t pos)
{
return ((heightmap[pos >> 3] >> ((pos & 7) << 2)) & 15) + FLOOR_LEVEL;
}
__device__ inline void increase(uint32_t *heightmap, uint16_t pos, uint8_t addend)
{
heightmap[pos >> 3] += addend << ((pos & 7) << 2);
}
__global__ void crack(unsigned long long seed, unsigned long long *out, unsigned long long *out_n)
{
__shared__ uint32_t random[BLOCK_SIZE + 1024];
__shared__ uint32_t skip_index[BLOCK_SIZE + 1024 - 100];
__shared__ uint32_t skip_first[BLOCK_SIZE + 1024 - 102];
__shared__ uint32_t skip_always[BLOCK_SIZE + 1024 - 102];
__shared__ uint32_t floor_skip[BLOCK_SIZE + 1024 - 102];
__shared__ uint8_t floor_terrain[BLOCK_SIZE + 1024 - 102];
__shared__ uint32_t offset_skip[BLOCK_SIZE + 1024 - 4];
__shared__ uint8_t offset_height[BLOCK_SIZE + 1024 - 4];
uint32_t heightmap[128];
uint32_t random_index;
seed = (seed * chunk_mul_gpu[blockIdx.x] + chunk_add_gpu[blockIdx.x]) & RNG_MASK;
seed = (seed * block_mul_gpu[threadIdx.x] + block_add_gpu[threadIdx.x]) & RNG_MASK;
unsigned long long seed2 = seed;
seed = ((seed - 11ULL) * 246154705703781ULL) & RNG_MASK;
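// 246154705703781 is the multiplicative inverse of RNG_MUL modulo 2^48, so this steps the LCG back once: 'seed' (the value reported on success) is the state one call before 'seed2', which generates this thread's random stream.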
random[threadIdx.x + BLOCK_SIZE * 0] = (uint32_t) (seed2 >> 16);
for (int i = threadIdx.x + BLOCK_SIZE; i < BLOCK_SIZE + 1024; i += BLOCK_SIZE) {
seed2 = (seed2 * block_mul_gpu[BLOCK_SIZE] + block_add_gpu[BLOCK_SIZE]) & RNG_MASK;
random[i] = (uint32_t) (seed2 >> 16);
}
for (int i = 0; i < 128; i++) {
heightmap[i] = 0;
}
__syncthreads();
for (int i = threadIdx.x; i < BLOCK_SIZE + 1024 - 4; i += BLOCK_SIZE) {
random_index = i;
uint8_t offset = next_int_unknown(random, &random_index, next_int(random, &random_index, 3) + 1) + 1;
offset_height[i] = offset;
offset_skip[i] = random_index;
}
__syncthreads();
for (int i = threadIdx.x; i < BLOCK_SIZE + 1024 - 100; i += BLOCK_SIZE) {
random_index = i;
for (int j = 0; j < 10; j++) {
random_index += 6;
random_index = offset_skip[random_index];
}
skip_index[i] = random_index;
}
__syncthreads();
for (int i = threadIdx.x; i < BLOCK_SIZE + 1024 - 102; i += BLOCK_SIZE) {
random_index = i + 2;
int16_t terrain = next_int_unknown(random, &random_index, (FLOOR_LEVEL + 1) * 2);
floor_terrain[i] = terrain;
floor_skip[i] = random_index;
if (terrain - 3 > FLOOR_LEVEL + CACTUS_HEIGHT + 1) {
skip_first[i] = skip_index[random_index];
skip_always[i] = skip_index[random_index];
} else if (terrain - 3 > FLOOR_LEVEL + 1) {
skip_first[i] = skip_index[random_index];
skip_always[i] = 0;
} else if (terrain + 3 <= FLOOR_LEVEL && terrain - 3 >= 0) {
skip_first[i] = random_index + 60;
skip_always[i] = random_index + 60;
} else {
skip_first[i] = 0;
skip_always[i] = 0;
}
}
__syncthreads();
random_index = threadIdx.x;
uint16_t best = 0;
bool changed = false;
int i = 0;
for (; i < 10 && skip_first[random_index]; i++) {
random_index = skip_first[random_index];
}
for (; i < 10; i++) {
if (!changed && skip_first[random_index]) {
random_index = skip_first[random_index];
continue;
}
uint16_t bx = next(random, &random_index, 4) + 8;
uint16_t bz = next(random, &random_index, 4) + 8;
uint16_t initial = bx * 32 + bz;
int16_t terrain;
if (extract(heightmap, initial) == FLOOR_LEVEL) {
if (skip_always[random_index - 2]) {
random_index = skip_always[random_index - 2];
continue;
}
terrain = floor_terrain[random_index - 2];
random_index = floor_skip[random_index - 2];
} else {
terrain = next_int_unknown(random, &random_index, (extract(heightmap, initial) + 1) * 2);
if (terrain + 3 <= FLOOR_LEVEL && terrain - 3 >= 0) {
random_index += 60;
continue;
}
}
if (terrain - 3 > extract(heightmap, best) + 1) {
random_index = skip_index[random_index];
continue;
}
for (int j = 0; j < 10; j++) {
int16_t bx = next(random, &random_index, 3) - next(random, &random_index, 3);
int16_t by = next(random, &random_index, 2) - next(random, &random_index, 2);
int16_t bz = next(random, &random_index, 3) - next(random, &random_index, 3);
uint16_t xz = initial + bx * 32 + bz;
int16_t y = (int16_t) terrain + by;
if (y <= extract(heightmap, xz) && y >= 0) continue;
uint8_t offset = offset_height[random_index];
random_index = offset_skip[random_index];
if (y != extract(heightmap, xz) + 1) continue;
if (y == FLOOR_LEVEL + 1) {
uint8_t mask = 0;
if (bz != 0x00) mask |= extract(heightmap, xz - 1) - FLOOR_LEVEL;
if (bz != 0x1F) mask |= extract(heightmap, xz + 1) - FLOOR_LEVEL;
if (bx != 0x00) mask |= extract(heightmap, xz - 32) - FLOOR_LEVEL;
if (bx != 0x1F) mask |= extract(heightmap, xz + 32) - FLOOR_LEVEL;
if (mask) continue;
}
increase(heightmap, xz, offset);
changed = true;
if (extract(heightmap, xz) > extract(heightmap, best)) best = xz;
}
}
if (extract(heightmap, best) - FLOOR_LEVEL >= CACTUS_HEIGHT) {
out[atomicAdd((unsigned long long*) out_n, 1ULL)] = seed;
}
}
unsigned long long block_add[BLOCK_SIZE + 1];
unsigned long long block_mul[BLOCK_SIZE + 1];
unsigned long long chunk_add[CHUNK_SIZE + 1];
unsigned long long chunk_mul[CHUNK_SIZE + 1];
unsigned long long offset = 0;
unsigned long long seed = 0;
unsigned long long total_seeds = 0;
time_t elapsed_chkpoint = 0;
std::mutex mutexcuda;
std::thread threads[1];
unsigned long long BEGIN;
unsigned long long BEGINOrig;
unsigned long long END;
int checkpoint_now;
struct checkpoint_vars {
unsigned long long offset;
time_t elapsed_chkpoint;
};
int32_t floor_level_host;
void run(int gpu_device)
{
FILE* kaktseeds = fopen("kaktseeds.txt", "w+");
unsigned long long *out;
unsigned long long *out_n;
hipSetDevice(gpu_device);
hipMallocManaged(&out, GRID_SIZE * sizeof(*out));
hipMallocManaged(&out_n, sizeof(*out_n));
hipMemcpyToSymbol(block_add_gpu, block_add, (BLOCK_SIZE + 1) * sizeof(*block_add));
hipMemcpyToSymbol(block_mul_gpu, block_mul, (BLOCK_SIZE + 1) * sizeof(*block_mul));
hipMemcpyToSymbol(chunk_add_gpu, chunk_add, (CHUNK_SIZE + 1) * sizeof(*chunk_add));
hipMemcpyToSymbol(chunk_mul_gpu, chunk_mul, (CHUNK_SIZE + 1) * sizeof(*chunk_mul));
hipMemcpyToSymbol(FLOOR_LEVEL, &floor_level_host, sizeof(int32_t));
while (true) {
*out_n = 0;
{
if (offset >= END) break;
unsigned long long seed_gpu = (seed * RNG_MUL + RNG_ADD) & RNG_MASK;
hipLaunchKernelGGL(( crack), dim3(CHUNK_SIZE), dim3(BLOCK_SIZE), 0, 0, seed_gpu, out, out_n);
offset += GRID_SIZE;
seed = (seed * chunk_mul[CHUNK_SIZE] + chunk_add[CHUNK_SIZE]) & RNG_MASK;
}
hipDeviceSynchronize();
{
total_seeds += *out_n;
for (unsigned long long i = 0; i < *out_n; i++){
fprintf(kaktseeds,"s: %llu,\n", out[i]);
}
fflush(kaktseeds);
}
}
fclose(kaktseeds);
hipFree(out_n);
hipFree(out);
}
int main(int argc, char *argv[])
{
#ifdef BOINC
BOINC_OPTIONS options;
boinc_options_defaults(options);
options.normal_thread_priority = true;
boinc_init_options(&options);
#endif
block_add[0] = 0;
block_mul[0] = 1;
for (unsigned long long i = 0; i < BLOCK_SIZE; i++) {
block_add[i + 1] = (block_add[i] * RNG_MUL + RNG_ADD) & RNG_MASK;
block_mul[i + 1] = (block_mul[i] * RNG_MUL) & RNG_MASK;
}
chunk_add[0] = 0;
chunk_mul[0] = 1;
for (unsigned long long i = 0; i < CHUNK_SIZE; i++) {
chunk_add[i + 1] = (chunk_add[i] * block_mul[BLOCK_SIZE] + block_add[BLOCK_SIZE]) & RNG_MASK;
chunk_mul[i + 1] = (chunk_mul[i] * block_mul[BLOCK_SIZE]) & RNG_MASK;
}
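// Composing the LCG k times is again an affine map x -> x*mul_k + add_k (mod 2^48); block_* covers 1..BLOCK_SIZE single steps and chunk_* covers 1..CHUNK_SIZE whole-block strides, which the kernel uses to jump each thread to its own state.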
int gpu_device = 0;
for (int i = 1; i < argc; i += 2) {
const char *param = argv[i];
if (strcmp(param, "-d") == 0 || strcmp(param, "--device") == 0) {
gpu_device = atoi(argv[i + 1]);
} else if (strcmp(param, "-s") == 0 || strcmp(param, "--start") == 0) {
sscanf(argv[i + 1], "%llu", &BEGIN);
} else if (strcmp(param, "-e") == 0 || strcmp(param, "--end") == 0) {
sscanf(argv[i + 1], "%llu", &END);
} else if (strcmp(param, "-h") == 0 || strcmp(param, "--height") == 0){
sscanf(argv[i + 1], "%llu", &floor_level_host);
} else {
fprintf(stderr,"Unknown parameter: %s\n", param);
}
}
BEGINOrig = BEGIN;
FILE *checkpoint_data = boinc_fopen("kaktpoint.txt", "rb");
if (!checkpoint_data) {
fprintf(stderr,"No checkpoint to load\n");
} else {
#ifdef BOINC
boinc_begin_critical_section();
#endif
struct checkpoint_vars data_store;
fread(&data_store, sizeof(data_store), 1, checkpoint_data);
BEGIN = data_store.offset;
elapsed_chkpoint = data_store.elapsed_chkpoint;
fprintf(stderr,"Checkpoint loaded, task time %d s, seed pos: %llu\n", elapsed_chkpoint, BEGIN);
fclose(checkpoint_data);
#ifdef BOINC
boinc_end_critical_section();
#endif
}
for (; offset + GRID_SIZE <= BEGIN; offset += GRID_SIZE)
seed = (seed * chunk_mul[CHUNK_SIZE] + chunk_add[CHUNK_SIZE]) & RNG_MASK;
for (; offset + 1 <= BEGIN; offset += 1)
seed = (seed * RNG_MUL + RNG_ADD) & RNG_MASK;
#ifdef BOINC
APP_INIT_DATA aid;
boinc_get_init_data(aid);
if (aid.gpu_device_num >= 0) {
gpu_device = aid.gpu_device_num;
fprintf(stderr,"boinc gpu %i gpuindex: %i \n", aid.gpu_device_num, gpu_device);
} else {
fprintf(stderr,"stndalone gpuindex %i \n", gpu_device);
}
#endif
threads[0] = std::thread(run, gpu_device);
checkpoint_now = 0;
time_t start_time = time(NULL);
while (offset < END) {
using namespace std::chrono_literals;
std::this_thread::sleep_for(1s);
time_t elapsed = time(NULL) - start_time;
unsigned long long count = offset - BEGIN;
double frac = (double) count / (double) (END - BEGIN);
#ifdef BOINC
boinc_fraction_done(frac);
#endif
checkpoint_now++;
if (checkpoint_now >= 30 || boinc_time_to_checkpoint() ){ // 30 for 30 secs before checkpoint
#ifdef BOINC
boinc_begin_critical_section(); // Boinc should not interrupt this
#endif
// Checkpointing section below
boinc_delete_file("kaktpoint.txt"); // Don't touch, same func as normal fdel
FILE *checkpoint_data = boinc_fopen("kaktpoint.txt", "wb");
struct checkpoint_vars data_store;
data_store.offset = offset;
data_store.elapsed_chkpoint = elapsed_chkpoint + elapsed;
fwrite(&data_store, sizeof(data_store), 1, checkpoint_data);
fclose(checkpoint_data);
checkpoint_now=0;
#ifdef BOINC
boinc_end_critical_section();
boinc_checkpoint_completed(); // Checkpointing completed
#endif
}
}
#ifdef BOINC
boinc_begin_critical_section();
#endif
for (std::thread& thread : threads)
thread.join();
time_t elapsed = time(NULL) - start_time;
unsigned long long count = offset - BEGIN;
double done = (double) count / 1000000.0;
double speed = done / (double) elapsed;
fprintf(stderr, "\nSpeed: %.2lfm/s\n", speed );
fprintf(stderr, "Done\n");
fprintf(stderr, "Processed: %llu seeds in %.2lfs seconds\n", END - BEGINOrig, (double) elapsed_chkpoint + (double) elapsed );
#ifdef BOINC
boinc_end_critical_section();
#endif
boinc_finish(0);
} | 4ecd8a1025b032ad1e4d1be699f62f7d4aefa260.cu | #define GRID_SIZE (1LL << 24)
#define BLOCK_SIZE 512
#define CHUNK_SIZE (GRID_SIZE / BLOCK_SIZE)
#define RNG_MUL 25214903917ULL
#define RNG_ADD 11ULL
#define RNG_MASK ((1ULL << 48) - 1)
#ifndef CACTUS_HEIGHT
#define CACTUS_HEIGHT 7
#endif
#include <chrono>
#include <cstdint>
#include <mutex>
#include <thread>
#include <cuda.h>
#ifdef BOINC
#include "boinc_api.h"
#if defined _WIN32 || defined _WIN64
#include "boinc_win.h"
#endif
#endif
__device__ unsigned long long block_add_gpu[BLOCK_SIZE + 1];
__device__ unsigned long long block_mul_gpu[BLOCK_SIZE + 1];
__device__ unsigned long long chunk_add_gpu[CHUNK_SIZE + 1];
__device__ unsigned long long chunk_mul_gpu[CHUNK_SIZE + 1];
__device__ int32_t FLOOR_LEVEL;
__device__ inline int32_t next(uint32_t *random, uint32_t *index, int bits)
{
return (random[(*index)++] >> (32 - bits));
}
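// next() mirrors java.util.Random::next(bits), except that the successive LCG
// states have already been expanded into the caller's shared `random` buffer
// (see crack() below); it simply takes the top `bits` bits of the next 32-bit
// output and advances the shared index.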
__device__ inline int32_t next_int(uint32_t *random, uint32_t *index, int32_t bound)
{
int32_t bits, value;
do {
bits = next(random, index, 31);
value = bits % bound;
} while (bits - value + (bound - 1) < 0);
return value;
}
__device__ inline int32_t next_int_unknown(uint32_t *random, uint32_t *index, int32_t bound)
{
if ((bound & -bound) == bound) {
return (int32_t) ((bound * (unsigned long long) next(random, index, 31)) >> 31);
} else {
return next_int(random, index, bound);
}
}
__device__ inline uint8_t extract(const uint32_t *heightmap, uint16_t pos)
{
return ((heightmap[pos >> 3] >> ((pos & 7) << 2)) & 15) + FLOOR_LEVEL;
}
__device__ inline void increase(uint32_t *heightmap, uint16_t pos, uint8_t addend)
{
heightmap[pos >> 3] += addend << ((pos & 7) << 2);
}
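// The heightmap packs eight 4-bit column heights per uint32_t, stored relative
// to FLOOR_LEVEL: extract() reads one nibble and adds FLOOR_LEVEL back, while
// increase() bumps a nibble in place (no overflow check, so per-column growth
// is assumed to stay below 16).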
__global__ void crack(unsigned long long seed, unsigned long long *out, unsigned long long *out_n)
{
__shared__ uint32_t random[BLOCK_SIZE + 1024];
__shared__ uint32_t skip_index[BLOCK_SIZE + 1024 - 100];
__shared__ uint32_t skip_first[BLOCK_SIZE + 1024 - 102];
__shared__ uint32_t skip_always[BLOCK_SIZE + 1024 - 102];
__shared__ uint32_t floor_skip[BLOCK_SIZE + 1024 - 102];
__shared__ uint8_t floor_terrain[BLOCK_SIZE + 1024 - 102];
__shared__ uint32_t offset_skip[BLOCK_SIZE + 1024 - 4];
__shared__ uint8_t offset_height[BLOCK_SIZE + 1024 - 4];
uint32_t heightmap[128];
uint32_t random_index;
seed = (seed * chunk_mul_gpu[blockIdx.x] + chunk_add_gpu[blockIdx.x]) & RNG_MASK;
seed = (seed * block_mul_gpu[threadIdx.x] + block_add_gpu[threadIdx.x]) & RNG_MASK;
unsigned long long seed2 = seed;
seed = ((seed - 11ULL) * 246154705703781ULL) & RNG_MASK;
random[threadIdx.x + BLOCK_SIZE * 0] = (uint32_t) (seed2 >> 16);
for (int i = threadIdx.x + BLOCK_SIZE; i < BLOCK_SIZE + 1024; i += BLOCK_SIZE) {
seed2 = (seed2 * block_mul_gpu[BLOCK_SIZE] + block_add_gpu[BLOCK_SIZE]) & RNG_MASK;
random[i] = (uint32_t) (seed2 >> 16);
}
for (int i = 0; i < 128; i++) {
heightmap[i] = 0;
}
__syncthreads();
for (int i = threadIdx.x; i < BLOCK_SIZE + 1024 - 4; i += BLOCK_SIZE) {
random_index = i;
uint8_t offset = next_int_unknown(random, &random_index, next_int(random, &random_index, 3) + 1) + 1;
offset_height[i] = offset;
offset_skip[i] = random_index;
}
__syncthreads();
for (int i = threadIdx.x; i < BLOCK_SIZE + 1024 - 100; i += BLOCK_SIZE) {
random_index = i;
for (int j = 0; j < 10; j++) {
random_index += 6;
random_index = offset_skip[random_index];
}
skip_index[i] = random_index;
}
__syncthreads();
for (int i = threadIdx.x; i < BLOCK_SIZE + 1024 - 102; i += BLOCK_SIZE) {
random_index = i + 2;
int16_t terrain = next_int_unknown(random, &random_index, (FLOOR_LEVEL + 1) * 2);
floor_terrain[i] = terrain;
floor_skip[i] = random_index;
if (terrain - 3 > FLOOR_LEVEL + CACTUS_HEIGHT + 1) {
skip_first[i] = skip_index[random_index];
skip_always[i] = skip_index[random_index];
} else if (terrain - 3 > FLOOR_LEVEL + 1) {
skip_first[i] = skip_index[random_index];
skip_always[i] = 0;
} else if (terrain + 3 <= FLOOR_LEVEL && terrain - 3 >= 0) {
skip_first[i] = random_index + 60;
skip_always[i] = random_index + 60;
} else {
skip_first[i] = 0;
skip_always[i] = 0;
}
}
__syncthreads();
random_index = threadIdx.x;
uint16_t best = 0;
bool changed = false;
int i = 0;
for (; i < 10 && skip_first[random_index]; i++) {
random_index = skip_first[random_index];
}
for (; i < 10; i++) {
if (!changed && skip_first[random_index]) {
random_index = skip_first[random_index];
continue;
}
uint16_t bx = next(random, &random_index, 4) + 8;
uint16_t bz = next(random, &random_index, 4) + 8;
uint16_t initial = bx * 32 + bz;
int16_t terrain;
if (extract(heightmap, initial) == FLOOR_LEVEL) {
if (skip_always[random_index - 2]) {
random_index = skip_always[random_index - 2];
continue;
}
terrain = floor_terrain[random_index - 2];
random_index = floor_skip[random_index - 2];
} else {
terrain = next_int_unknown(random, &random_index, (extract(heightmap, initial) + 1) * 2);
if (terrain + 3 <= FLOOR_LEVEL && terrain - 3 >= 0) {
random_index += 60;
continue;
}
}
if (terrain - 3 > extract(heightmap, best) + 1) {
random_index = skip_index[random_index];
continue;
}
for (int j = 0; j < 10; j++) {
int16_t bx = next(random, &random_index, 3) - next(random, &random_index, 3);
int16_t by = next(random, &random_index, 2) - next(random, &random_index, 2);
int16_t bz = next(random, &random_index, 3) - next(random, &random_index, 3);
uint16_t xz = initial + bx * 32 + bz;
int16_t y = (int16_t) terrain + by;
if (y <= extract(heightmap, xz) && y >= 0) continue;
uint8_t offset = offset_height[random_index];
random_index = offset_skip[random_index];
if (y != extract(heightmap, xz) + 1) continue;
if (y == FLOOR_LEVEL + 1) {
uint8_t mask = 0;
if (bz != 0x00) mask |= extract(heightmap, xz - 1) - FLOOR_LEVEL;
if (bz != 0x1F) mask |= extract(heightmap, xz + 1) - FLOOR_LEVEL;
if (bx != 0x00) mask |= extract(heightmap, xz - 32) - FLOOR_LEVEL;
if (bx != 0x1F) mask |= extract(heightmap, xz + 32) - FLOOR_LEVEL;
if (mask) continue;
}
increase(heightmap, xz, offset);
changed = true;
if (extract(heightmap, xz) > extract(heightmap, best)) best = xz;
}
}
if (extract(heightmap, best) - FLOOR_LEVEL >= CACTUS_HEIGHT) {
out[atomicAdd((unsigned long long*) out_n, 1ULL)] = seed;
}
}
unsigned long long block_add[BLOCK_SIZE + 1];
unsigned long long block_mul[BLOCK_SIZE + 1];
unsigned long long chunk_add[CHUNK_SIZE + 1];
unsigned long long chunk_mul[CHUNK_SIZE + 1];
unsigned long long offset = 0;
unsigned long long seed = 0;
unsigned long long total_seeds = 0;
time_t elapsed_chkpoint = 0;
std::mutex mutexcuda;
std::thread threads[1];
unsigned long long BEGIN;
unsigned long long BEGINOrig;
unsigned long long END;
int checkpoint_now;
struct checkpoint_vars {
unsigned long long offset;
time_t elapsed_chkpoint;
};
int32_t floor_level_host;
void run(int gpu_device)
{
FILE* kaktseeds = fopen("kaktseeds.txt", "w+");
unsigned long long *out;
unsigned long long *out_n;
cudaSetDevice(gpu_device);
cudaMallocManaged(&out, GRID_SIZE * sizeof(*out));
cudaMallocManaged(&out_n, sizeof(*out_n));
cudaMemcpyToSymbol(block_add_gpu, block_add, (BLOCK_SIZE + 1) * sizeof(*block_add));
cudaMemcpyToSymbol(block_mul_gpu, block_mul, (BLOCK_SIZE + 1) * sizeof(*block_mul));
cudaMemcpyToSymbol(chunk_add_gpu, chunk_add, (CHUNK_SIZE + 1) * sizeof(*chunk_add));
cudaMemcpyToSymbol(chunk_mul_gpu, chunk_mul, (CHUNK_SIZE + 1) * sizeof(*chunk_mul));
cudaMemcpyToSymbol(FLOOR_LEVEL, &floor_level_host, sizeof(int32_t));
while (true) {
*out_n = 0;
{
if (offset >= END) break;
unsigned long long seed_gpu = (seed * RNG_MUL + RNG_ADD) & RNG_MASK;
crack<<<CHUNK_SIZE, BLOCK_SIZE>>>(seed_gpu, out, out_n);
offset += GRID_SIZE;
seed = (seed * chunk_mul[CHUNK_SIZE] + chunk_add[CHUNK_SIZE]) & RNG_MASK;
}
cudaDeviceSynchronize();
{
total_seeds += *out_n;
for (unsigned long long i = 0; i < *out_n; i++){
fprintf(kaktseeds,"s: %llu,\n", out[i]);
}
fflush(kaktseeds);
}
}
fclose(kaktseeds);
cudaFree(out_n);
cudaFree(out);
}
int main(int argc, char *argv[])
{
#ifdef BOINC
BOINC_OPTIONS options;
boinc_options_defaults(options);
options.normal_thread_priority = true;
boinc_init_options(&options);
#endif
block_add[0] = 0;
block_mul[0] = 1;
for (unsigned long long i = 0; i < BLOCK_SIZE; i++) {
block_add[i + 1] = (block_add[i] * RNG_MUL + RNG_ADD) & RNG_MASK;
block_mul[i + 1] = (block_mul[i] * RNG_MUL) & RNG_MASK;
}
chunk_add[0] = 0;
chunk_mul[0] = 1;
for (unsigned long long i = 0; i < CHUNK_SIZE; i++) {
chunk_add[i + 1] = (chunk_add[i] * block_mul[BLOCK_SIZE] + block_add[BLOCK_SIZE]) & RNG_MASK;
chunk_mul[i + 1] = (chunk_mul[i] * block_mul[BLOCK_SIZE]) & RNG_MASK;
}
int gpu_device = 0;
for (int i = 1; i < argc; i += 2) {
const char *param = argv[i];
if (strcmp(param, "-d") == 0 || strcmp(param, "--device") == 0) {
gpu_device = atoi(argv[i + 1]);
} else if (strcmp(param, "-s") == 0 || strcmp(param, "--start") == 0) {
sscanf(argv[i + 1], "%llu", &BEGIN);
} else if (strcmp(param, "-e") == 0 || strcmp(param, "--end") == 0) {
sscanf(argv[i + 1], "%llu", &END);
} else if (strcmp(param, "-h") == 0 || strcmp(param, "--height") == 0){
sscanf(argv[i + 1], "%llu", &floor_level_host);
} else {
fprintf(stderr,"Unknown parameter: %s\n", param);
}
}
BEGINOrig = BEGIN;
FILE *checkpoint_data = boinc_fopen("kaktpoint.txt", "rb");
if (!checkpoint_data) {
fprintf(stderr,"No checkpoint to load\n");
} else {
#ifdef BOINC
boinc_begin_critical_section();
#endif
struct checkpoint_vars data_store;
fread(&data_store, sizeof(data_store), 1, checkpoint_data);
BEGIN = data_store.offset;
elapsed_chkpoint = data_store.elapsed_chkpoint;
fprintf(stderr,"Checkpoint loaded, task time %d s, seed pos: %llu\n", elapsed_chkpoint, BEGIN);
fclose(checkpoint_data);
#ifdef BOINC
boinc_end_critical_section();
#endif
}
for (; offset + GRID_SIZE <= BEGIN; offset += GRID_SIZE)
seed = (seed * chunk_mul[CHUNK_SIZE] + chunk_add[CHUNK_SIZE]) & RNG_MASK;
for (; offset + 1 <= BEGIN; offset += 1)
seed = (seed * RNG_MUL + RNG_ADD) & RNG_MASK;
#ifdef BOINC
APP_INIT_DATA aid;
boinc_get_init_data(aid);
if (aid.gpu_device_num >= 0) {
gpu_device = aid.gpu_device_num;
fprintf(stderr,"boinc gpu %i gpuindex: %i \n", aid.gpu_device_num, gpu_device);
} else {
fprintf(stderr,"stndalone gpuindex %i \n", gpu_device);
}
#endif
threads[0] = std::thread(run, gpu_device);
checkpoint_now = 0;
time_t start_time = time(NULL);
while (offset < END) {
using namespace std::chrono_literals;
std::this_thread::sleep_for(1s);
time_t elapsed = time(NULL) - start_time;
unsigned long long count = offset - BEGIN;
double frac = (double) count / (double) (END - BEGIN);
#ifdef BOINC
boinc_fraction_done(frac);
#endif
checkpoint_now++;
if (checkpoint_now >= 30 || boinc_time_to_checkpoint() ){ // 30 for 30 secs before checkpoint
#ifdef BOINC
boinc_begin_critical_section(); // Boinc should not interrupt this
#endif
// Checkpointing section below
boinc_delete_file("kaktpoint.txt"); // Don't touch, same func as normal fdel
FILE *checkpoint_data = boinc_fopen("kaktpoint.txt", "wb");
struct checkpoint_vars data_store;
data_store.offset = offset;
data_store.elapsed_chkpoint = elapsed_chkpoint + elapsed;
fwrite(&data_store, sizeof(data_store), 1, checkpoint_data);
fclose(checkpoint_data);
checkpoint_now=0;
#ifdef BOINC
boinc_end_critical_section();
boinc_checkpoint_completed(); // Checkpointing completed
#endif
}
}
#ifdef BOINC
boinc_begin_critical_section();
#endif
for (std::thread& thread : threads)
thread.join();
time_t elapsed = time(NULL) - start_time;
unsigned long long count = offset - BEGIN;
double done = (double) count / 1000000.0;
double speed = done / (double) elapsed;
fprintf(stderr, "\nSpeed: %.2lfm/s\n", speed );
fprintf(stderr, "Done\n");
fprintf(stderr, "Processed: %llu seeds in %.2lfs seconds\n", END - BEGINOrig, (double) elapsed_chkpoint + (double) elapsed );
#ifdef BOINC
boinc_end_critical_section();
#endif
boinc_finish(0);
} |
70fa839085ecea56c4cde0b86d105494743b5b79.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <defs.h>
#include <symbol.h>
#include <gpudefs.h>
#include <dvector.h>
#include <gpuconsts.cuh>
__global__ __launch_bounds__( WS * SOW, MBO )
void algSAT_stage4( float *g_out, const float *g_in) {
const int tx = threadIdx.x, ty = threadIdx.y, bx = blockIdx.x, by = blockIdx.y, col = bx*WS+tx, row0 = by*WS;
__shared__ float s_block[ WS ][ WS+1 ];
float (*bdata)[WS+1] = (float (*)[WS+1]) &s_block[ty][tx];
g_in += (row0+ty)*c_width+col;
#pragma unroll
for (int i = 0; i < WS-(WS%SOW); i+=SOW) {
**bdata = *g_in;
bdata += SOW;
g_in += SOW * c_width;
}
if( ty < WS%SOW ) {
**bdata = *g_in;
}
__syncthreads();
if( ty == 0 ) {
{ // calculate y -----------------------
float (*bdata)[WS+1] = (float (*)[WS+1]) &s_block[0][tx];
float prev = 0.f;
#pragma unroll
for (int i = 0; i < WS; ++i, ++bdata)
**bdata = prev = **bdata + prev;
}
{ // calculate x -----------------------
float *bdata = s_block[tx];
float prev = 0.f;
#pragma unroll
for (int i = 0; i < WS; ++i, ++bdata)
*bdata = prev = *bdata + prev;
}
}
__syncthreads();
bdata = (float (*)[WS+1]) &s_block[ty][tx];
g_out += (row0+ty)*c_width+col;
#pragma unroll
for (int i = 0; i < WS-(WS%SOW); i+=SOW) {
*g_out = **bdata;
bdata += SOW;
g_out += SOW * c_width;
}
if( ty < WS%SOW ) {
*g_out = **bdata;
}
}
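// algSAT_stage4: each thread block stages one WS x WS (32x32) input tile in
// shared memory, the threads with ty == 0 then run an inclusive prefix sum down
// every column followed by one along every row, and the tile is written back
// out. This appears to compute per-tile summed-area tables only; no inter-block
// carries are applied in this stripped-down stage.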
//-- Host ---------------------------------------------------------------------
__host__
void calc_borders( int& left,
int& top,
int& right,
int& bottom,
const int& w,
const int& h,
const int& extb ) {
left = extb*WS;
top = extb*WS;
if( extb > 0 ) {
right = (extb+1)*WS-(w%WS);
bottom = (extb+1)*WS-(h%WS);
} else {
right = WS-(w%WS);
if( right == WS ) right = 0;
bottom = WS-(h%WS);
if( bottom == WS ) bottom = 0;
}
}
__host__
bool extend( const int& w,
const int& h,
const int& extb ) {
return (w%32>0 or h%32>0 or extb>0);
}
__host__
void calc_alg_setup( alg_setup& algs,
const int& w,
const int& h ) {
algs.width = w;
algs.height = h;
algs.m_size = (w+WS-1)/WS;
algs.n_size = (h+WS-1)/WS;
algs.last_m = algs.m_size-1;
algs.last_n = algs.n_size-1;
algs.border = 0;
algs.carry_width = algs.m_size*WS;
algs.carry_height = algs.n_size*WS;
algs.carry_height = h;
algs.inv_width = 1.f/(float)w;
algs.inv_height = 1.f/(float)h;
}
__host__
void calc_alg_setup( alg_setup& algs,
const int& w,
const int& h,
const int& extb ) {
int bleft, btop, bright, bbottom;
calc_borders( bleft, btop, bright, bbottom, w, h, extb );
algs.width = w;
algs.height = h;
algs.m_size = (w+bleft+bright+WS-1)/WS;
algs.n_size = (h+btop+bbottom+WS-1)/WS;
algs.last_m = (bleft+w-1)/WS;
algs.last_n = (btop+h-1)/WS;
algs.border = extb;
algs.carry_width = algs.m_size*WS;
algs.carry_height = algs.n_size*WS;
algs.inv_width = 1.f/(float)w;
algs.inv_height = 1.f/(float)h;
}
__host__
void prepare_algSAT( alg_setup& algs,
dvector<float>& d_inout,
const float *h_in,
const int& w,
const int& h )
{
algs.width = w;
algs.height = h;
if( w % 32 > 0 ) algs.width += (32 - (w % 32));
if( h % 32 > 0 ) algs.height += (32 - (h % 32));
calc_alg_setup( algs, algs.width, algs.height );
up_alg_setup( algs );
d_inout.copy_from( h_in, w, h, algs.width, algs.height );
}
__host__
void algSAT( dvector<float>& d_out,
const dvector<float>& d_in,
const alg_setup& algs ) {
const int nWm = (algs.width+MTS-1)/MTS, nHm = (algs.height+MTS-1)/MTS;
const dim3 cg_img( algs.m_size, algs.n_size );
const dim3 cg_ybar( nWm, 1 );
const dim3 cg_vhat( 1, nHm );
hipLaunchKernelGGL(( algSAT_stage4), dim3(cg_img), dim3(dim3(WS, SOW)) , 0, 0, d_out, d_in);
}
| 70fa839085ecea56c4cde0b86d105494743b5b79.cu | #include <defs.h>
#include <symbol.h>
#include <gpudefs.h>
#include <dvector.h>
#include <gpuconsts.cuh>
__global__ __launch_bounds__( WS * SOW, MBO )
void algSAT_stage4( float *g_out, const float *g_in) {
const int tx = threadIdx.x, ty = threadIdx.y, bx = blockIdx.x, by = blockIdx.y, col = bx*WS+tx, row0 = by*WS;
__shared__ float s_block[ WS ][ WS+1 ];
float (*bdata)[WS+1] = (float (*)[WS+1]) &s_block[ty][tx];
g_in += (row0+ty)*c_width+col;
#pragma unroll
for (int i = 0; i < WS-(WS%SOW); i+=SOW) {
**bdata = *g_in;
bdata += SOW;
g_in += SOW * c_width;
}
if( ty < WS%SOW ) {
**bdata = *g_in;
}
__syncthreads();
if( ty == 0 ) {
{ // calculate y -----------------------
float (*bdata)[WS+1] = (float (*)[WS+1]) &s_block[0][tx];
float prev = 0.f;
#pragma unroll
for (int i = 0; i < WS; ++i, ++bdata)
**bdata = prev = **bdata + prev;
}
{ // calculate x -----------------------
float *bdata = s_block[tx];
float prev = 0.f;
#pragma unroll
for (int i = 0; i < WS; ++i, ++bdata)
*bdata = prev = *bdata + prev;
}
}
__syncthreads();
bdata = (float (*)[WS+1]) &s_block[ty][tx];
g_out += (row0+ty)*c_width+col;
#pragma unroll
for (int i = 0; i < WS-(WS%SOW); i+=SOW) {
*g_out = **bdata;
bdata += SOW;
g_out += SOW * c_width;
}
if( ty < WS%SOW ) {
*g_out = **bdata;
}
}
//-- Host ---------------------------------------------------------------------
__host__
void calc_borders( int& left,
int& top,
int& right,
int& bottom,
const int& w,
const int& h,
const int& extb ) {
left = extb*WS;
top = extb*WS;
if( extb > 0 ) {
right = (extb+1)*WS-(w%WS);
bottom = (extb+1)*WS-(h%WS);
} else {
right = WS-(w%WS);
if( right == WS ) right = 0;
bottom = WS-(h%WS);
if( bottom == WS ) bottom = 0;
}
}
__host__
bool extend( const int& w,
const int& h,
const int& extb ) {
return (w%32>0 or h%32>0 or extb>0);
}
__host__
void calc_alg_setup( alg_setup& algs,
const int& w,
const int& h ) {
algs.width = w;
algs.height = h;
algs.m_size = (w+WS-1)/WS;
algs.n_size = (h+WS-1)/WS;
algs.last_m = algs.m_size-1;
algs.last_n = algs.n_size-1;
algs.border = 0;
algs.carry_width = algs.m_size*WS;
algs.carry_height = algs.n_size*WS;
algs.carry_height = h;
algs.inv_width = 1.f/(float)w;
algs.inv_height = 1.f/(float)h;
}
__host__
void calc_alg_setup( alg_setup& algs,
const int& w,
const int& h,
const int& extb ) {
int bleft, btop, bright, bbottom;
calc_borders( bleft, btop, bright, bbottom, w, h, extb );
algs.width = w;
algs.height = h;
algs.m_size = (w+bleft+bright+WS-1)/WS;
algs.n_size = (h+btop+bbottom+WS-1)/WS;
algs.last_m = (bleft+w-1)/WS;
algs.last_n = (btop+h-1)/WS;
algs.border = extb;
algs.carry_width = algs.m_size*WS;
algs.carry_height = algs.n_size*WS;
algs.inv_width = 1.f/(float)w;
algs.inv_height = 1.f/(float)h;
}
__host__
void prepare_algSAT( alg_setup& algs,
dvector<float>& d_inout,
const float *h_in,
const int& w,
const int& h )
{
algs.width = w;
algs.height = h;
if( w % 32 > 0 ) algs.width += (32 - (w % 32));
if( h % 32 > 0 ) algs.height += (32 - (h % 32));
calc_alg_setup( algs, algs.width, algs.height );
up_alg_setup( algs );
d_inout.copy_from( h_in, w, h, algs.width, algs.height );
}
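// prepare_algSAT rounds the image dimensions up to multiples of 32 (WS) so that
// every tile seen by algSAT_stage4 is full, calls up_alg_setup (presumably
// uploading the setup to constant memory), and copies the host image into a
// device buffer of the padded size.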
__host__
void algSAT( dvector<float>& d_out,
const dvector<float>& d_in,
const alg_setup& algs ) {
const int nWm = (algs.width+MTS-1)/MTS, nHm = (algs.height+MTS-1)/MTS;
const dim3 cg_img( algs.m_size, algs.n_size );
const dim3 cg_ybar( nWm, 1 );
const dim3 cg_vhat( 1, nHm );
algSAT_stage4<<< cg_img, dim3(WS, SOW) >>>( d_out, d_in);
}
|
0b66bc283027566313feec21d84ae3ceafab2b7e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <gtest/gtest.h>
#include <Eigen/Dense>
#include <cassert>
#include "utils/cuda/errors.cuh"
#include "utils/tsdf/voxel_hash.cuh"
#define MAX_BLOCKS 128
class VoxelHashTest : public ::testing::Test {
protected:
VoxelHashTest() {
CUDA_SAFE_CALL(hipMallocManaged(&voxel, sizeof(VoxelRGBW) * MAX_BLOCKS * BLOCK_VOLUME));
CUDA_SAFE_CALL(hipMallocManaged(&voxel_block, sizeof(VoxelBlock) * MAX_BLOCKS));
CUDA_SAFE_CALL(
hipMallocManaged(&point, sizeof(Eigen::Matrix<short, 3, 1>) * MAX_BLOCKS * BLOCK_VOLUME));
CUDA_SAFE_CALL(hipMallocManaged(&block_pos, sizeof(Eigen::Matrix<short, 3, 1>) * MAX_BLOCKS));
}
~VoxelHashTest() {
voxel_hash_table.ReleaseMemory();
CUDA_SAFE_CALL(hipFree(voxel));
CUDA_SAFE_CALL(hipFree(voxel_block));
CUDA_SAFE_CALL(hipFree(point));
CUDA_SAFE_CALL(hipFree(block_pos));
}
VoxelHashTable voxel_hash_table;
VoxelRGBW* voxel;
VoxelBlock* voxel_block;
Eigen::Matrix<short, 3, 1>* point;
Eigen::Matrix<short, 3, 1>* block_pos;
};
__global__ void Allocate(VoxelHashTable hash_table, Eigen::Matrix<short, 3, 1>* block_pos) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
hash_table.Allocate(block_pos[idx]);
}
__global__ void Retrieve(VoxelHashTable hash_table, const Eigen::Matrix<short, 3, 1>* point,
VoxelRGBW* voxel, VoxelBlock* voxel_block) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
voxel[idx] = hash_table.Retrieve<VoxelRGBW>(point[idx], voxel_block[idx]);
}
__global__ void Assignment(VoxelHashTable hash_table, const Eigen::Matrix<short, 3, 1>* point,
VoxelRGBW* voxel) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
VoxelBlock block;
VoxelRGBW* voxel_old = hash_table.RetrieveMutable<VoxelRGBW>(point[idx], block);
assert(voxel_old != NULL);
*voxel_old = voxel[idx];
}
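// The thin __global__ wrappers above exist because the hash-table API is
// device-side while the gtest cases run on the host; each launched thread
// handles exactly one block position / query point from the managed buffers.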
TEST_F(VoxelHashTest, Single) {
// allocate block (1, 1, 1)
*block_pos = Eigen::Matrix<short, 3, 1>::Constant(1);
*point = Eigen::Matrix<short, 3, 1>::Constant(8);
hipLaunchKernelGGL(( Allocate), dim3(1), dim3(1), 0, 0, voxel_hash_table, block_pos);
CUDA_CHECK_ERROR;
hipLaunchKernelGGL(( Retrieve), dim3(1), dim3(1), 0, 0, voxel_hash_table, point, voxel, voxel_block);
CUDA_SAFE_CALL(hipDeviceSynchronize());
EXPECT_EQ(voxel_hash_table.NumActiveBlock(), 1);
EXPECT_EQ(voxel_block->position, *block_pos);
// retrieve empty block
*point = Eigen::Matrix<short, 3, 1>::Constant(0);
hipLaunchKernelGGL(( Retrieve), dim3(1), dim3(1), 0, 0, voxel_hash_table, point, voxel, voxel_block);
CUDA_SAFE_CALL(hipDeviceSynchronize());
EXPECT_EQ(voxel->weight, 0);
// assignment
*block_pos = Eigen::Matrix<short, 3, 1>::Constant(0);
hipLaunchKernelGGL(( Allocate), dim3(1), dim3(1), 0, 0, voxel_hash_table, block_pos);
CUDA_CHECK_ERROR;
voxel_block->offset = 0; // reset cache after reallocation
for (unsigned char i = 0; i < BLOCK_LEN; ++i) {
*point = {0, 0, i};
*voxel = {{i, i, i}, i};
hipLaunchKernelGGL(( Assignment), dim3(1), dim3(1), 0, 0, voxel_hash_table, point, voxel);
CUDA_SAFE_CALL(hipDeviceSynchronize());
}
EXPECT_EQ(voxel_hash_table.NumActiveBlock(), 2);
for (unsigned char i = 0; i < BLOCK_LEN; ++i) {
*point = {0, 0, i};
hipLaunchKernelGGL(( Retrieve), dim3(1), dim3(1), 0, 0, voxel_hash_table, point, voxel, voxel_block);
CUDA_SAFE_CALL(hipDeviceSynchronize());
EXPECT_EQ(voxel->rgb[0], i);
EXPECT_EQ(voxel->rgb[1], i);
EXPECT_EQ(voxel->rgb[2], i);
EXPECT_EQ(voxel->weight, i);
}
}
TEST_F(VoxelHashTest, Multiple) {
for (unsigned char i = 0; i < MAX_BLOCKS; ++i) {
block_pos[i] = {i, i, i};
}
hipLaunchKernelGGL(( Allocate), dim3(1), dim3(MAX_BLOCKS), 0, 0, voxel_hash_table, block_pos);
voxel_hash_table.ResetLocks();
CUDA_SAFE_CALL(hipDeviceSynchronize());
// check the active block count (assuming no hash collisions)
EXPECT_EQ(voxel_hash_table.NumActiveBlock(), MAX_BLOCKS);
// assign some voxels
for (unsigned char i = 0; i < MAX_BLOCKS; ++i) {
point[i] = Eigen::Matrix<short, 3, 1>::Constant(i * BLOCK_LEN);
voxel[i] = {{i, i, i}, i};
}
hipLaunchKernelGGL(( Assignment), dim3(1), dim3(MAX_BLOCKS), 0, 0, voxel_hash_table, point, voxel);
CUDA_SAFE_CALL(hipDeviceSynchronize());
// reset buffer
for (unsigned char i = 0; i < MAX_BLOCKS; ++i) {
voxel[i] = {{0, 0, 0}, 0};
block_pos[i] = {0, 0, 0};
}
// retrieve and verify
hipLaunchKernelGGL(( Retrieve), dim3(1), dim3(MAX_BLOCKS), 0, 0, voxel_hash_table, point, voxel, voxel_block);
CUDA_SAFE_CALL(hipDeviceSynchronize());
for (unsigned char i = 0; i < MAX_BLOCKS; ++i) {
EXPECT_EQ(voxel[i].rgb[0], i);
EXPECT_EQ(voxel[i].rgb[1], i);
EXPECT_EQ(voxel[i].rgb[2], i);
EXPECT_EQ(voxel[i].weight, i);
const Eigen::Matrix<short, 3, 1> pos_gt(i, i, i);
EXPECT_EQ(voxel_block[i].position, pos_gt);
}
}
TEST_F(VoxelHashTest, Collision) {
// all hash to the last index NUM_BUCKET - 1
block_pos[0] = {33, 180, 42};
block_pos[1] = {61, 16, 170};
block_pos[2] = {63, 171, 45};
ASSERT_EQ(Hash(block_pos[0]), NUM_BUCKET - 1);
ASSERT_EQ(Hash(block_pos[0]), Hash(block_pos[1]));
ASSERT_EQ(Hash(block_pos[1]), Hash(block_pos[2]));
// hash to another idx
block_pos[3] = {0, 0, 0};
// allocate with conflict
hipLaunchKernelGGL(( Allocate), dim3(1), dim3(4), 0, 0, voxel_hash_table, block_pos);
CUDA_CHECK_ERROR;
voxel_hash_table.ResetLocks();
CUDA_SAFE_CALL(hipDeviceSynchronize());
EXPECT_EQ(voxel_hash_table.NumActiveBlock(), 2);
// allocate again
hipLaunchKernelGGL(( Allocate), dim3(1), dim3(4), 0, 0, voxel_hash_table, block_pos);
CUDA_CHECK_ERROR;
voxel_hash_table.ResetLocks();
CUDA_SAFE_CALL(hipDeviceSynchronize());
EXPECT_EQ(voxel_hash_table.NumActiveBlock(), 3);
// allocate yet again
hipLaunchKernelGGL(( Allocate), dim3(1), dim3(4), 0, 0, voxel_hash_table, block_pos);
CUDA_CHECK_ERROR;
voxel_hash_table.ResetLocks();
CUDA_SAFE_CALL(hipDeviceSynchronize());
EXPECT_EQ(voxel_hash_table.NumActiveBlock(), 4);
// do some assignment
for (unsigned char i = 0; i < 4; ++i) {
point[i] = block_pos[i] * BLOCK_LEN; // use the first point of a block
voxel[i] = {{i, i, i}, i};
}
hipLaunchKernelGGL(( Assignment), dim3(1), dim3(4), 0, 0, voxel_hash_table, point, voxel);
CUDA_CHECK_ERROR;
CUDA_SAFE_CALL(hipDeviceSynchronize());
// reset buffer
for (unsigned char i = 0; i < 4; ++i) {
voxel[i] = {{0, 0, 0}, 0};
block_pos[i] = {0, 0, 0};
voxel_block[i].idx = -1;
}
// retrieve and verify
hipLaunchKernelGGL(( Retrieve), dim3(1), dim3(4), 0, 0, voxel_hash_table, point, voxel, voxel_block);
CUDA_CHECK_ERROR;
CUDA_SAFE_CALL(hipDeviceSynchronize());
for (unsigned char i = 0; i < 4; ++i) {
EXPECT_EQ(voxel[i].rgb[0], i);
EXPECT_EQ(voxel[i].rgb[1], i);
EXPECT_EQ(voxel[i].rgb[2], i);
EXPECT_EQ(voxel[i].weight, i);
}
}
| 0b66bc283027566313feec21d84ae3ceafab2b7e.cu | #include <gtest/gtest.h>
#include <Eigen/Dense>
#include <cassert>
#include "utils/cuda/errors.cuh"
#include "utils/tsdf/voxel_hash.cuh"
#define MAX_BLOCKS 128
class VoxelHashTest : public ::testing::Test {
protected:
VoxelHashTest() {
CUDA_SAFE_CALL(cudaMallocManaged(&voxel, sizeof(VoxelRGBW) * MAX_BLOCKS * BLOCK_VOLUME));
CUDA_SAFE_CALL(cudaMallocManaged(&voxel_block, sizeof(VoxelBlock) * MAX_BLOCKS));
CUDA_SAFE_CALL(
cudaMallocManaged(&point, sizeof(Eigen::Matrix<short, 3, 1>) * MAX_BLOCKS * BLOCK_VOLUME));
CUDA_SAFE_CALL(cudaMallocManaged(&block_pos, sizeof(Eigen::Matrix<short, 3, 1>) * MAX_BLOCKS));
}
~VoxelHashTest() {
voxel_hash_table.ReleaseMemory();
CUDA_SAFE_CALL(cudaFree(voxel));
CUDA_SAFE_CALL(cudaFree(voxel_block));
CUDA_SAFE_CALL(cudaFree(point));
CUDA_SAFE_CALL(cudaFree(block_pos));
}
VoxelHashTable voxel_hash_table;
VoxelRGBW* voxel;
VoxelBlock* voxel_block;
Eigen::Matrix<short, 3, 1>* point;
Eigen::Matrix<short, 3, 1>* block_pos;
};
__global__ void Allocate(VoxelHashTable hash_table, Eigen::Matrix<short, 3, 1>* block_pos) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
hash_table.Allocate(block_pos[idx]);
}
__global__ void Retrieve(VoxelHashTable hash_table, const Eigen::Matrix<short, 3, 1>* point,
VoxelRGBW* voxel, VoxelBlock* voxel_block) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
voxel[idx] = hash_table.Retrieve<VoxelRGBW>(point[idx], voxel_block[idx]);
}
__global__ void Assignment(VoxelHashTable hash_table, const Eigen::Matrix<short, 3, 1>* point,
VoxelRGBW* voxel) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
VoxelBlock block;
VoxelRGBW* voxel_old = hash_table.RetrieveMutable<VoxelRGBW>(point[idx], block);
assert(voxel_old != NULL);
*voxel_old = voxel[idx];
}
TEST_F(VoxelHashTest, Single) {
// allocate block (1, 1, 1)
*block_pos = Eigen::Matrix<short, 3, 1>::Constant(1);
*point = Eigen::Matrix<short, 3, 1>::Constant(8);
Allocate<<<1, 1>>>(voxel_hash_table, block_pos);
CUDA_CHECK_ERROR;
Retrieve<<<1, 1>>>(voxel_hash_table, point, voxel, voxel_block);
CUDA_SAFE_CALL(cudaDeviceSynchronize());
EXPECT_EQ(voxel_hash_table.NumActiveBlock(), 1);
EXPECT_EQ(voxel_block->position, *block_pos);
// retrieve empty block
*point = Eigen::Matrix<short, 3, 1>::Constant(0);
Retrieve<<<1, 1>>>(voxel_hash_table, point, voxel, voxel_block);
CUDA_SAFE_CALL(cudaDeviceSynchronize());
EXPECT_EQ(voxel->weight, 0);
// assignment
*block_pos = Eigen::Matrix<short, 3, 1>::Constant(0);
Allocate<<<1, 1>>>(voxel_hash_table, block_pos);
CUDA_CHECK_ERROR;
voxel_block->offset = 0; // reset cache after reallocation
for (unsigned char i = 0; i < BLOCK_LEN; ++i) {
*point = {0, 0, i};
*voxel = {{i, i, i}, i};
Assignment<<<1, 1>>>(voxel_hash_table, point, voxel);
CUDA_SAFE_CALL(cudaDeviceSynchronize());
}
EXPECT_EQ(voxel_hash_table.NumActiveBlock(), 2);
for (unsigned char i = 0; i < BLOCK_LEN; ++i) {
*point = {0, 0, i};
Retrieve<<<1, 1>>>(voxel_hash_table, point, voxel, voxel_block);
CUDA_SAFE_CALL(cudaDeviceSynchronize());
EXPECT_EQ(voxel->rgb[0], i);
EXPECT_EQ(voxel->rgb[1], i);
EXPECT_EQ(voxel->rgb[2], i);
EXPECT_EQ(voxel->weight, i);
}
}
TEST_F(VoxelHashTest, Multiple) {
for (unsigned char i = 0; i < MAX_BLOCKS; ++i) {
block_pos[i] = {i, i, i};
}
Allocate<<<1, MAX_BLOCKS>>>(voxel_hash_table, block_pos);
voxel_hash_table.ResetLocks();
CUDA_SAFE_CALL(cudaDeviceSynchronize());
// check the active block count (assuming no hash collisions)
EXPECT_EQ(voxel_hash_table.NumActiveBlock(), MAX_BLOCKS);
// assign some voxels
for (unsigned char i = 0; i < MAX_BLOCKS; ++i) {
point[i] = Eigen::Matrix<short, 3, 1>::Constant(i * BLOCK_LEN);
voxel[i] = {{i, i, i}, i};
}
Assignment<<<1, MAX_BLOCKS>>>(voxel_hash_table, point, voxel);
CUDA_SAFE_CALL(cudaDeviceSynchronize());
// reset buffer
for (unsigned char i = 0; i < MAX_BLOCKS; ++i) {
voxel[i] = {{0, 0, 0}, 0};
block_pos[i] = {0, 0, 0};
}
// retrieve and verify
Retrieve<<<1, MAX_BLOCKS>>>(voxel_hash_table, point, voxel, voxel_block);
CUDA_SAFE_CALL(cudaDeviceSynchronize());
for (unsigned char i = 0; i < MAX_BLOCKS; ++i) {
EXPECT_EQ(voxel[i].rgb[0], i);
EXPECT_EQ(voxel[i].rgb[1], i);
EXPECT_EQ(voxel[i].rgb[2], i);
EXPECT_EQ(voxel[i].weight, i);
const Eigen::Matrix<short, 3, 1> pos_gt(i, i, i);
EXPECT_EQ(voxel_block[i].position, pos_gt);
}
}
TEST_F(VoxelHashTest, Collision) {
// all hash to the last index NUM_BUCKET - 1
block_pos[0] = {33, 180, 42};
block_pos[1] = {61, 16, 170};
block_pos[2] = {63, 171, 45};
ASSERT_EQ(Hash(block_pos[0]), NUM_BUCKET - 1);
ASSERT_EQ(Hash(block_pos[0]), Hash(block_pos[1]));
ASSERT_EQ(Hash(block_pos[1]), Hash(block_pos[2]));
// hash to another idx
block_pos[3] = {0, 0, 0};
// allocate with conflict
Allocate<<<1, 4>>>(voxel_hash_table, block_pos);
CUDA_CHECK_ERROR;
voxel_hash_table.ResetLocks();
CUDA_SAFE_CALL(cudaDeviceSynchronize());
EXPECT_EQ(voxel_hash_table.NumActiveBlock(), 2);
// allocate again
Allocate<<<1, 4>>>(voxel_hash_table, block_pos);
CUDA_CHECK_ERROR;
voxel_hash_table.ResetLocks();
CUDA_SAFE_CALL(cudaDeviceSynchronize());
EXPECT_EQ(voxel_hash_table.NumActiveBlock(), 3);
// allocate yet again
Allocate<<<1, 4>>>(voxel_hash_table, block_pos);
CUDA_CHECK_ERROR;
voxel_hash_table.ResetLocks();
CUDA_SAFE_CALL(cudaDeviceSynchronize());
EXPECT_EQ(voxel_hash_table.NumActiveBlock(), 4);
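// Only one of the colliding positions lands per launch (presumably because the
// shared bucket is locked during Allocate and only freed by ResetLocks), which
// is why three launches are needed before all four blocks become active.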
// do some assignment
for (unsigned char i = 0; i < 4; ++i) {
point[i] = block_pos[i] * BLOCK_LEN; // use the first point of a block
voxel[i] = {{i, i, i}, i};
}
Assignment<<<1, 4>>>(voxel_hash_table, point, voxel);
CUDA_CHECK_ERROR;
CUDA_SAFE_CALL(cudaDeviceSynchronize());
// reset buffer
for (unsigned char i = 0; i < 4; ++i) {
voxel[i] = {{0, 0, 0}, 0};
block_pos[i] = {0, 0, 0};
voxel_block[i].idx = -1;
}
// retrieve and verify
Retrieve<<<1, 4>>>(voxel_hash_table, point, voxel, voxel_block);
CUDA_CHECK_ERROR;
CUDA_SAFE_CALL(cudaDeviceSynchronize());
for (unsigned char i = 0; i < 4; ++i) {
EXPECT_EQ(voxel[i].rgb[0], i);
EXPECT_EQ(voxel[i].rgb[1], i);
EXPECT_EQ(voxel[i].rgb[2], i);
EXPECT_EQ(voxel[i].weight, i);
}
}
|
c88c4f6ff369537a109aa5b6d36e6af73220dbd2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "histogram.cuh"
#define SIZE (100*1024*1024)
#define HISTO_SIZE 256
__global__ void histogram_shared_mem(int *input, int *histo, long size)
{
__shared__ int temp[256];
temp[threadIdx.x] = 0;
__syncthreads();
int gid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int temp_id = gid;
while (temp_id < size)
{
atomicAdd(&temp[input[temp_id]], 1);
temp_id += stride;
}
__syncthreads();
atomicAdd(&(histo[threadIdx.x]), temp[threadIdx.x]);
}
__global__ void histogram_basic(int *input, int *histo, long size)
{
int gid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int temp_id = gid;
while (temp_id < size)
{
atomicAdd(&histo[input[temp_id]], 1);
temp_id += stride;
}
}
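// Two kernel variants: histogram_shared_mem privatizes a 256-bin histogram per
// block in shared memory and merges it into the global histogram with a single
// atomicAdd per bin, while histogram_basic issues every atomicAdd directly on
// global memory (simpler, but with far more contention on popular bins).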
void histogram_cpu(int* input, int* h_histo_cpu, int size)
{
// capture the start time
clock_t start, stop;
start = clock();
for (int i = 0; i < 256; i++)
h_histo_cpu[i] = 0;
for (int i = 0; i < SIZE; i++)
h_histo_cpu[input[i]]++;
stop = clock();
float elapsedTime = (float)(stop - start) /(float)CLOCKS_PER_SEC * 1000.0f;
printf("Histogram CPU execution time: %3.1f ms\n", elapsedTime);
}
void histogram_gpu(int* h_input, int* h_histo_gpu, int size)
{
long byte_size = SIZE * sizeof(int);
int histo_byte_size = HISTO_SIZE * sizeof(int);
hipEvent_t start, stop;
gpuErrchk(hipEventCreate(&start));
gpuErrchk(hipEventCreate(&stop));
gpuErrchk(hipEventRecord(start, 0));
// allocate memory on the GPU for the file's data
int * d_input, *d_histo;
gpuErrchk(hipMalloc((void**)&d_input, byte_size));
gpuErrchk(hipMemcpy(d_input, h_input, byte_size, hipMemcpyHostToDevice));
gpuErrchk(hipMalloc((void**)&d_histo, histo_byte_size));
gpuErrchk(hipMemset(d_histo, 0, histo_byte_size));
// kernel launch - one thread per input element; the SM-count-based grid (blocks * 32) is left commented out below
hipDeviceProp_t prop;
gpuErrchk(hipGetDeviceProperties(&prop, 0));
int blocks = prop.multiProcessorCount;
dim3 block(HISTO_SIZE);
//dim3 grid(blocks * 32);
dim3 grid(SIZE / block.x +1);
histogram_basic << <grid, block >> > (d_input, d_histo, SIZE);
gpuErrchk(hipMemcpy(h_histo_gpu, d_histo, histo_byte_size, hipMemcpyDeviceToHost));
// get stop time, and display the timing results
gpuErrchk(hipEventRecord(stop, 0));
gpuErrchk(hipEventSynchronize(stop));
float elapsedTime;
gpuErrchk(hipEventElapsedTime(&elapsedTime, start, stop));
printf("Histogram GPU execution time : %3.1f ms\n", elapsedTime);
gpuErrchk(hipEventDestroy(start));
gpuErrchk(hipEventDestroy(stop));
hipFree(d_histo);
hipFree(d_input);
gpuErrchk(hipDeviceReset());
}
void histogram_gpu_multistreams(int* h_input, int* h_histo_gpu, int size)
{
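// NOTE: despite its name, this path currently mirrors histogram_gpu and does
// not create any streams; a multi-stream version would split the input into
// chunks and overlap the host-to-device copies with the kernel launches.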
long byte_size = SIZE * sizeof(int);
int histo_byte_size = HISTO_SIZE * sizeof(int);
hipEvent_t start, stop;
gpuErrchk(hipEventCreate(&start));
gpuErrchk(hipEventCreate(&stop));
gpuErrchk(hipEventRecord(start, 0));
// allocate memory on the GPU for the file's data
int * d_input, *d_histo;
gpuErrchk(hipMalloc((void**)&d_input, byte_size));
gpuErrchk(hipMemcpy(d_input, h_input, byte_size, hipMemcpyHostToDevice));
gpuErrchk(hipMalloc((void**)&d_histo, histo_byte_size));
gpuErrchk(hipMemset(d_histo, 0, histo_byte_size));
// kernel launch - 2x the number of mps gave best timing
hipDeviceProp_t prop;
gpuErrchk(hipGetDeviceProperties(&prop, 0));
int blocks = prop.multiProcessorCount;
dim3 block(HISTO_SIZE);
//dim3 grid(blocks * 32);
dim3 grid(SIZE / block.x + 1);
histogram_basic << <grid, block >> > (d_input, d_histo, SIZE);
gpuErrchk(hipMemcpy(h_histo_gpu, d_histo, histo_byte_size, hipMemcpyDeviceToHost));
// get stop time, and display the timing results
gpuErrchk(hipEventRecord(stop, 0));
gpuErrchk(hipEventSynchronize(stop));
float elapsedTime;
gpuErrchk(hipEventElapsedTime(&elapsedTime, start, stop));
printf("Histogram GPU execution time : %3.1f ms\n", elapsedTime);
gpuErrchk(hipEventDestroy(start));
gpuErrchk(hipEventDestroy(stop));
hipFree(d_histo);
hipFree(d_input);
gpuErrchk(hipDeviceReset());
}
//int main(void)
//{
// long byte_size = SIZE * sizeof(int);
// int histo_byte_size = HISTO_SIZE * sizeof(int);
//
// int *h_input, *h_histo_cpu, *h_histo_gpu;
//
// h_input = (int*)malloc(byte_size);
// h_histo_cpu = (int*)malloc(histo_byte_size);
// h_histo_gpu = (int*)malloc(histo_byte_size);
//
// //initialize the array from 0 to 255
// initialize(h_input, SIZE, INIT_0_TO_X, 256);
//
// histogram_cpu(h_input, h_histo_cpu, SIZE);
// histogram_gpu(h_input, h_histo_gpu, SIZE);
//
// compare_arrays(h_histo_gpu, h_histo_cpu, HISTO_SIZE);
//
// free(h_histo_gpu);
// free(h_histo_cpu);
// free(h_input);
// return 0;
//}
| c88c4f6ff369537a109aa5b6d36e6af73220dbd2.cu | #include "histogram.cuh"
#define SIZE (100*1024*1024)
#define HISTO_SIZE 256
__global__ void histogram_shared_mem(int *input, int *histo, long size)
{
__shared__ int temp[256];
temp[threadIdx.x] = 0;
__syncthreads();
int gid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int temp_id = gid;
while (temp_id < size)
{
atomicAdd(&temp[input[temp_id]], 1);
temp_id += stride;
}
__syncthreads();
atomicAdd(&(histo[threadIdx.x]), temp[threadIdx.x]);
}
__global__ void histogram_basic(int *input, int *histo, long size)
{
int gid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int temp_id = gid;
while (temp_id < size)
{
atomicAdd(&histo[input[temp_id]], 1);
temp_id += stride;
}
}
void histogram_cpu(int* input, int* h_histo_cpu, int size)
{
// capture the start time
clock_t start, stop;
start = clock();
for (int i = 0; i < 256; i++)
h_histo_cpu[i] = 0;
for (int i = 0; i < SIZE; i++)
h_histo_cpu[input[i]]++;
stop = clock();
float elapsedTime = (float)(stop - start) /(float)CLOCKS_PER_SEC * 1000.0f;
printf("Histogram CPU execution time: %3.1f ms\n", elapsedTime);
}
void histogram_gpu(int* h_input, int* h_histo_gpu, int size)
{
long byte_size = SIZE * sizeof(int);
int histo_byte_size = HISTO_SIZE * sizeof(int);
cudaEvent_t start, stop;
gpuErrchk(cudaEventCreate(&start));
gpuErrchk(cudaEventCreate(&stop));
gpuErrchk(cudaEventRecord(start, 0));
// allocate memory on the GPU for the file's data
int * d_input, *d_histo;
gpuErrchk(cudaMalloc((void**)&d_input, byte_size));
gpuErrchk(cudaMemcpy(d_input, h_input, byte_size, cudaMemcpyHostToDevice));
gpuErrchk(cudaMalloc((void**)&d_histo, histo_byte_size));
gpuErrchk(cudaMemset(d_histo, 0, histo_byte_size));
// kernel launch - one thread per input element; the SM-count-based grid (blocks * 32) is left commented out below
cudaDeviceProp prop;
gpuErrchk(cudaGetDeviceProperties(&prop, 0));
int blocks = prop.multiProcessorCount;
dim3 block(HISTO_SIZE);
//dim3 grid(blocks * 32);
dim3 grid(SIZE / block.x +1);
histogram_basic << <grid, block >> > (d_input, d_histo, SIZE);
gpuErrchk(cudaMemcpy(h_histo_gpu, d_histo, histo_byte_size, cudaMemcpyDeviceToHost));
// get stop time, and display the timing results
gpuErrchk(cudaEventRecord(stop, 0));
gpuErrchk(cudaEventSynchronize(stop));
float elapsedTime;
gpuErrchk(cudaEventElapsedTime(&elapsedTime, start, stop));
printf("Histogram GPU execution time : %3.1f ms\n", elapsedTime);
gpuErrchk(cudaEventDestroy(start));
gpuErrchk(cudaEventDestroy(stop));
cudaFree(d_histo);
cudaFree(d_input);
gpuErrchk(cudaDeviceReset());
}
void histogram_gpu_multistreams(int* h_input, int* h_histo_gpu, int size)
{
long byte_size = SIZE * sizeof(int);
int histo_byte_size = HISTO_SIZE * sizeof(int);
cudaEvent_t start, stop;
gpuErrchk(cudaEventCreate(&start));
gpuErrchk(cudaEventCreate(&stop));
gpuErrchk(cudaEventRecord(start, 0));
// allocate memory on the GPU for the file's data
int * d_input, *d_histo;
gpuErrchk(cudaMalloc((void**)&d_input, byte_size));
gpuErrchk(cudaMemcpy(d_input, h_input, byte_size, cudaMemcpyHostToDevice));
gpuErrchk(cudaMalloc((void**)&d_histo, histo_byte_size));
gpuErrchk(cudaMemset(d_histo, 0, histo_byte_size));
// kernel launch - 2x the number of mps gave best timing
cudaDeviceProp prop;
gpuErrchk(cudaGetDeviceProperties(&prop, 0));
int blocks = prop.multiProcessorCount;
dim3 block(HISTO_SIZE);
//dim3 grid(blocks * 32);
dim3 grid(SIZE / block.x + 1);
histogram_basic << <grid, block >> > (d_input, d_histo, SIZE);
gpuErrchk(cudaMemcpy(h_histo_gpu, d_histo, histo_byte_size, cudaMemcpyDeviceToHost));
// get stop time, and display the timing results
gpuErrchk(cudaEventRecord(stop, 0));
gpuErrchk(cudaEventSynchronize(stop));
float elapsedTime;
gpuErrchk(cudaEventElapsedTime(&elapsedTime, start, stop));
printf("Histogram GPU execution time : %3.1f ms\n", elapsedTime);
gpuErrchk(cudaEventDestroy(start));
gpuErrchk(cudaEventDestroy(stop));
cudaFree(d_histo);
cudaFree(d_input);
gpuErrchk(cudaDeviceReset());
}
//int main(void)
//{
// long byte_size = SIZE * sizeof(int);
// int histo_byte_size = HISTO_SIZE * sizeof(int);
//
// int *h_input, *h_histo_cpu, *h_histo_gpu;
//
// h_input = (int*)malloc(byte_size);
// h_histo_cpu = (int*)malloc(histo_byte_size);
// h_histo_gpu = (int*)malloc(histo_byte_size);
//
// //initialize the array from 0 to 255
// initialize(h_input, SIZE, INIT_0_TO_X, 256);
//
// histogram_cpu(h_input, h_histo_cpu, SIZE);
// histogram_gpu(h_input, h_histo_gpu, SIZE);
//
// compare_arrays(h_histo_gpu, h_histo_cpu, HISTO_SIZE);
//
// free(h_histo_gpu);
// free(h_histo_cpu);
// free(h_input);
// return 0;
//}
|
saber_gru.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "saber/funcs/impl/cuda/saber_gru.h"
#include "saber/core/tensor_op.h"
#include "hip/hip_fp16.h"
namespace anakin {
namespace saber {
// TODO: could try keeping the map vector in shared memory
template <typename Dtype>
__global__ void trans_map2in(Dtype* output, const Dtype* input, const int* map, int count,
int lastdim) {
CUDA_KERNEL_LE(tid, count) {
int seq = tid / lastdim;
output[tid] = input[map[seq] * lastdim + tid % lastdim];
}
}
template <typename Dtype>
__global__ void trans_map2out(Dtype* output, const Dtype* input, const int* map, int count,
int lastdim) {
CUDA_KERNEL_LE(tid, count) {
int seq = tid / lastdim;
output[map[seq]*lastdim + tid % lastdim] = input[tid];
}
}
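// trans_map2out scatters from the sequence-concatenated input layout into the
// batched (time-major) buffer via the precomputed index map, and trans_map2in
// gathers back in the opposite direction; `lastdim` is the per-step feature
// width (word_size on the way in, hidden_size on the way out).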
template <>
void SaberGru<NV, AK_FLOAT, AK_FLOAT, AK_FLOAT, NCHW, NCHW, NCHW>::seq2hw(\
std::vector<DataTensor_out*> outputs, std::vector<DataTensor_in*> inputs,
GruParam<OpTensor>& param, int hidden_size,
void* real_temp_out
) {
DataTensor_in* din = inputs[0];
DataTensor_out* dout = outputs[0];
int wordsize = din->channel();
std::vector<int> offset_vec = din->get_seq_offset();
CHECK_GE(offset_vec.size(), 2) << "offset vector size must be >= 2";
int batch_size = offset_vec.size() - 1;
int max_len = 0;
std::vector<int> length_vec;
if ((void*)(outputs[0]->data()) == real_temp_out) {
DLOG(INFO) << "not use inner space";
return;
}
const OutDataType* origin = _temp_tensor_out.data();
OutDataType* target = dout->mutable_data();
//source is the index in the original sequence layout, target is the index in the batched (hw) layout; map is the source-to-target offset table
int seq_sum = offset_vec[batch_size];
CUDA_CHECK(hipMemcpyAsync(_temp_map_dev.mutable_data(), _temp_map_host.data(), sizeof(int)*seq_sum,
hipMemcpyHostToDevice, _ctx.get_compute_stream()));
int count=seq_sum * hidden_size;
int block_dim=count;
int grid_dim=1;
if(count>1024){
block_dim=256;
grid_dim=(count+block_dim-1)/block_dim;
}
hipLaunchKernelGGL(( trans_map2in) , dim3(grid_dim), dim3(block_dim), 0, _ctx.get_compute_stream(), target, origin, _temp_map_dev.data(),
count, hidden_size);
// trans_map2in_old <<< 4, 128, 0, _ctx.get_compute_stream()>>>(target, origin, _temp_map_dev.data(),
// count, hidden_size);
}
//TODO: custom gemm, flatten by time, no padding (zhangs)
template <>
const float* SaberGru<NV, AK_FLOAT, AK_FLOAT, AK_FLOAT, NCHW, NCHW, NCHW>::hw2seq(\
std::vector<DataTensor_in*> inputs, GruParam<OpTensor>& param, \
int word_size, int hidden_size, int& sequence_len) {
DataTensor_in* din = inputs[0];
std::vector<int> offset_vec = din->get_seq_offset();
CHECK_GE(offset_vec.size(), 2) << "offset vector size must be >= 2";
int batch_size = offset_vec.size() - 1;
int seq_sum = offset_vec[offset_vec.size() - 1];
int wordsize = din->channel();
int max_len = 0;
std::vector<int> length_vec(batch_size);
for (int i = 0; i < offset_vec.size() - 1; ++i) {
int len = offset_vec[i + 1] - offset_vec[i];
max_len = max_len > len ? max_len : len;
length_vec[i] = len;
}
Shape seq_shape(1, max_len, batch_size, word_size);
_temp_tensor_in.try_expand_size(seq_shape);
Shape seq_out_shape(1, max_len, batch_size, hidden_size);
_temp_tensor_out.try_expand_size(seq_out_shape);
sequence_len = max_len;
if (batch_size == 1 || max_len == 1) {
return din->mutable_data();
}
InDataType* target = _temp_tensor_in.mutable_data();
const InDataType* origin = din->data();
_temp_map_host.try_expand_size(seq_sum);
_temp_map_dev.try_expand_size(seq_sum);
int* map = _temp_map_host.mutable_data();
if (param.is_reverse) {
for (int batchid = 0; batchid < batch_size; ++batchid) {
int batch_offset = max_len - length_vec[batchid];
for (int seqid = 0; seqid < length_vec[batchid]; ++seqid) {
int source = (offset_vec[batchid] + seqid);
int target = ((seqid + batch_offset) * batch_size + batchid);
map[source] = target;
}
}
} else {
for (int batchid = 0; batchid < batch_size; ++batchid) {
for (int seqid = 0; seqid < length_vec[batchid]; ++seqid) {
int source = (offset_vec[batchid] + seqid);
int target = (seqid * batch_size + batchid);
map[source] = target;
}
}
}
CUDA_CHECK(hipMemcpyAsync(_temp_map_dev.mutable_data(), _temp_map_host.data(), sizeof(int)*seq_sum,
hipMemcpyHostToDevice, _ctx.get_compute_stream()));
int count=seq_sum * wordsize;
int block_dim=count;
int grid_dim=1;
if(count>1024){
block_dim=256;
grid_dim=(count+block_dim-1)/block_dim;
}
hipLaunchKernelGGL(( trans_map2out) , dim3(grid_dim), dim3(block_dim), 0, _ctx.get_compute_stream(), target, origin, _temp_map_dev.data(),
count, wordsize);
// trans_map2out_old <<< 4, 128, 0, _ctx.get_compute_stream()>>>(target, origin, _temp_map_dev.data(),
// count, wordsize);
return _temp_tensor_in.data();
}
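// hw2seq repacks the input from per-sequence concatenation into a padded
// (max_len x batch_size x word_size) time-major buffer and records the index
// map that seq2hw later uses to restore the original order; when batch_size or
// max_len is 1 the two layouts coincide and the input pointer is returned as is.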
#define SIGMOID_THRESHOLD_MIN_PADDLE -40.0
#define SIGMOID_THRESHOLD_MAX_PADDLE 13.0
#define EXP_MAX_INPUT_PADDLE 40.0
template <typename T>
inline static __device__ T identity(const T a) {
return a;
}
template <typename T>
inline static __device__ T relu(const T a) {
return a > static_cast<T>(0.0) ? a : static_cast<T>(0.0);
}
template <typename T>
inline static __device__ T sigmoid_paddle(const T a) {
const T min = SIGMOID_THRESHOLD_MIN_PADDLE;
const T max = SIGMOID_THRESHOLD_MAX_PADDLE;
T tmp = (a < min) ? min : ((a > max) ? max : a);
return static_cast<T>(1.0) / (static_cast<T>(1.0) + exp(-tmp));
}
template <typename T>
inline static __device__ T tanh_paddle(const T a) {
T tmp = -2.0 * a;
tmp = (tmp > EXP_MAX_INPUT_PADDLE) ? EXP_MAX_INPUT_PADDLE : tmp;
return (2.0 / (1.0 + exp(tmp))) - 1.0;
}
static void anakin_NV_gemm(hipblasHandle_t handle, const bool TransA,
const bool TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
int lda = (!TransA/* == CblasNoTrans*/) ? K : M;
int ldb = (!TransB/* == CblasNoTrans*/) ? N : K;
hipblasOperation_t cuTransA =
(!TransA/* == CblasNoTrans*/) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(!TransB/* == CblasNoTrans*/) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasSgemm(handle, cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
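// anakin_NV_gemm wraps hipblasSgemm for row-major operands: because the BLAS
// routine expects column-major data, it computes C^T = B^T * A^T by swapping
// the operand order and transpose flags, so callers can pass plain row-major
// A, B and C.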
/**
* gridDim=batchsize
* @tparam Dtype
* @param w_x_r
* @param w_h_r
* @param b_r
* @param hidden_size
* @param output_r
* @param w_x_z
* @param w_h_z
* @param b_z
* @param output_z
*/
template <typename Dtype>
__global__ void cal_reset_update(Dtype* w_x_r, Dtype* w_h_r, const Dtype* b_r,
const int hidden_size, Dtype* output_r,
Dtype* w_x_z, Dtype* w_h_z, const Dtype* b_z, Dtype* output_z) {
int w_base_index = blockIdx.x * hidden_size * 3;
int h_base_index = blockIdx.x * hidden_size;
Dtype* in_w_x_r = w_x_r + w_base_index;
Dtype* in_w_h_r = w_h_r + w_base_index;
Dtype* in_w_x_z = w_x_z + w_base_index;
Dtype* in_w_h_z = w_h_z + w_base_index;
Dtype* out_output_r = output_r + h_base_index;
Dtype* out_output_z = output_z + h_base_index;
for (int index = threadIdx.x; index < hidden_size; index += blockDim.x) {
Dtype before_act_r = in_w_x_r[index] + in_w_h_r[index] + b_r[index];
out_output_r[index] = Dtype(Dtype(1) / (Dtype(1) + expf(-before_act_r)));
Dtype before_act_z = in_w_x_z[index] + in_w_h_z[index] + b_z[index];
out_output_z[index] = Dtype(Dtype(1) / (Dtype(1) + expf(-before_act_z)));
}
}
template <typename Dtype>
__global__ void cal_final(Dtype* w_x_o, Dtype* w_h_o, Dtype* reset, const Dtype* b_o,
const int hidden_size, Dtype* update, Dtype* output, Dtype* hidden_pre) {
int w_base_index = blockIdx.x * hidden_size * 3;
int h_base_index = blockIdx.x * hidden_size;
Dtype* in_w_x_o = w_x_o + w_base_index;
Dtype* in_w_h_o = w_h_o + w_base_index;
Dtype* in_hidden_pre = hidden_pre + h_base_index;
Dtype* in_update = update + h_base_index;
Dtype* in_reset = reset + h_base_index;
Dtype* out_output = output + h_base_index;
for (int index = threadIdx.x; index < hidden_size; index += blockDim.x) {
Dtype before_act_h = in_w_x_o[index] + in_w_h_o[index] * in_reset[index]
+ b_o[index];
Dtype acted = tanhf(before_act_h);
Dtype update_t = in_update[index];
out_output[index] = (1 - update_t) * acted + update_t* in_hidden_pre[index];
}
}
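// cal_reset_update and cal_final split one GRU step across two launches (the
// reset/update gates first, then the candidate state and the blend with
// hidden_pre), while the cal_one_kernel_* variants below fuse the whole step
// into a single kernel with different activation and gate-ordering choices
// (the cudnn-style vs. paddle-style formulas).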
template <typename Dtype>
__global__ void cal_one_kernel_paddlesigmoid_tanh_cudnn_formula(Dtype* w_x_r, Dtype* w_x_z,
Dtype* w_x_o,
Dtype* w_h_r, Dtype* w_h_z, Dtype* w_h_o,
const Dtype* b_r, const Dtype* b_z, const Dtype* b_o,
int hidden_size, Dtype* output, const Dtype* hidden_pre) {
int w_base_index = blockIdx.x * hidden_size * 3;
int h_base_index = blockIdx.x * hidden_size;
Dtype* in_w_x_r = w_x_r + w_base_index;
Dtype* in_w_h_r = w_h_r + w_base_index;
Dtype* in_w_x_z = w_x_z + w_base_index;
Dtype* in_w_h_z = w_h_z + w_base_index;
Dtype* in_w_x_o = w_x_o + w_base_index;
Dtype* in_w_h_o = w_h_o + w_base_index;
const Dtype* in_hidden_pre = hidden_pre + h_base_index;
Dtype* out_output = output + h_base_index;
for (int index = threadIdx.x; index < hidden_size; index += blockDim.x) {
const Dtype min = SIGMOID_THRESHOLD_MIN_PADDLE;
const Dtype max = SIGMOID_THRESHOLD_MAX_PADDLE;
Dtype before_act_r = in_w_x_r[index] + in_w_h_r[index] + b_r[index];
before_act_r = (before_act_r < min) ? min : ((before_act_r > max) ? max : before_act_r);
Dtype act_r = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + exp(-before_act_r));
Dtype before_act_z = in_w_x_z[index] + in_w_h_z[index] + b_z[index];
before_act_z = (before_act_z < min) ? min : ((before_act_z > max) ? max : before_act_z);
Dtype act_z = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + exp(-before_act_z));
Dtype before_act_h = in_w_x_o[index] + in_w_h_o[index] * act_r
+ b_o[index];
before_act_h = (before_act_h > EXP_MAX_INPUT_PADDLE) ? EXP_MAX_INPUT_PADDLE : before_act_h;
Dtype acted = tanhf(before_act_h);
out_output[index] = (1 - act_z) * acted + act_z * in_hidden_pre[index];
}
}
template <typename Dtype>
__global__ void cal_one_kernel_sigmoid_tanh_modi_cudnn_formula(Dtype* w_x_r, Dtype* w_x_z,
Dtype* w_x_o,
Dtype* w_h_r, Dtype* w_h_z, Dtype* w_h_o,
const Dtype* b_r, const Dtype* b_z, const Dtype* b_o,
int hidden_size, Dtype* output, const Dtype* hidden_pre) {
int w_base_index = blockIdx.x * hidden_size * 3 + threadIdx.x;
int h_base_index = blockIdx.x * hidden_size + threadIdx.x;
for (int index = threadIdx.x; index < hidden_size;
index += blockDim.x, w_base_index += blockDim.x, h_base_index += blockDim.x) {
Dtype before_act_r = w_x_r[w_base_index] + w_h_r[w_base_index] + b_r[index];
Dtype act_r = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + expf(-before_act_r));
Dtype before_act_z = w_x_z[w_base_index] + w_h_z[w_base_index] + b_z[index];
Dtype act_z = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + expf(-before_act_z));
Dtype before_act_h = w_x_o[w_base_index] + w_h_o[w_base_index] * act_r
+ b_o[index];
Dtype acted = tanh(before_act_h);
output[h_base_index] = (static_cast<Dtype>(1.0) - act_z) * acted + act_z * hidden_pre[h_base_index];
}
}
template <typename Dtype>
__global__ void cal_one_kernel_paddlesigmoid_relu_paddle_formula(Dtype* w_x_r, Dtype* w_x_z,
Dtype* w_x_o,
Dtype* w_h_r, Dtype* w_h_z, const Dtype* w_o,
const Dtype* b_r, const Dtype* b_z, const Dtype* b_o,
int hidden_size, Dtype* output, const Dtype* hidden_pre) {
int index = threadIdx.x;
if (index >= hidden_size) {
return;
}
int w_base_index = blockIdx.x * hidden_size * 3 + index;
int u_base_index = blockIdx.x * hidden_size * 2 + index;
int h_base_index = blockIdx.x * hidden_size + index;
extern __shared__ Dtype shared_hidden_pre[];
Dtype hidden_pre_value = hidden_pre[h_base_index];
Dtype before_act_r = w_x_r[w_base_index] + w_h_r[u_base_index] + b_r[index];
Dtype act_r = sigmoid_paddle(before_act_r);
shared_hidden_pre[index] = hidden_pre_value * act_r;
Dtype before_act_z = w_x_z[w_base_index] + w_h_z[u_base_index] + b_z[index];
Dtype act_z = sigmoid_paddle(before_act_z);
Dtype w_h_o = static_cast<Dtype>(0.0);
int k_index = index;
__syncthreads();
for (int w_index = 0; w_index < hidden_size; ++w_index) {
w_h_o += shared_hidden_pre[w_index] * w_o[k_index];
k_index += hidden_size;
}
Dtype before_act_h = w_x_o[w_base_index] + w_h_o
+ b_o[index];
Dtype acted = relu(before_act_h);
output[h_base_index] = (static_cast<Dtype>(1.0) - act_z) * hidden_pre_value + act_z * acted;
}
template <typename Dtype>
__global__ void cal_one_kernel_sigmoid_tanh_paddle_formula(Dtype* w_x_r, Dtype* w_x_z, Dtype* w_x_o,
Dtype* w_h_r, Dtype* w_h_z, const Dtype* w_o,
const Dtype* b_r, const Dtype* b_z, const Dtype* b_o,
int hidden_size, Dtype* output, const Dtype* hidden_pre) {
int index = threadIdx.x;
if (index >= hidden_size) {
return;
}
int w_base_index = blockIdx.x * hidden_size * 3 + index;
int u_base_index = blockIdx.x * hidden_size * 2 + index;
int h_base_index = blockIdx.x * hidden_size + index;
extern __shared__ Dtype shared_hidden_pre[];
Dtype hidden_pre_value = hidden_pre[h_base_index];
Dtype before_act_r = w_x_r[w_base_index] + w_h_r[u_base_index] + b_r[index];
Dtype act_r = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + expf(-before_act_r));
// printf("%d %f=[%f , %f ,%f]\n",index,act_r,w_x_r[w_base_index],w_h_r[u_base_index],b_r[index]);
shared_hidden_pre[index] = hidden_pre_value * act_r;
Dtype before_act_z = w_x_z[w_base_index] + w_h_z[u_base_index] + b_z[index];
Dtype act_z = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + expf(-before_act_z));
Dtype w_h_o = static_cast<Dtype>(0.0);
int k_index = index;
__syncthreads();
for (int w_index = 0; w_index < hidden_size; ++w_index) {
w_h_o += shared_hidden_pre[w_index] * w_o[k_index];
k_index += hidden_size;
}
Dtype before_act_h = w_x_o[w_base_index] + w_h_o
+ b_o[index];
Dtype acted = tanhf(before_act_h);
output[h_base_index] = (static_cast<Dtype>(1.0) - act_z) * hidden_pre_value + act_z * acted;
// printf("output %d = %f\n",index,output[h_base_index]);
}
template <typename Dtype>
__global__ void cal_one_kernel_sigmoid_tanh(Dtype* w_x_r, Dtype* w_x_z, Dtype* w_x_o,
Dtype* w_h_r, Dtype* w_h_z, Dtype* w_h_o,
const Dtype* b_r, const Dtype* b_z, const Dtype* b_o,
int hidden_size, Dtype* output, const Dtype* hidden_pre) {
int w_base_index = blockIdx.x * hidden_size * 3;
int h_base_index = blockIdx.x * hidden_size;
Dtype* in_w_x_r = w_x_r + w_base_index;
Dtype* in_w_h_r = w_h_r + w_base_index;
Dtype* in_w_x_z = w_x_z + w_base_index;
Dtype* in_w_h_z = w_h_z + w_base_index;
Dtype* in_w_x_o = w_x_o + w_base_index;
Dtype* in_w_h_o = w_h_o + w_base_index;
const Dtype* in_hidden_pre = hidden_pre + h_base_index;
Dtype* out_output = output + h_base_index;
for (int index = threadIdx.x; index < hidden_size; index += blockDim.x) {
Dtype before_act_r = in_w_x_r[index] + in_w_h_r[index] + b_r[index];
Dtype act_r = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + expf(-before_act_r));
Dtype before_act_z = in_w_x_z[index] + in_w_h_z[index] + b_z[index];
Dtype act_z = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + expf(-before_act_z));
Dtype before_act_h = in_w_x_o[index] + in_w_h_o[index] * act_r
+ b_o[index];
Dtype acted = tanhf(before_act_h);
out_output[index] = (static_cast<Dtype>(1.0) - act_z) * acted + act_z * in_hidden_pre[index];
}
}
template <typename Dtype>
__global__ void cal_one_kernel_sigmoid_tanh_index_modi(Dtype* w_x_r, Dtype* w_x_z, Dtype* w_x_o,
Dtype* w_h_r, Dtype* w_h_z, Dtype* w_h_o,
const Dtype* b_r, const Dtype* b_z, const Dtype* b_o,
int hidden_size, Dtype* output, const Dtype* hidden_pre,
int seq_batch_hidden, int batch_size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= seq_batch_hidden) {
return;
}
int batch_id = tid / hidden_size % batch_size;
int index = tid % hidden_size;
int w_base_index = batch_id * hidden_size * 3;
int h_base_index = batch_id * hidden_size;
int index_w = index + w_base_index;
int index_h = index + h_base_index;
{
Dtype before_act_r = w_x_r[index_w] + w_h_r[index_w] + b_r[index];
Dtype act_r = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + expf(-before_act_r));
Dtype before_act_z = w_x_z[index_w] + w_h_z[index_w] + b_z[index];
Dtype act_z = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + expf(-before_act_z));
Dtype before_act_h = w_x_o[index_w] + w_h_o[index_w] * act_r
+ b_o[index];
Dtype acted = tanhf(before_act_h);
output[index_h] = (static_cast<Dtype>(1.0) - act_z) * acted + act_z * hidden_pre[index_h];
}
}
template <typename Dtype>
__global__ void cal_one_kernel_sigmoid_tanh_index(Dtype* w_x_r, Dtype* w_x_z, Dtype* w_x_o,
Dtype* w_h_r, Dtype* w_h_z, Dtype* w_h_o,
const Dtype* b_r, const Dtype* b_z, const Dtype* b_o,
int hidden_size, Dtype* output, const Dtype* hidden_pre,
int seq_batch_hidden, int batch_size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= seq_batch_hidden) {
return;
}
int batch_id = tid / hidden_size % batch_size;
int index = tid % hidden_size;
int w_base_index = batch_id * hidden_size * 3;
int h_base_index = batch_id * hidden_size;
Dtype* in_w_x_r = w_x_r + w_base_index;
Dtype* in_w_h_r = w_h_r + w_base_index;
Dtype* in_w_x_z = w_x_z + w_base_index;
Dtype* in_w_h_z = w_h_z + w_base_index;
Dtype* in_w_x_o = w_x_o + w_base_index;
Dtype* in_w_h_o = w_h_o + w_base_index;
const Dtype* in_hidden_pre = hidden_pre + h_base_index;
Dtype* out_output = output + h_base_index;
{
Dtype before_act_r = in_w_x_r[index] + in_w_h_r[index] + b_r[index];
Dtype act_r = Dtype(Dtype(1) / (Dtype(1) + expf(-before_act_r)));
Dtype before_act_z = in_w_x_z[index] + in_w_h_z[index] + b_z[index];
Dtype act_z = Dtype(Dtype(1) / (Dtype(1) + expf(-before_act_z)));
Dtype before_act_h = in_w_x_o[index] + in_w_h_o[index] * act_r
+ b_o[index];
Dtype acted = tanhf(before_act_h);
out_output[index] = (1 - act_z) * acted + act_z * in_hidden_pre[index];
}
}
template <typename Dtype>
__global__ void cal_one_kernel_paddlesigmoid_relu_cudnn_formula(Dtype* w_x_r, Dtype* w_x_z,
Dtype* w_x_o,
Dtype* w_h_r, Dtype* w_h_z, Dtype* w_h_o,
const Dtype* b_r, const Dtype* b_z, const Dtype* b_o,
int hidden_size, Dtype* output, const Dtype* hidden_pre) {
int w_base_index = blockIdx.x * hidden_size * 3;
int h_base_index = blockIdx.x * hidden_size;
Dtype* in_w_x_r = w_x_r + w_base_index;
Dtype* in_w_h_r = w_h_r + w_base_index;
Dtype* in_w_x_z = w_x_z + w_base_index;
Dtype* in_w_h_z = w_h_z + w_base_index;
Dtype* in_w_x_o = w_x_o + w_base_index;
Dtype* in_w_h_o = w_h_o + w_base_index;
const Dtype* in_hidden_pre = hidden_pre + h_base_index;
Dtype* out_output = output + h_base_index;
for (int index = threadIdx.x; index < hidden_size; index += blockDim.x) {
const Dtype min = SIGMOID_THRESHOLD_MIN_PADDLE;
const Dtype max = SIGMOID_THRESHOLD_MAX_PADDLE;
Dtype before_act_r = in_w_x_r[index] + in_w_h_r[index] + b_r[index];
before_act_r = (before_act_r < min) ? min : ((before_act_r > max) ? max : before_act_r);
Dtype act_r = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + exp(-before_act_r));
Dtype before_act_z = in_w_x_z[index] + in_w_h_z[index] + b_z[index];
before_act_z = (before_act_z < min) ? min : ((before_act_z > max) ? max : before_act_z);
Dtype act_z = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + exp(-before_act_z));
Dtype before_act_h = in_w_x_o[index] + in_w_h_o[index] * act_r
+ b_o[index];
Dtype acted = before_act_h > static_cast<Dtype>(0.0) ? before_act_h : static_cast<Dtype>(0.0);
out_output[index] = (1 - act_z) * acted + act_z * in_hidden_pre[index];
}
}
template <typename Dtype>
__global__ void cal_one_kernel_sigmoid_tanh_modi(Dtype* w_x_r, Dtype* w_x_z, Dtype* w_x_o,
Dtype* w_h_r, Dtype* w_h_z, Dtype* w_h_o,
const Dtype* b_r, const Dtype* b_z, const Dtype* b_o,
int hidden_size, Dtype* output, const Dtype* hidden_pre) {
int w_base_index = blockIdx.x * hidden_size * 3 + threadIdx.x;
int h_base_index = blockIdx.x * hidden_size + threadIdx.x;
for (int index = threadIdx.x; index < hidden_size;
index += blockDim.x, w_base_index += blockDim.x, h_base_index += blockDim.x) {
Dtype before_act_r = w_x_r[w_base_index] + w_h_r[w_base_index] + b_r[index];
Dtype act_r = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + expf(-before_act_r));
Dtype before_act_z = w_x_z[w_base_index] + w_h_z[w_base_index] + b_z[index];
Dtype act_z = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + expf(-before_act_z));
Dtype before_act_h = w_x_o[w_base_index] + w_h_o[w_base_index] * act_r
+ b_o[index];
Dtype acted = tanhf(before_act_h);
output[h_base_index] = (static_cast<Dtype>(1.0) - act_z) * acted + act_z * hidden_pre[h_base_index];
}
}
template <>
SaberStatus SaberGru<NV, AK_FLOAT, AK_FLOAT, AK_FLOAT, NCHW, NCHW, NCHW>::gru_cudnn(
const std::vector<DataTensor_in*> inputs,
std::vector<DataTensor_out*> outputs,
GruParam<OpTensor>& param) {
DataTensor_in* x = inputs[0];
const InDataType* x_data = x->data();
std::vector<int> offset=x->get_seq_offset();
const InDataType* h;
DataTensor_out* dout = outputs[0];
OutDataType* dout_data = dout->mutable_data();
//TODO:check shape first
const OpTensor* b = param.bias();
    int batch_size = offset.size() - 1; //x->get_seq_offset().size()-1;
int sequence = x->num();
int hidden_size = b->valid_size() / 3;
bool isHW2Seq=offset.size()>2;
int o_offset = 0;
int r_offset = 1;
int z_offset = 2;
// CHECK_EQ(w_h2h->height(), hidden_size) << "w_h2h->height()==batch_size";
// CHECK_EQ(w_h2h->width(), hidden_size * 3) << "w_h2h->width()==hidden_size*3";
//
// CHECK_EQ(w_i2h->height(), word_size) << "w_i2h->height()==word_size";
// CHECK_EQ(w_i2h->width(), hidden_size * 3) << "w_i2h->width()==hidden_size*3";
if (isHW2Seq) {
x_data = hw2seq(inputs, param, _word_size, hidden_size, sequence);
batch_size = offset.size() - 1;
if (x_data != x->data()) {
dout_data = _temp_tensor_out.mutable_data();
}
}
Shape shape_wx(sequence, batch_size, 3, hidden_size);
_temp_WX.try_expand_size(shape_wx);
Shape shape_wh(1, batch_size, 3, hidden_size);
_temp_WH.try_expand_size(shape_wh);
anakin_NV_gemm(_cublas_handle, false, false, sequence * batch_size, 3 * hidden_size,
_word_size, 1.0, x_data, _weights_i2h.data(), 0.0, _temp_WX.mutable_data());
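    // W_x * x is precomputed for all time steps in one gemm; W_h * h_(t-1) is computed per step inside the loop below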
const OpDataType* b_r = b->data() + r_offset * hidden_size;
const OpDataType* b_z = b->data() + z_offset * hidden_size;
const OpDataType* b_o = b->data() + o_offset * hidden_size;
if (inputs.size() == 1) {
CUDA_CHECK(hipMemsetAsync(dout_data, 0, sizeof(InDataType) * batch_size * hidden_size,
_ctx.get_compute_stream()));
h = dout_data;
} else {
h = inputs[1]->data();
CHECK_EQ(inputs[1]->valid_size(), batch_size * hidden_size) <<
"h size should be batch_size * hidden_size";
}
for (int seq = 0; seq < sequence; seq++) {
const InDataType* hidden_in;
InDataType* hidden_out = dout_data + seq * batch_size * hidden_size;
if (seq == 0) {
hidden_in = h;
} else {
hidden_in = dout_data + (seq - 1) * batch_size * hidden_size;
}
anakin_NV_gemm(_cublas_handle, false, false, batch_size,
3 * hidden_size, hidden_size, 1.0, hidden_in,
_weights_h2h.data(), 0.0, _temp_WH.mutable_data());
OpDataType* w_x_r = _temp_WX.mutable_data() + r_offset * hidden_size
+ seq * batch_size * hidden_size * 3;
OpDataType* w_x_z = _temp_WX.mutable_data() + z_offset * hidden_size
+ seq * batch_size * hidden_size * 3;
OpDataType* w_x_o = _temp_WX.mutable_data() + o_offset * hidden_size
+ seq * batch_size * hidden_size * 3;
OpDataType* w_h_r = _temp_WH.mutable_data() + r_offset * hidden_size;
OpDataType* w_h_z = _temp_WH.mutable_data() + z_offset * hidden_size;
OpDataType* w_h_o = _temp_WH.mutable_data() + o_offset * hidden_size;
int frame_per_block = hidden_size <= 1024 ? hidden_size : 1024;
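        // one block per batch sample; threads stride over the hidden units when hidden_size > blockDim.x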
if (param.gate_activity == Active_sigmoid
&& param.h_activity == Active_tanh) {
cal_one_kernel_sigmoid_tanh_modi_cudnn_formula
<< < batch_size, frame_per_block, 0, _ctx.get_compute_stream() >> >
(w_x_r, w_x_z, w_x_o, w_h_r, w_h_z, w_h_o
, b_r, b_z, b_o, hidden_size, hidden_out, hidden_in);
} else if (param.gate_activity == Active_sigmoid_fluid
&& param.h_activity == Active_tanh) {
cal_one_kernel_paddlesigmoid_tanh_cudnn_formula
<< < batch_size, frame_per_block, 0, _ctx.get_compute_stream() >> >
(w_x_r, w_x_z, w_x_o, w_h_r, w_h_z, w_h_o
, b_r, b_z, b_o, hidden_size, hidden_out, hidden_in);
} else if (param.gate_activity == Active_sigmoid_fluid
&& param.h_activity == Active_relu) {
cal_one_kernel_paddlesigmoid_relu_cudnn_formula
<< < batch_size, frame_per_block, 0, _ctx.get_compute_stream() >> >
(w_x_r, w_x_z, w_x_o, w_h_r, w_h_z, w_h_o
, b_r, b_z, b_o, hidden_size, hidden_out, hidden_in);
} else {
            LOG(ERROR) << "unsupported activation function";
}
}
if (isHW2Seq) {
seq2hw(outputs, inputs, param, hidden_size, dout_data);
outputs[0]->set_seq_offset(inputs[0]->get_seq_offset());
}
return SaberSuccess;
}
template<>
SaberStatus SaberGru<NV, AK_FLOAT, AK_FLOAT, AK_FLOAT, NCHW, NCHW, NCHW>::dispatch(\
const std::vector<DataTensor_in*>& inputs,
std::vector<DataTensor_out*>& outputs,
GruParam <OpTensor>& param) {
if (param.formula == GRU_CUDNN) {
        if (param.is_reverse) {
            LOG(ERROR) << "saber cudnn formula does not support reverse yet";
        }
return gru_cudnn(inputs, outputs, param);
}
// LOG(INFO)<<"gru_paddle";
DataTensor_in* x = inputs[0];
std::vector<int> offset=x->get_seq_offset();
const InDataType* x_data = x->data();
const InDataType* h;
DataTensor_out* dout = outputs[0];
OutDataType* dout_data = dout->mutable_data();
//TODO:check shape first
const OpTensor* b = param.bias();
int batch_size = offset.size() - 1; //x->get_seq_offset().size()-1;
int sequence = x->num();
int hidden_size = b->valid_size() / 3;
bool isHW2Seq=offset.size()>2;
int o_offset = 0;
int r_offset = 1;
int z_offset = 2;
// CHECK_EQ(w_h2h->height(), hidden_size) << "w_h2h->height()==batch_size";
// CHECK_EQ(w_h2h->width(), hidden_size * 3) << "w_h2h->width()==hidden_size*3";
//
// CHECK_EQ(w_i2h->height(), word_size) << "w_i2h->height()==word_size";
// CHECK_EQ(w_i2h->width(), hidden_size * 3) << "w_i2h->width()==hidden_size*3";
if (isHW2Seq) {
x_data = hw2seq(inputs, param, _word_size, hidden_size, sequence);
// batch_size = inputs[0]->get_seq_offset().size() - 1;
if (x_data != x->data()) {
dout_data = _temp_tensor_out.mutable_data();
}
}
Shape shape_WX(sequence, batch_size, 3, hidden_size);
_temp_WX.try_expand_size(shape_WX);
Shape shape_WH(1, batch_size, 2, hidden_size);
_temp_WH.try_expand_size(shape_WH);
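    // for the paddle formula WH holds only the r/z gate terms (2 * hidden_size per sample);
    // the candidate h2h product is computed inside the kernel from w_o and the reset-gated hidden state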
anakin_NV_gemm(_cublas_handle, false, false, sequence * batch_size, 3 * hidden_size,
_word_size, 1.0, x_data, _weights_i2h.data(), 0.0, _temp_WX.mutable_data());
const OpDataType* b_r = b->data() + r_offset * hidden_size;
const OpDataType* b_z = b->data() + z_offset * hidden_size;
const OpDataType* b_o = b->data() + o_offset * hidden_size;
if (inputs.size() == 1) {
CUDA_CHECK(hipMemsetAsync(dout_data, 0, sizeof(OutDataType)*batch_size * hidden_size,
_ctx.get_compute_stream()));
h = dout_data;
} else {
h = inputs[1]->data();
}
for (int seq = 0; seq < sequence; ++seq) {
int realseq = seq;
int last_seq = realseq - 1;
if (param.is_reverse) {
// DLOG(INFO)<<"reverse gru";
realseq = sequence - 1 - seq;
last_seq = realseq + 1;
}
const OutDataType* hidden_in;
OutDataType* hidden_out = dout_data + realseq * batch_size * hidden_size;
if (seq == 0) {
hidden_in = h;
} else {
hidden_in = dout_data + last_seq * batch_size * hidden_size;
}
anakin_NV_gemm(_cublas_handle, false, false, batch_size,
2 * hidden_size, hidden_size, 1.0, hidden_in,
_weights_h2h.data() + hidden_size * hidden_size, 0.0, _temp_WH.mutable_data());
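        // h2h weight layout: the first hidden_size * hidden_size block is w_o (candidate weights),
        // the remaining 2 * hidden_size * hidden_size block holds the r/z gate weights used in this gemm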
OutDataType* w_x_r = _temp_WX.mutable_data() + r_offset * hidden_size
+ realseq * batch_size * hidden_size * 3;
OutDataType* w_x_z = _temp_WX.mutable_data() + z_offset * hidden_size
+ realseq * batch_size * hidden_size * 3;
OutDataType* w_x_o = _temp_WX.mutable_data() + o_offset * hidden_size
+ realseq * batch_size * hidden_size * 3;
OutDataType* w_h_r = _temp_WH.mutable_data() + 0 * hidden_size;
OutDataType* w_h_z = _temp_WH.mutable_data() + 1 * hidden_size;
const OpDataType * w_o = _weights_h2h.data();
        CHECK_LE(hidden_size, 1024) << "hidden size > 1024 is not supported for the paddle formula yet";
int frame_per_block = hidden_size <= 1024 ? hidden_size : 1024;
// DLOG(INFO) << "act = " << param._gate_activity << "," << param._h_activity;
if (param.gate_activity == Active_sigmoid
&& param.h_activity == Active_tanh) {
hipLaunchKernelGGL(( cal_one_kernel_sigmoid_tanh_paddle_formula)
, dim3(batch_size), dim3(frame_per_block), sizeof(OutDataType)*hidden_size
, _ctx.get_compute_stream(),
w_x_r, w_x_z, w_x_o, w_h_r, w_h_z, w_o
, b_r, b_z, b_o, hidden_size, hidden_out, hidden_in);
} else if (param.gate_activity == Active_sigmoid_fluid
&& param.h_activity == Active_relu) {
cal_one_kernel_paddlesigmoid_relu_paddle_formula
<< < batch_size, frame_per_block, sizeof(OutDataType)*hidden_size
, _ctx.get_compute_stream() >> >
(w_x_r, w_x_z, w_x_o, w_h_r, w_h_z, w_o
, b_r, b_z, b_o, hidden_size, hidden_out, hidden_in);
} else {
            LOG(ERROR) << "unsupported activation function";
}
}
if (isHW2Seq) {
seq2hw(outputs, inputs, param, hidden_size, dout_data);
}
outputs[0]->set_seq_offset(inputs[0]->get_seq_offset());
return SaberSuccess;
}
}
}
| saber_gru.cu | #include "saber/funcs/impl/cuda/saber_gru.h"
#include "saber/core/tensor_op.h"
#include "cuda_fp16.h"
namespace anakin {
namespace saber {
////TODO: could try caching the map vector in shared memory
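// trans_map2in / trans_map2out convert between the original (hw) layout and the time-major
// batched (seq) layout used by the GRU kernels, via a precomputed source->target index map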
template <typename Dtype>
__global__ void trans_map2in(Dtype* output, const Dtype* input, const int* map, int count,
int lastdim) {
CUDA_KERNEL_LE(tid, count) {
int seq = tid / lastdim;
output[tid] = input[map[seq] * lastdim + tid % lastdim];
}
}
template <typename Dtype>
__global__ void trans_map2out(Dtype* output, const Dtype* input, const int* map, int count,
int lastdim) {
CUDA_KERNEL_LE(tid, count) {
int seq = tid / lastdim;
output[map[seq]*lastdim + tid % lastdim] = input[tid];
}
}
template <>
void SaberGru<NV, AK_FLOAT, AK_FLOAT, AK_FLOAT, NCHW, NCHW, NCHW>::seq2hw(\
std::vector<DataTensor_out*> outputs, std::vector<DataTensor_in*> inputs,
GruParam<OpTensor>& param, int hidden_size,
void* real_temp_out
) {
DataTensor_in* din = inputs[0];
DataTensor_out* dout = outputs[0];
int wordsize = din->channel();
std::vector<int> offset_vec = din->get_seq_offset();
CHECK_GE(offset_vec.size(), 2) << "offset must >=2" ;
int batch_size = offset_vec.size() - 1;
int max_len = 0;
std::vector<int> length_vec;
if ((void*)(outputs[0]->data()) == real_temp_out) {
DLOG(INFO) << "not use inner space";
return;
}
const OutDataType* origin = _temp_tensor_out.data();
OutDataType* target = dout->mutable_data();
    // source is the sequence id in seq order, target is the hw id in seq; map is the source-to-target ptr offset
int seq_sum = offset_vec[batch_size];
CUDA_CHECK(cudaMemcpyAsync(_temp_map_dev.mutable_data(), _temp_map_host.data(), sizeof(int)*seq_sum,
cudaMemcpyHostToDevice, _ctx.get_compute_stream()));
int count=seq_sum * hidden_size;
int block_dim=count;
int grid_dim=1;
if(count>1024){
block_dim=256;
grid_dim=(count+block_dim-1)/block_dim;
}
trans_map2in <<< grid_dim, block_dim, 0, _ctx.get_compute_stream()>>>(target, origin, _temp_map_dev.data(),
count, hidden_size);
// trans_map2in_old <<< 4, 128, 0, _ctx.get_compute_stream()>>>(target, origin, _temp_map_dev.data(),
// count, hidden_size);
}
//TODO: gemm by self, flatten by time, padding by nothing (zhangs)
template <>
const float* SaberGru<NV, AK_FLOAT, AK_FLOAT, AK_FLOAT, NCHW, NCHW, NCHW>::hw2seq(\
std::vector<DataTensor_in*> inputs, GruParam<OpTensor>& param, \
int word_size, int hidden_size, int& sequence_len) {
DataTensor_in* din = inputs[0];
std::vector<int> offset_vec = din->get_seq_offset();
CHECK_GE(offset_vec.size(), 2) << "offset must >=2" ;
int batch_size = offset_vec.size() - 1;
int seq_sum = offset_vec[offset_vec.size() - 1];
int wordsize = din->channel();
int max_len = 0;
std::vector<int> length_vec(batch_size);
for (int i = 0; i < offset_vec.size() - 1; ++i) {
int len = offset_vec[i + 1] - offset_vec[i];
max_len = max_len > len ? max_len : len;
length_vec[i] = len;
}
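    // pad every sequence to max_len so that one gemm per time step covers the whole batch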
Shape seq_shape(1, max_len, batch_size, word_size);
_temp_tensor_in.try_expand_size(seq_shape);
Shape seq_out_shape(1, max_len, batch_size, hidden_size);
_temp_tensor_out.try_expand_size(seq_out_shape);
sequence_len = max_len;
if (batch_size == 1 || max_len == 1) {
return din->mutable_data();
}
InDataType* target = _temp_tensor_in.mutable_data();
const InDataType* origin = din->data();
_temp_map_host.try_expand_size(seq_sum);
_temp_map_dev.try_expand_size(seq_sum);
int* map = _temp_map_host.mutable_data();
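    // build the source->target index map; in the reverse direction sequences are right-aligned,
    // i.e. shorter sequences are shifted by (max_len - len)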
if (param.is_reverse) {
for (int batchid = 0; batchid < batch_size; ++batchid) {
int batch_offset = max_len - length_vec[batchid];
for (int seqid = 0; seqid < length_vec[batchid]; ++seqid) {
int source = (offset_vec[batchid] + seqid);
int target = ((seqid + batch_offset) * batch_size + batchid);
map[source] = target;
}
}
} else {
for (int batchid = 0; batchid < batch_size; ++batchid) {
for (int seqid = 0; seqid < length_vec[batchid]; ++seqid) {
int source = (offset_vec[batchid] + seqid);
int target = (seqid * batch_size + batchid);
map[source] = target;
}
}
}
CUDA_CHECK(cudaMemcpyAsync(_temp_map_dev.mutable_data(), _temp_map_host.data(), sizeof(int)*seq_sum,
cudaMemcpyHostToDevice, _ctx.get_compute_stream()));
int count=seq_sum * wordsize;
int block_dim=count;
int grid_dim=1;
if(count>1024){
block_dim=256;
grid_dim=(count+block_dim-1)/block_dim;
}
trans_map2out <<< grid_dim, block_dim, 0, _ctx.get_compute_stream()>>>(target, origin, _temp_map_dev.data(),
count, wordsize);
// trans_map2out_old <<< 4, 128, 0, _ctx.get_compute_stream()>>>(target, origin, _temp_map_dev.data(),
// count, wordsize);
return _temp_tensor_in.data();
}
#define SIGMOID_THRESHOLD_MIN_PADDLE -40.0
#define SIGMOID_THRESHOLD_MAX_PADDLE 13.0
#define EXP_MAX_INPUT_PADDLE 40.0
template <typename T>
inline static __device__ T identity(const T a) {
return a;
}
template <typename T>
inline static __device__ T relu(const T a) {
return a > static_cast<T>(0.0) ? a : static_cast<T>(0.0);
}
template <typename T>
inline static __device__ T sigmoid_paddle(const T a) {
const T min = SIGMOID_THRESHOLD_MIN_PADDLE;
const T max = SIGMOID_THRESHOLD_MAX_PADDLE;
T tmp = (a < min) ? min : ((a > max) ? max : a);
return static_cast<T>(1.0) / (static_cast<T>(1.0) + exp(-tmp));
}
template <typename T>
inline static __device__ T tanh_paddle(const T a) {
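    // tanh(a) = 2 / (1 + exp(-2a)) - 1; the exponent argument is clamped to avoid overflow in exp()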
T tmp = -2.0 * a;
tmp = (tmp > EXP_MAX_INPUT_PADDLE) ? EXP_MAX_INPUT_PADDLE : tmp;
return (2.0 / (1.0 + exp(tmp))) - 1.0;
}
static void anakin_NV_gemm(cublasHandle_t handle, const bool TransA,
const bool TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
int lda = (!TransA/* == CblasNoTrans*/) ? K : M;
int ldb = (!TransB/* == CblasNoTrans*/) ? N : K;
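    // row-major C = A * B is computed as column-major C^T = B^T * A^T, hence A and B are swapped in the cublas call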
cublasOperation_t cuTransA =
(!TransA/* == CblasNoTrans*/) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(!TransB/* == CblasNoTrans*/) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasSgemm(handle, cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
/**
* gridDim=batchsize
* @tparam Dtype
* @param w_x_r
* @param w_h_r
* @param br
* @param hidden_size
* @param output_r
* @param w_x_z
* @param w_h_z
* @param bz
* @param output_z
*/
template <typename Dtype>
__global__ void cal_reset_update(Dtype* w_x_r, Dtype* w_h_r, const Dtype* b_r,
const int hidden_size, Dtype* output_r,
Dtype* w_x_z, Dtype* w_h_z, const Dtype* b_z, Dtype* output_z) {
int w_base_index = blockIdx.x * hidden_size * 3;
int h_base_index = blockIdx.x * hidden_size;
Dtype* in_w_x_r = w_x_r + w_base_index;
Dtype* in_w_h_r = w_h_r + w_base_index;
Dtype* in_w_x_z = w_x_z + w_base_index;
Dtype* in_w_h_z = w_h_z + w_base_index;
Dtype* out_output_r = output_r + h_base_index;
Dtype* out_output_z = output_z + h_base_index;
for (int index = threadIdx.x; index < hidden_size; index += blockDim.x) {
Dtype before_act_r = in_w_x_r[index] + in_w_h_r[index] + b_r[index];
out_output_r[index] = Dtype(Dtype(1) / (Dtype(1) + expf(-before_act_r)));
Dtype before_act_z = in_w_x_z[index] + in_w_h_z[index] + b_z[index];
out_output_z[index] = Dtype(Dtype(1) / (Dtype(1) + expf(-before_act_z)));
}
}
template <typename Dtype>
__global__ void cal_final(Dtype* w_x_o, Dtype* w_h_o, Dtype* reset, const Dtype* b_o,
const int hidden_size, Dtype* update, Dtype* output, Dtype* hidden_pre) {
int w_base_index = blockIdx.x * hidden_size * 3;
int h_base_index = blockIdx.x * hidden_size;
Dtype* in_w_x_o = w_x_o + w_base_index;
Dtype* in_w_h_o = w_h_o + w_base_index;
Dtype* in_hidden_pre = hidden_pre + h_base_index;
Dtype* in_update = update + h_base_index;
Dtype* in_reset = reset + h_base_index;
Dtype* out_output = output + h_base_index;
for (int index = threadIdx.x; index < hidden_size; index += blockDim.x) {
Dtype before_act_h = in_w_x_o[index] + in_w_h_o[index] * in_reset[index]
+ b_o[index];
Dtype acted = tanhf(before_act_h);
Dtype update_t = in_update[index];
out_output[index] = (1 - update_t) * acted + update_t* in_hidden_pre[index];
}
}
template <typename Dtype>
__global__ void cal_one_kernel_paddlesigmoid_tanh_cudnn_formula(Dtype* w_x_r, Dtype* w_x_z,
Dtype* w_x_o,
Dtype* w_h_r, Dtype* w_h_z, Dtype* w_h_o,
const Dtype* b_r, const Dtype* b_z, const Dtype* b_o,
int hidden_size, Dtype* output, const Dtype* hidden_pre) {
int w_base_index = blockIdx.x * hidden_size * 3;
int h_base_index = blockIdx.x * hidden_size;
Dtype* in_w_x_r = w_x_r + w_base_index;
Dtype* in_w_h_r = w_h_r + w_base_index;
Dtype* in_w_x_z = w_x_z + w_base_index;
Dtype* in_w_h_z = w_h_z + w_base_index;
Dtype* in_w_x_o = w_x_o + w_base_index;
Dtype* in_w_h_o = w_h_o + w_base_index;
const Dtype* in_hidden_pre = hidden_pre + h_base_index;
Dtype* out_output = output + h_base_index;
for (int index = threadIdx.x; index < hidden_size; index += blockDim.x) {
const Dtype min = SIGMOID_THRESHOLD_MIN_PADDLE;
const Dtype max = SIGMOID_THRESHOLD_MAX_PADDLE;
Dtype before_act_r = in_w_x_r[index] + in_w_h_r[index] + b_r[index];
before_act_r = (before_act_r < min) ? min : ((before_act_r > max) ? max : before_act_r);
Dtype act_r = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + exp(-before_act_r));
Dtype before_act_z = in_w_x_z[index] + in_w_h_z[index] + b_z[index];
before_act_z = (before_act_z < min) ? min : ((before_act_z > max) ? max : before_act_z);
Dtype act_z = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + exp(-before_act_z));
Dtype before_act_h = in_w_x_o[index] + in_w_h_o[index] * act_r
+ b_o[index];
before_act_h = (before_act_h > EXP_MAX_INPUT_PADDLE) ? EXP_MAX_INPUT_PADDLE : before_act_h;
Dtype acted = tanhf(before_act_h);
out_output[index] = (1 - act_z) * acted + act_z * in_hidden_pre[index];
}
}
template <typename Dtype>
__global__ void cal_one_kernel_sigmoid_tanh_modi_cudnn_formula(Dtype* w_x_r, Dtype* w_x_z,
Dtype* w_x_o,
Dtype* w_h_r, Dtype* w_h_z, Dtype* w_h_o,
const Dtype* b_r, const Dtype* b_z, const Dtype* b_o,
int hidden_size, Dtype* output, const Dtype* hidden_pre) {
int w_base_index = blockIdx.x * hidden_size * 3 + threadIdx.x;
int h_base_index = blockIdx.x * hidden_size + threadIdx.x;
for (int index = threadIdx.x; index < hidden_size;
index += blockDim.x, w_base_index += blockDim.x, h_base_index += blockDim.x) {
Dtype before_act_r = w_x_r[w_base_index] + w_h_r[w_base_index] + b_r[index];
Dtype act_r = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + expf(-before_act_r));
Dtype before_act_z = w_x_z[w_base_index] + w_h_z[w_base_index] + b_z[index];
Dtype act_z = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + expf(-before_act_z));
Dtype before_act_h = w_x_o[w_base_index] + w_h_o[w_base_index] * act_r
+ b_o[index];
Dtype acted = tanh(before_act_h);
output[h_base_index] = (static_cast<Dtype>(1.0) - act_z) * acted + act_z * hidden_pre[h_base_index];
}
}
template <typename Dtype>
__global__ void cal_one_kernel_paddlesigmoid_relu_paddle_formula(Dtype* w_x_r, Dtype* w_x_z,
Dtype* w_x_o,
Dtype* w_h_r, Dtype* w_h_z, const Dtype* w_o,
const Dtype* b_r, const Dtype* b_z, const Dtype* b_o,
int hidden_size, Dtype* output, const Dtype* hidden_pre) {
int index = threadIdx.x;
    if (index >= hidden_size) {
return;
}
int w_base_index = blockIdx.x * hidden_size * 3 + index;
int u_base_index = blockIdx.x * hidden_size * 2 + index;
int h_base_index = blockIdx.x * hidden_size + index;
extern __shared__ Dtype shared_hidden_pre[];
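    // paddle-formula GRU: the reset gate is applied to the previous hidden state in shared memory,
    // then each thread computes one element of the candidate h2h product by a dot product with w_o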
Dtype hidden_pre_value = hidden_pre[h_base_index];
Dtype before_act_r = w_x_r[w_base_index] + w_h_r[u_base_index] + b_r[index];
Dtype act_r = sigmoid_paddle(before_act_r);
shared_hidden_pre[index] = hidden_pre_value * act_r;
Dtype before_act_z = w_x_z[w_base_index] + w_h_z[u_base_index] + b_z[index];
Dtype act_z = sigmoid_paddle(before_act_z);
Dtype w_h_o = static_cast<Dtype>(0.0);
int k_index = index;
__syncthreads();
for (int w_index = 0; w_index < hidden_size; ++w_index) {
w_h_o += shared_hidden_pre[w_index] * w_o[k_index];
k_index += hidden_size;
}
Dtype before_act_h = w_x_o[w_base_index] + w_h_o
+ b_o[index];
Dtype acted = relu(before_act_h);
output[h_base_index] = (static_cast<Dtype>(1.0) - act_z) * hidden_pre_value + act_z * acted;
}
template <typename Dtype>
__global__ void cal_one_kernel_sigmoid_tanh_paddle_formula(Dtype* w_x_r, Dtype* w_x_z, Dtype* w_x_o,
Dtype* w_h_r, Dtype* w_h_z, const Dtype* w_o,
const Dtype* b_r, const Dtype* b_z, const Dtype* b_o,
int hidden_size, Dtype* output, const Dtype* hidden_pre) {
int index = threadIdx.x;
    if (index >= hidden_size) {
return;
}
int w_base_index = blockIdx.x * hidden_size * 3 + index;
int u_base_index = blockIdx.x * hidden_size * 2 + index;
int h_base_index = blockIdx.x * hidden_size + index;
extern __shared__ Dtype shared_hidden_pre[];
Dtype hidden_pre_value = hidden_pre[h_base_index];
Dtype before_act_r = w_x_r[w_base_index] + w_h_r[u_base_index] + b_r[index];
Dtype act_r = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + expf(-before_act_r));
// printf("%d %f=[%f , %f ,%f]\n",index,act_r,w_x_r[w_base_index],w_h_r[u_base_index],b_r[index]);
shared_hidden_pre[index] = hidden_pre_value * act_r;
Dtype before_act_z = w_x_z[w_base_index] + w_h_z[u_base_index] + b_z[index];
Dtype act_z = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + expf(-before_act_z));
Dtype w_h_o = static_cast<Dtype>(0.0);
int k_index = index;
__syncthreads();
for (int w_index = 0; w_index < hidden_size; ++w_index) {
w_h_o += shared_hidden_pre[w_index] * w_o[k_index];
k_index += hidden_size;
}
Dtype before_act_h = w_x_o[w_base_index] + w_h_o
+ b_o[index];
Dtype acted = tanhf(before_act_h);
output[h_base_index] = (static_cast<Dtype>(1.0) - act_z) * hidden_pre_value + act_z * acted;
// printf("output %d = %f\n",index,output[h_base_index]);
}
template <typename Dtype>
__global__ void cal_one_kernel_sigmoid_tanh(Dtype* w_x_r, Dtype* w_x_z, Dtype* w_x_o,
Dtype* w_h_r, Dtype* w_h_z, Dtype* w_h_o,
const Dtype* b_r, const Dtype* b_z, const Dtype* b_o,
int hidden_size, Dtype* output, const Dtype* hidden_pre) {
int w_base_index = blockIdx.x * hidden_size * 3;
int h_base_index = blockIdx.x * hidden_size;
Dtype* in_w_x_r = w_x_r + w_base_index;
Dtype* in_w_h_r = w_h_r + w_base_index;
Dtype* in_w_x_z = w_x_z + w_base_index;
Dtype* in_w_h_z = w_h_z + w_base_index;
Dtype* in_w_x_o = w_x_o + w_base_index;
Dtype* in_w_h_o = w_h_o + w_base_index;
const Dtype* in_hidden_pre = hidden_pre + h_base_index;
Dtype* out_output = output + h_base_index;
for (int index = threadIdx.x; index < hidden_size; index += blockDim.x) {
Dtype before_act_r = in_w_x_r[index] + in_w_h_r[index] + b_r[index];
Dtype act_r = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + expf(-before_act_r));
Dtype before_act_z = in_w_x_z[index] + in_w_h_z[index] + b_z[index];
Dtype act_z = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + expf(-before_act_z));
Dtype before_act_h = in_w_x_o[index] + in_w_h_o[index] * act_r
+ b_o[index];
Dtype acted = tanhf(before_act_h);
out_output[index] = (static_cast<Dtype>(1.0) - act_z) * acted + act_z * in_hidden_pre[index];
}
}
template <typename Dtype>
__global__ void cal_one_kernel_sigmoid_tanh_index_modi(Dtype* w_x_r, Dtype* w_x_z, Dtype* w_x_o,
Dtype* w_h_r, Dtype* w_h_z, Dtype* w_h_o,
const Dtype* b_r, const Dtype* b_z, const Dtype* b_o,
int hidden_size, Dtype* output, const Dtype* hidden_pre,
int seq_batch_hidden, int batch_size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= seq_batch_hidden) {
return;
}
int batch_id = tid / hidden_size % batch_size;
int index = tid % hidden_size;
int w_base_index = batch_id * hidden_size * 3;
int h_base_index = batch_id * hidden_size;
int index_w = index + w_base_index;
int index_h = index + h_base_index;
{
Dtype before_act_r = w_x_r[index_w] + w_h_r[index_w] + b_r[index];
Dtype act_r = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + expf(-before_act_r));
Dtype before_act_z = w_x_z[index_w] + w_h_z[index_w] + b_z[index];
Dtype act_z = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + expf(-before_act_z));
Dtype before_act_h = w_x_o[index_w] + w_h_o[index_w] * act_r
+ b_o[index];
Dtype acted = tanhf(before_act_h);
output[index_h] = (static_cast<Dtype>(1.0) - act_z) * acted + act_z * hidden_pre[index_h];
}
}
template <typename Dtype>
__global__ void cal_one_kernel_sigmoid_tanh_index(Dtype* w_x_r, Dtype* w_x_z, Dtype* w_x_o,
Dtype* w_h_r, Dtype* w_h_z, Dtype* w_h_o,
const Dtype* b_r, const Dtype* b_z, const Dtype* b_o,
int hidden_size, Dtype* output, const Dtype* hidden_pre,
int seq_batch_hidden, int batch_size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= seq_batch_hidden) {
return;
}
int batch_id = tid / hidden_size % batch_size;
int index = tid % hidden_size;
int w_base_index = batch_id * hidden_size * 3;
int h_base_index = batch_id * hidden_size;
Dtype* in_w_x_r = w_x_r + w_base_index;
Dtype* in_w_h_r = w_h_r + w_base_index;
Dtype* in_w_x_z = w_x_z + w_base_index;
Dtype* in_w_h_z = w_h_z + w_base_index;
Dtype* in_w_x_o = w_x_o + w_base_index;
Dtype* in_w_h_o = w_h_o + w_base_index;
const Dtype* in_hidden_pre = hidden_pre + h_base_index;
Dtype* out_output = output + h_base_index;
{
Dtype before_act_r = in_w_x_r[index] + in_w_h_r[index] + b_r[index];
Dtype act_r = Dtype(Dtype(1) / (Dtype(1) + expf(-before_act_r)));
Dtype before_act_z = in_w_x_z[index] + in_w_h_z[index] + b_z[index];
Dtype act_z = Dtype(Dtype(1) / (Dtype(1) + expf(-before_act_z)));
Dtype before_act_h = in_w_x_o[index] + in_w_h_o[index] * act_r
+ b_o[index];
Dtype acted = tanhf(before_act_h);
out_output[index] = (1 - act_z) * acted + act_z * in_hidden_pre[index];
}
}
template <typename Dtype>
__global__ void cal_one_kernel_paddlesigmoid_relu_cudnn_formula(Dtype* w_x_r, Dtype* w_x_z,
Dtype* w_x_o,
Dtype* w_h_r, Dtype* w_h_z, Dtype* w_h_o,
const Dtype* b_r, const Dtype* b_z, const Dtype* b_o,
int hidden_size, Dtype* output, const Dtype* hidden_pre) {
int w_base_index = blockIdx.x * hidden_size * 3;
int h_base_index = blockIdx.x * hidden_size;
Dtype* in_w_x_r = w_x_r + w_base_index;
Dtype* in_w_h_r = w_h_r + w_base_index;
Dtype* in_w_x_z = w_x_z + w_base_index;
Dtype* in_w_h_z = w_h_z + w_base_index;
Dtype* in_w_x_o = w_x_o + w_base_index;
Dtype* in_w_h_o = w_h_o + w_base_index;
const Dtype* in_hidden_pre = hidden_pre + h_base_index;
Dtype* out_output = output + h_base_index;
for (int index = threadIdx.x; index < hidden_size; index += blockDim.x) {
const Dtype min = SIGMOID_THRESHOLD_MIN_PADDLE;
const Dtype max = SIGMOID_THRESHOLD_MAX_PADDLE;
Dtype before_act_r = in_w_x_r[index] + in_w_h_r[index] + b_r[index];
before_act_r = (before_act_r < min) ? min : ((before_act_r > max) ? max : before_act_r);
Dtype act_r = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + exp(-before_act_r));
Dtype before_act_z = in_w_x_z[index] + in_w_h_z[index] + b_z[index];
before_act_z = (before_act_z < min) ? min : ((before_act_z > max) ? max : before_act_z);
Dtype act_z = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + exp(-before_act_z));
Dtype before_act_h = in_w_x_o[index] + in_w_h_o[index] * act_r
+ b_o[index];
Dtype acted = before_act_h > static_cast<Dtype>(0.0) ? before_act_h : static_cast<Dtype>(0.0);
out_output[index] = (1 - act_z) * acted + act_z * in_hidden_pre[index];
}
}
template <typename Dtype>
__global__ void cal_one_kernel_sigmoid_tanh_modi(Dtype* w_x_r, Dtype* w_x_z, Dtype* w_x_o,
Dtype* w_h_r, Dtype* w_h_z, Dtype* w_h_o,
const Dtype* b_r, const Dtype* b_z, const Dtype* b_o,
int hidden_size, Dtype* output, const Dtype* hidden_pre) {
int w_base_index = blockIdx.x * hidden_size * 3 + threadIdx.x;
int h_base_index = blockIdx.x * hidden_size + threadIdx.x;
for (int index = threadIdx.x; index < hidden_size;
index += blockDim.x, w_base_index += blockDim.x, h_base_index += blockDim.x) {
Dtype before_act_r = w_x_r[w_base_index] + w_h_r[w_base_index] + b_r[index];
Dtype act_r = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + expf(-before_act_r));
Dtype before_act_z = w_x_z[w_base_index] + w_h_z[w_base_index] + b_z[index];
Dtype act_z = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + expf(-before_act_z));
Dtype before_act_h = w_x_o[w_base_index] + w_h_o[w_base_index] * act_r
+ b_o[index];
Dtype acted = tanhf(before_act_h);
output[h_base_index] = (static_cast<Dtype>(1.0) - act_z) * acted + act_z * hidden_pre[h_base_index];
}
}
template <>
SaberStatus SaberGru<NV, AK_FLOAT, AK_FLOAT, AK_FLOAT, NCHW, NCHW, NCHW>::gru_cudnn(
const std::vector<DataTensor_in*> inputs,
std::vector<DataTensor_out*> outputs,
GruParam<OpTensor>& param) {
DataTensor_in* x = inputs[0];
const InDataType* x_data = x->data();
std::vector<int> offset=x->get_seq_offset();
const InDataType* h;
DataTensor_out* dout = outputs[0];
OutDataType* dout_data = dout->mutable_data();
//TODO:check shape first
const OpTensor* b = param.bias();
    int batch_size = offset.size() - 1; //x->get_seq_offset().size()-1;
int sequence = x->num();
int hidden_size = b->valid_size() / 3;
bool isHW2Seq=offset.size()>2;
int o_offset = 0;
int r_offset = 1;
int z_offset = 2;
// CHECK_EQ(w_h2h->height(), hidden_size) << "w_h2h->height()==batch_size";
// CHECK_EQ(w_h2h->width(), hidden_size * 3) << "w_h2h->width()==hidden_size*3";
//
// CHECK_EQ(w_i2h->height(), word_size) << "w_i2h->height()==word_size";
// CHECK_EQ(w_i2h->width(), hidden_size * 3) << "w_i2h->width()==hidden_size*3";
if (isHW2Seq) {
x_data = hw2seq(inputs, param, _word_size, hidden_size, sequence);
batch_size = offset.size() - 1;
if (x_data != x->data()) {
dout_data = _temp_tensor_out.mutable_data();
}
}
Shape shape_wx(sequence, batch_size, 3, hidden_size);
_temp_WX.try_expand_size(shape_wx);
Shape shape_wh(1, batch_size, 3, hidden_size);
_temp_WH.try_expand_size(shape_wh);
anakin_NV_gemm(_cublas_handle, false, false, sequence * batch_size, 3 * hidden_size,
_word_size, 1.0, x_data, _weights_i2h.data(), 0.0, _temp_WX.mutable_data());
const OpDataType* b_r = b->data() + r_offset * hidden_size;
const OpDataType* b_z = b->data() + z_offset * hidden_size;
const OpDataType* b_o = b->data() + o_offset * hidden_size;
if (inputs.size() == 1) {
CUDA_CHECK(cudaMemsetAsync(dout_data, 0, sizeof(InDataType) * batch_size * hidden_size,
_ctx.get_compute_stream()));
h = dout_data;
} else {
h = inputs[1]->data();
CHECK_EQ(inputs[1]->valid_size(), batch_size * hidden_size) <<
"h size should be batch_size * hidden_size";
}
for (int seq = 0; seq < sequence; seq++) {
const InDataType* hidden_in;
InDataType* hidden_out = dout_data + seq * batch_size * hidden_size;
if (seq == 0) {
hidden_in = h;
} else {
hidden_in = dout_data + (seq - 1) * batch_size * hidden_size;
}
anakin_NV_gemm(_cublas_handle, false, false, batch_size,
3 * hidden_size, hidden_size, 1.0, hidden_in,
_weights_h2h.data(), 0.0, _temp_WH.mutable_data());
OpDataType* w_x_r = _temp_WX.mutable_data() + r_offset * hidden_size
+ seq * batch_size * hidden_size * 3;
OpDataType* w_x_z = _temp_WX.mutable_data() + z_offset * hidden_size
+ seq * batch_size * hidden_size * 3;
OpDataType* w_x_o = _temp_WX.mutable_data() + o_offset * hidden_size
+ seq * batch_size * hidden_size * 3;
OpDataType* w_h_r = _temp_WH.mutable_data() + r_offset * hidden_size;
OpDataType* w_h_z = _temp_WH.mutable_data() + z_offset * hidden_size;
OpDataType* w_h_o = _temp_WH.mutable_data() + o_offset * hidden_size;
int frame_per_block = hidden_size <= 1024 ? hidden_size : 1024;
if (param.gate_activity == Active_sigmoid
&& param.h_activity == Active_tanh) {
cal_one_kernel_sigmoid_tanh_modi_cudnn_formula
<< < batch_size, frame_per_block, 0, _ctx.get_compute_stream() >> >
(w_x_r, w_x_z, w_x_o, w_h_r, w_h_z, w_h_o
, b_r, b_z, b_o, hidden_size, hidden_out, hidden_in);
} else if (param.gate_activity == Active_sigmoid_fluid
&& param.h_activity == Active_tanh) {
cal_one_kernel_paddlesigmoid_tanh_cudnn_formula
<< < batch_size, frame_per_block, 0, _ctx.get_compute_stream() >> >
(w_x_r, w_x_z, w_x_o, w_h_r, w_h_z, w_h_o
, b_r, b_z, b_o, hidden_size, hidden_out, hidden_in);
} else if (param.gate_activity == Active_sigmoid_fluid
&& param.h_activity == Active_relu) {
cal_one_kernel_paddlesigmoid_relu_cudnn_formula
<< < batch_size, frame_per_block, 0, _ctx.get_compute_stream() >> >
(w_x_r, w_x_z, w_x_o, w_h_r, w_h_z, w_h_o
, b_r, b_z, b_o, hidden_size, hidden_out, hidden_in);
} else {
            LOG(ERROR) << "unsupported activation function";
}
}
if (isHW2Seq) {
seq2hw(outputs, inputs, param, hidden_size, dout_data);
outputs[0]->set_seq_offset(inputs[0]->get_seq_offset());
}
return SaberSuccess;
}
template<>
SaberStatus SaberGru<NV, AK_FLOAT, AK_FLOAT, AK_FLOAT, NCHW, NCHW, NCHW>::dispatch(\
const std::vector<DataTensor_in*>& inputs,
std::vector<DataTensor_out*>& outputs,
GruParam <OpTensor>& param) {
if (param.formula == GRU_CUDNN) {
        if (param.is_reverse) {
            LOG(ERROR) << "saber cudnn formula does not support reverse yet";
        }
return gru_cudnn(inputs, outputs, param);
}
// LOG(INFO)<<"gru_paddle";
DataTensor_in* x = inputs[0];
std::vector<int> offset=x->get_seq_offset();
const InDataType* x_data = x->data();
const InDataType* h;
DataTensor_out* dout = outputs[0];
OutDataType* dout_data = dout->mutable_data();
//TODO:check shape first
const OpTensor* b = param.bias();
int batch_size = offset.size() - 1; //x->get_seq_offset().size()-1;
int sequence = x->num();
int hidden_size = b->valid_size() / 3;
bool isHW2Seq=offset.size()>2;
int o_offset = 0;
int r_offset = 1;
int z_offset = 2;
// CHECK_EQ(w_h2h->height(), hidden_size) << "w_h2h->height()==batch_size";
// CHECK_EQ(w_h2h->width(), hidden_size * 3) << "w_h2h->width()==hidden_size*3";
//
// CHECK_EQ(w_i2h->height(), word_size) << "w_i2h->height()==word_size";
// CHECK_EQ(w_i2h->width(), hidden_size * 3) << "w_i2h->width()==hidden_size*3";
if (isHW2Seq) {
x_data = hw2seq(inputs, param, _word_size, hidden_size, sequence);
// batch_size = inputs[0]->get_seq_offset().size() - 1;
if (x_data != x->data()) {
dout_data = _temp_tensor_out.mutable_data();
}
}
Shape shape_WX(sequence, batch_size, 3, hidden_size);
_temp_WX.try_expand_size(shape_WX);
Shape shape_WH(1, batch_size, 2, hidden_size);
_temp_WH.try_expand_size(shape_WH);
anakin_NV_gemm(_cublas_handle, false, false, sequence * batch_size, 3 * hidden_size,
_word_size, 1.0, x_data, _weights_i2h.data(), 0.0, _temp_WX.mutable_data());
const OpDataType* b_r = b->data() + r_offset * hidden_size;
const OpDataType* b_z = b->data() + z_offset * hidden_size;
const OpDataType* b_o = b->data() + o_offset * hidden_size;
if (inputs.size() == 1) {
CUDA_CHECK(cudaMemsetAsync(dout_data, 0, sizeof(OutDataType)*batch_size * hidden_size,
_ctx.get_compute_stream()));
h = dout_data;
} else {
h = inputs[1]->data();
}
for (int seq = 0; seq < sequence; ++seq) {
int realseq = seq;
int last_seq = realseq - 1;
if (param.is_reverse) {
// DLOG(INFO)<<"reverse gru";
realseq = sequence - 1 - seq;
last_seq = realseq + 1;
}
const OutDataType* hidden_in;
OutDataType* hidden_out = dout_data + realseq * batch_size * hidden_size;
if (seq == 0) {
hidden_in = h;
} else {
hidden_in = dout_data + last_seq * batch_size * hidden_size;
}
anakin_NV_gemm(_cublas_handle, false, false, batch_size,
2 * hidden_size, hidden_size, 1.0, hidden_in,
_weights_h2h.data() + hidden_size * hidden_size, 0.0, _temp_WH.mutable_data());
OutDataType* w_x_r = _temp_WX.mutable_data() + r_offset * hidden_size
+ realseq * batch_size * hidden_size * 3;
OutDataType* w_x_z = _temp_WX.mutable_data() + z_offset * hidden_size
+ realseq * batch_size * hidden_size * 3;
OutDataType* w_x_o = _temp_WX.mutable_data() + o_offset * hidden_size
+ realseq * batch_size * hidden_size * 3;
OutDataType* w_h_r = _temp_WH.mutable_data() + 0 * hidden_size;
OutDataType* w_h_z = _temp_WH.mutable_data() + 1 * hidden_size;
const OpDataType * w_o = _weights_h2h.data();
        CHECK_LE(hidden_size, 1024) << "hidden size > 1024 is not supported for the paddle formula yet";
int frame_per_block = hidden_size <= 1024 ? hidden_size : 1024;
// DLOG(INFO) << "act = " << param._gate_activity << "," << param._h_activity;
if (param.gate_activity == Active_sigmoid
&& param.h_activity == Active_tanh) {
cal_one_kernel_sigmoid_tanh_paddle_formula
<<< batch_size, frame_per_block, sizeof(OutDataType)*hidden_size
, _ctx.get_compute_stream()>>>(
w_x_r, w_x_z, w_x_o, w_h_r, w_h_z, w_o
, b_r, b_z, b_o, hidden_size, hidden_out, hidden_in);
} else if (param.gate_activity == Active_sigmoid_fluid
&& param.h_activity == Active_relu) {
cal_one_kernel_paddlesigmoid_relu_paddle_formula
<< < batch_size, frame_per_block, sizeof(OutDataType)*hidden_size
, _ctx.get_compute_stream() >> >
(w_x_r, w_x_z, w_x_o, w_h_r, w_h_z, w_o
, b_r, b_z, b_o, hidden_size, hidden_out, hidden_in);
} else {
            LOG(ERROR) << "unsupported activation function";
}
}
if (isHW2Seq) {
seq2hw(outputs, inputs, param, hidden_size, dout_data);
}
outputs[0]->set_seq_offset(inputs[0]->get_seq_offset());
return SaberSuccess;
}
}
}
|
c8f2b345149e6e611e794353e83faa3abc64f2a0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "srad.h"
#include <stdio.h>
#define NUM_ITER 500
__global__ void
srad(
float *E_C,
float *W_C,
float *N_C,
float *S_C,
float *J_cuda,
float *C_cuda,
int cols,
int rows,
float q0sqr
)
{
for (int k = 0; k < NUM_ITER; ++k) {
//block id
int bx = blockIdx.x;
int by = blockIdx.y;
//thread id
int tx = threadIdx.x;
int ty = threadIdx.y;
//indices
int index = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + tx;
int index_n = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + tx - cols;
int index_s = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * BLOCK_SIZE + tx;
int index_w = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty - 1;
int index_e = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + BLOCK_SIZE;
float n, w, e, s, jc, g2, l, num, den, qsqr, c;
//shared memory allocation
__shared__ float temp[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float temp_result[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float north[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float south[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float east[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float west[BLOCK_SIZE][BLOCK_SIZE];
//load data to shared memory
north[ty][tx] = J_cuda[index_n];
south[ty][tx] = J_cuda[index_s];
if ( by == 0 ){
north[ty][tx] = J_cuda[BLOCK_SIZE * bx + tx];
}
else if ( by == gridDim.y - 1 ){
south[ty][tx] = J_cuda[cols * BLOCK_SIZE * (gridDim.y - 1) + BLOCK_SIZE * bx + cols * ( BLOCK_SIZE - 1 ) + tx];
}
__syncthreads();
west[ty][tx] = J_cuda[index_w];
east[ty][tx] = J_cuda[index_e];
if ( bx == 0 ){
west[ty][tx] = J_cuda[cols * BLOCK_SIZE * by + cols * ty];
}
else if ( bx == gridDim.x - 1 ){
east[ty][tx] = J_cuda[cols * BLOCK_SIZE * by + BLOCK_SIZE * ( gridDim.x - 1) + cols * ty + BLOCK_SIZE-1];
}
__syncthreads();
temp[ty][tx] = J_cuda[index];
__syncthreads();
jc = temp[ty][tx];
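    // directional differences (neighbour minus centre); threads on the tile border read the
    // neighbour value from the halo tiles loaded above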
if ( ty == 0 && tx == 0 ){ //nw
n = north[ty][tx] - jc;
s = temp[ty+1][tx] - jc;
w = west[ty][tx] - jc;
e = temp[ty][tx+1] - jc;
}
else if ( ty == 0 && tx == BLOCK_SIZE-1 ){ //ne
n = north[ty][tx] - jc;
s = temp[ty+1][tx] - jc;
w = temp[ty][tx-1] - jc;
e = east[ty][tx] - jc;
}
else if ( ty == BLOCK_SIZE -1 && tx == BLOCK_SIZE - 1){ //se
n = temp[ty-1][tx] - jc;
s = south[ty][tx] - jc;
w = temp[ty][tx-1] - jc;
e = east[ty][tx] - jc;
}
else if ( ty == BLOCK_SIZE -1 && tx == 0 ){//sw
n = temp[ty-1][tx] - jc;
s = south[ty][tx] - jc;
w = west[ty][tx] - jc;
e = temp[ty][tx+1] - jc;
}
else if ( ty == 0 ){ //n
n = north[ty][tx] - jc;
s = temp[ty+1][tx] - jc;
w = temp[ty][tx-1] - jc;
e = temp[ty][tx+1] - jc;
}
else if ( tx == BLOCK_SIZE -1 ){ //e
n = temp[ty-1][tx] - jc;
s = temp[ty+1][tx] - jc;
w = temp[ty][tx-1] - jc;
e = east[ty][tx] - jc;
}
else if ( ty == BLOCK_SIZE -1){ //s
n = temp[ty-1][tx] - jc;
s = south[ty][tx] - jc;
w = temp[ty][tx-1] - jc;
e = temp[ty][tx+1] - jc;
}
else if ( tx == 0 ){ //w
n = temp[ty-1][tx] - jc;
s = temp[ty+1][tx] - jc;
w = west[ty][tx] - jc;
e = temp[ty][tx+1] - jc;
}
else{ //the data elements which are not on the borders
n = temp[ty-1][tx] - jc;
s = temp[ty+1][tx] - jc;
w = temp[ty][tx-1] - jc;
e = temp[ty][tx+1] - jc;
}
g2 = ( n * n + s * s + w * w + e * e ) / (jc * jc);
l = ( n + s + w + e ) / jc;
num = (0.5*g2) - ((1.0/16.0)*(l*l)) ;
den = 1 + (.25*l);
qsqr = num/(den*den);
// diffusion coefficent (equ 33)
den = (qsqr-q0sqr) / (q0sqr * (1+q0sqr)) ;
c = 1.0 / (1.0+den) ;
// saturate diffusion coefficent
if (c < 0){temp_result[ty][tx] = 0;}
else if (c > 1) {temp_result[ty][tx] = 1;}
else {temp_result[ty][tx] = c;}
__syncthreads();
C_cuda[index] = temp_result[ty][tx];
E_C[index] = e;
W_C[index] = w;
S_C[index] = s;
N_C[index] = n;
}
}
__global__ void
srad2(
float *E_C,
float *W_C,
float *N_C,
float *S_C,
float *J_cuda,
float *C_cuda,
int cols,
int rows,
float lambda,
float q0sqr
)
{
for (int k = 0; k < NUM_ITER; ++k) {
//block id
int bx = blockIdx.x;
int by = blockIdx.y;
//thread id
int tx = threadIdx.x;
int ty = threadIdx.y;
//indices
int index = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + tx;
int index_s = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * BLOCK_SIZE + tx;
int index_e = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + BLOCK_SIZE;
float cc, cn, cs, ce, cw, d_sum;
//shared memory allocation
__shared__ float south_c[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float east_c[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float c_cuda_temp[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float c_cuda_result[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float temp[BLOCK_SIZE][BLOCK_SIZE];
//load data to shared memory
temp[ty][tx] = J_cuda[index];
__syncthreads();
south_c[ty][tx] = C_cuda[index_s];
if ( by == gridDim.y - 1 ){
south_c[ty][tx] = C_cuda[cols * BLOCK_SIZE * (gridDim.y - 1) + BLOCK_SIZE * bx + cols * ( BLOCK_SIZE - 1 ) + tx];
}
__syncthreads();
east_c[ty][tx] = C_cuda[index_e];
if ( bx == gridDim.x - 1 ){
east_c[ty][tx] = C_cuda[cols * BLOCK_SIZE * by + BLOCK_SIZE * ( gridDim.x - 1) + cols * ty + BLOCK_SIZE-1];
}
__syncthreads();
c_cuda_temp[ty][tx] = C_cuda[index];
__syncthreads();
cc = c_cuda_temp[ty][tx];
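    // neighbouring diffusion coefficients: north and west reuse the centre value, so only the
    // south and east halo tiles are needed here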
if ( ty == BLOCK_SIZE -1 && tx == BLOCK_SIZE - 1){ //se
cn = cc;
cs = south_c[ty][tx];
cw = cc;
ce = east_c[ty][tx];
}
else if ( tx == BLOCK_SIZE -1 ){ //e
cn = cc;
cs = c_cuda_temp[ty+1][tx];
cw = cc;
ce = east_c[ty][tx];
}
else if ( ty == BLOCK_SIZE -1){ //s
cn = cc;
cs = south_c[ty][tx];
cw = cc;
ce = c_cuda_temp[ty][tx+1];
}
else{ //the data elements which are not on the borders
cn = cc;
cs = c_cuda_temp[ty+1][tx];
cw = cc;
ce = c_cuda_temp[ty][tx+1];
}
// divergence (equ 58)
d_sum = cn * N_C[index] + cs * S_C[index] + cw * W_C[index] + ce * E_C[index];
// image update (equ 61)
c_cuda_result[ty][tx] = temp[ty][tx] + 0.25 * lambda * d_sum;
__syncthreads();
J_cuda[index] = c_cuda_result[ty][tx];
}
}
| c8f2b345149e6e611e794353e83faa3abc64f2a0.cu | #include "srad.h"
#include <stdio.h>
#define NUM_ITER 500
__global__ void
srad(
float *E_C,
float *W_C,
float *N_C,
float *S_C,
float *J_cuda,
float *C_cuda,
int cols,
int rows,
float q0sqr
)
{
for (int k = 0; k < NUM_ITER; ++k) {
//block id
int bx = blockIdx.x;
int by = blockIdx.y;
//thread id
int tx = threadIdx.x;
int ty = threadIdx.y;
//indices
int index = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + tx;
int index_n = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + tx - cols;
int index_s = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * BLOCK_SIZE + tx;
int index_w = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty - 1;
int index_e = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + BLOCK_SIZE;
float n, w, e, s, jc, g2, l, num, den, qsqr, c;
//shared memory allocation
__shared__ float temp[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float temp_result[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float north[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float south[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float east[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float west[BLOCK_SIZE][BLOCK_SIZE];
//load data to shared memory
north[ty][tx] = J_cuda[index_n];
south[ty][tx] = J_cuda[index_s];
if ( by == 0 ){
north[ty][tx] = J_cuda[BLOCK_SIZE * bx + tx];
}
else if ( by == gridDim.y - 1 ){
south[ty][tx] = J_cuda[cols * BLOCK_SIZE * (gridDim.y - 1) + BLOCK_SIZE * bx + cols * ( BLOCK_SIZE - 1 ) + tx];
}
__syncthreads();
west[ty][tx] = J_cuda[index_w];
east[ty][tx] = J_cuda[index_e];
if ( bx == 0 ){
west[ty][tx] = J_cuda[cols * BLOCK_SIZE * by + cols * ty];
}
else if ( bx == gridDim.x - 1 ){
east[ty][tx] = J_cuda[cols * BLOCK_SIZE * by + BLOCK_SIZE * ( gridDim.x - 1) + cols * ty + BLOCK_SIZE-1];
}
__syncthreads();
temp[ty][tx] = J_cuda[index];
__syncthreads();
jc = temp[ty][tx];
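    // directional differences (neighbour minus centre); threads on the tile border read the
    // neighbour value from the halo tiles loaded above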
if ( ty == 0 && tx == 0 ){ //nw
n = north[ty][tx] - jc;
s = temp[ty+1][tx] - jc;
w = west[ty][tx] - jc;
e = temp[ty][tx+1] - jc;
}
else if ( ty == 0 && tx == BLOCK_SIZE-1 ){ //ne
n = north[ty][tx] - jc;
s = temp[ty+1][tx] - jc;
w = temp[ty][tx-1] - jc;
e = east[ty][tx] - jc;
}
else if ( ty == BLOCK_SIZE -1 && tx == BLOCK_SIZE - 1){ //se
n = temp[ty-1][tx] - jc;
s = south[ty][tx] - jc;
w = temp[ty][tx-1] - jc;
e = east[ty][tx] - jc;
}
else if ( ty == BLOCK_SIZE -1 && tx == 0 ){//sw
n = temp[ty-1][tx] - jc;
s = south[ty][tx] - jc;
w = west[ty][tx] - jc;
e = temp[ty][tx+1] - jc;
}
else if ( ty == 0 ){ //n
n = north[ty][tx] - jc;
s = temp[ty+1][tx] - jc;
w = temp[ty][tx-1] - jc;
e = temp[ty][tx+1] - jc;
}
else if ( tx == BLOCK_SIZE -1 ){ //e
n = temp[ty-1][tx] - jc;
s = temp[ty+1][tx] - jc;
w = temp[ty][tx-1] - jc;
e = east[ty][tx] - jc;
}
else if ( ty == BLOCK_SIZE -1){ //s
n = temp[ty-1][tx] - jc;
s = south[ty][tx] - jc;
w = temp[ty][tx-1] - jc;
e = temp[ty][tx+1] - jc;
}
else if ( tx == 0 ){ //w
n = temp[ty-1][tx] - jc;
s = temp[ty+1][tx] - jc;
w = west[ty][tx] - jc;
e = temp[ty][tx+1] - jc;
}
else{ //the data elements which are not on the borders
n = temp[ty-1][tx] - jc;
s = temp[ty+1][tx] - jc;
w = temp[ty][tx-1] - jc;
e = temp[ty][tx+1] - jc;
}
g2 = ( n * n + s * s + w * w + e * e ) / (jc * jc);
l = ( n + s + w + e ) / jc;
num = (0.5*g2) - ((1.0/16.0)*(l*l)) ;
den = 1 + (.25*l);
qsqr = num/(den*den);
// diffusion coefficent (equ 33)
den = (qsqr-q0sqr) / (q0sqr * (1+q0sqr)) ;
c = 1.0 / (1.0+den) ;
// saturate diffusion coefficent
if (c < 0){temp_result[ty][tx] = 0;}
else if (c > 1) {temp_result[ty][tx] = 1;}
else {temp_result[ty][tx] = c;}
__syncthreads();
C_cuda[index] = temp_result[ty][tx];
E_C[index] = e;
W_C[index] = w;
S_C[index] = s;
N_C[index] = n;
}
}
__global__ void
srad2(
float *E_C,
float *W_C,
float *N_C,
float *S_C,
float *J_cuda,
float *C_cuda,
int cols,
int rows,
float lambda,
float q0sqr
)
{
for (int k = 0; k < NUM_ITER; ++k) {
//block id
int bx = blockIdx.x;
int by = blockIdx.y;
//thread id
int tx = threadIdx.x;
int ty = threadIdx.y;
//indices
int index = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + tx;
int index_s = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * BLOCK_SIZE + tx;
int index_e = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + BLOCK_SIZE;
float cc, cn, cs, ce, cw, d_sum;
//shared memory allocation
__shared__ float south_c[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float east_c[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float c_cuda_temp[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float c_cuda_result[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float temp[BLOCK_SIZE][BLOCK_SIZE];
//load data to shared memory
temp[ty][tx] = J_cuda[index];
__syncthreads();
south_c[ty][tx] = C_cuda[index_s];
if ( by == gridDim.y - 1 ){
south_c[ty][tx] = C_cuda[cols * BLOCK_SIZE * (gridDim.y - 1) + BLOCK_SIZE * bx + cols * ( BLOCK_SIZE - 1 ) + tx];
}
__syncthreads();
east_c[ty][tx] = C_cuda[index_e];
if ( bx == gridDim.x - 1 ){
east_c[ty][tx] = C_cuda[cols * BLOCK_SIZE * by + BLOCK_SIZE * ( gridDim.x - 1) + cols * ty + BLOCK_SIZE-1];
}
__syncthreads();
c_cuda_temp[ty][tx] = C_cuda[index];
__syncthreads();
cc = c_cuda_temp[ty][tx];
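    // neighbouring diffusion coefficients: north and west reuse the centre value, so only the
    // south and east halo tiles are needed here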
if ( ty == BLOCK_SIZE -1 && tx == BLOCK_SIZE - 1){ //se
cn = cc;
cs = south_c[ty][tx];
cw = cc;
ce = east_c[ty][tx];
}
else if ( tx == BLOCK_SIZE -1 ){ //e
cn = cc;
cs = c_cuda_temp[ty+1][tx];
cw = cc;
ce = east_c[ty][tx];
}
else if ( ty == BLOCK_SIZE -1){ //s
cn = cc;
cs = south_c[ty][tx];
cw = cc;
ce = c_cuda_temp[ty][tx+1];
}
else{ //the data elements which are not on the borders
cn = cc;
cs = c_cuda_temp[ty+1][tx];
cw = cc;
ce = c_cuda_temp[ty][tx+1];
}
// divergence (equ 58)
d_sum = cn * N_C[index] + cs * S_C[index] + cw * W_C[index] + ce * E_C[index];
// image update (equ 61)
c_cuda_result[ty][tx] = temp[ty][tx] + 0.25 * lambda * d_sum;
__syncthreads();
J_cuda[index] = c_cuda_result[ty][tx];
}
}
|
c88fe0e4136ee57559fe35e5e306737774de8e53.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
#ifdef _MSC_VER
#define kBNLL_THRESHOLD 50.
#else
const float kBNLL_THRESHOLD = 50.;
#endif
template <typename Dtype>
__global__ void BNLLForward(const int n, const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
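    // BNLL forward: f(x) = log(1 + exp(x)), rewritten as x + log(1 + exp(-x)) for x > 0 to avoid overflow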
out[index] = in[index] > 0 ?
in[index] + log(1. + exp(-in[index])) :
log(1. + exp(in[index]));
}
}
template <typename Dtype>
void BNLLLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( BNLLForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, top_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void BNLLBackward(const int n, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
Dtype expval = exp(min(in_data[index], Dtype(kBNLL_THRESHOLD)));
out_diff[index] = in_diff[index] * expval / (expval + 1.);
}
}
template <typename Dtype>
void BNLLLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( BNLLBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, bottom_data, bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(BNLLLayer);
} // namespace caffe
| c88fe0e4136ee57559fe35e5e306737774de8e53.cu | #include <algorithm>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
#ifdef _MSC_VER
#define kBNLL_THRESHOLD 50.
#else
const float kBNLL_THRESHOLD = 50.;
#endif
template <typename Dtype>
__global__ void BNLLForward(const int n, const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
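// Softplus (BNLL) in a numerically stable form: for x > 0 compute x + log(1 + exp(-x)),
// otherwise log(1 + exp(x)), so exp() never sees a large positive argument.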
out[index] = in[index] > 0 ?
in[index] + log(1. + exp(-in[index])) :
log(1. + exp(in[index]));
}
}
template <typename Dtype>
void BNLLLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
BNLLForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, top_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void BNLLBackward(const int n, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
Dtype expval = exp(min(in_data[index], Dtype(kBNLL_THRESHOLD)));
out_diff[index] = in_diff[index] * expval / (expval + 1.);
}
}
template <typename Dtype>
void BNLLLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
BNLLBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, bottom_data, bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(BNLLLayer);
} // namespace caffe
|
814b4b88b25088a07b987051a6c7c9a0304c12e7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gpu_runtime.h"
extern __global__ void array_set_kernel(float *output, float value, size_t size);
extern __global__ void float_memory_copy(float *A, const float *B, size_t len);
extern int Float_Add(float *A, const float *B, int len, DLStreamHandle stream_handle);
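// im2col_kernel: each thread, via a grid-stride loop, handles one (n, c, out_y, out_x)
// output position and copies the matching filter_H x filter_W input patch into the im2col
// workspace, writing zeros where the patch falls in the padding region.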
__global__ void im2col_kernel(int N, int C, int H, int W, int filter_outChannel, int filter_H, int filter_W, const float *input_data_x, float *workspace_data, const int padding, const int stride, const int blocks) {
int block_id = blockIdx.x;
int thread_id = threadIdx.x;
int max_threads_per_block = blockDim.x;
int thread_index = block_id * max_threads_per_block + thread_id;
int out_H = (H + 2 * padding - filter_H) / stride + 1;
int out_W = (W + 2 * padding - filter_W) / stride + 1;
for (int i = thread_index; i < N * C * out_H * out_W; i += blocks * max_threads_per_block)
{
int N_i = i / (C * out_H * out_W);
int base_N = N_i * C * out_H * out_W;
int C_i = (i - base_N) / (out_H * out_W);
int base_C = C_i * out_H * out_W;
int out_H_i = (i - base_N - base_C) / out_W;
int out_W_i = i % out_W;
assert(base_N + base_C + out_H_i * out_W + out_W_i == i);
int in_x = out_H_i * stride - padding;
int in_y = out_W_i * stride - padding;
for (int x = in_x; x < in_x + filter_H; x++)
for(int y = in_y; y < in_y + filter_W; y++)
{
if( x<0 || x>=H || y<0 || y>=W)
workspace_data[(base_N + base_C) * filter_H * filter_W + ((x - in_x) * filter_W + (y - in_y)) * out_H * out_W + out_H_i * out_W + out_W_i] = 0;
else
workspace_data[(base_N + base_C) * filter_H * filter_W + ((x - in_x) * filter_W + (y - in_y)) * out_H * out_W + out_H_i * out_W + out_W_i] = input_data_x[ (N_i * C + C_i) * H * W + x * W + y];
}
}
}
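// gemm_kernel: batched matrix multiply with K = rowB / colA stacked right-hand sides; each
// thread produces one element of C[k] = A * B[k] per batch. In DLGpuConv2d, A is the
// (rowA x colA) filter matrix and each B[k] is one image's (colA x colB) im2col matrix.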
__global__ void gemm_kernel(const float *A, const float *B, float *C, int rowA, int colA, int rowB, int colB) {
int r = blockIdx.y * blockDim.y + threadIdx.y;
int c = blockIdx.x * blockDim.x + threadIdx.x;
assert (rowB % colA == 0);
int K = rowB / colA;
if (r >= rowA || c >= colB) return;
for (int k = 0; k < K; k++)
{
float Cvalue = 0.0;
for (int e = 0; e < colA; e++)
Cvalue += A[r * colA + e] * B[(e + k * colA) * colB + c];
C[ (r + k * rowA) * colB + c] = Cvalue;
}
}
int DLGpuConv2d(const DLArrayHandle input_x, const DLArrayHandle input_f, DLArrayHandle output, DLArrayHandle workspace_arr, const int padding, const int stride, DLStreamHandle stream_handle = NULL, ProfilerHandle p = NULL){
assert(input_x->ndim == 4);
assert(input_f->ndim == 4);
assert(input_x->shape[1] == input_f->shape[1]);
int N = input_x->shape[0];
int C = input_x->shape[1];
int H = input_x->shape[2];
int W = input_x->shape[3];
int filter_outChannel = input_f->shape[0];
// int filter_inChannel = input_f->shape[1];
int filter_H = input_f->shape[2];
int filter_W = input_f->shape[3];
assert((H + 2 * padding - filter_H) % stride == 0);
assert((W + 2 * padding - filter_W) % stride == 0);
int out_H = (H + 2 * padding - filter_H) / stride + 1;
int out_W = (W + 2 * padding - filter_W) / stride + 1;
int y_col_size = out_H * out_W;
int y_row_size = C * filter_H * filter_W;
const float *input_data_x = (const float *)input_x->data;
const float *input_data_f = (const float *)input_f->data;
float *output_data = (float *)output->data;
float *workspace_data = (float *)workspace_arr->data;
// get max threads and blocks
int dev_id = (input_x->ctx).device_id;
hipSetDevice(dev_id);
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev_id);
int threads = deviceProp.maxThreadsPerBlock;
int blocks = deviceProp.maxThreadsPerMultiProcessor/threads*deviceProp.multiProcessorCount;
// im2col kernel
if (stream_handle)
hipLaunchKernelGGL(( im2col_kernel), dim3(blocks), dim3(threads), 0, *(hipStream_t*)stream_handle->handle, N, C, H, W, filter_outChannel, filter_H, filter_W, input_data_x, workspace_data, padding, stride, blocks);
else
hipLaunchKernelGGL(( im2col_kernel), dim3(blocks), dim3(threads), 0, 0, N, C, H, W, filter_outChannel, filter_H, filter_W, input_data_x, workspace_data, padding, stride, blocks);
// sgemm
const int BLOCK_SIZE = 16;
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid((::max(y_row_size, y_col_size) + dimBlock.x - 1) / dimBlock.x, (::max(filter_outChannel, y_row_size) + dimBlock.y - 1) / dimBlock.y);
if (stream_handle)
hipLaunchKernelGGL(( gemm_kernel), dim3(dimGrid), dim3(dimBlock), 0, *(hipStream_t*)stream_handle->handle, input_data_f, workspace_data, output_data, filter_outChannel, y_row_size, N * y_row_size, y_col_size);
else
hipLaunchKernelGGL(( gemm_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, input_data_f, workspace_data, output_data, filter_outChannel, y_row_size, N * y_row_size, y_col_size);
if(p != NULL){
int size_input_x = 1, size_input_f = 1, size_output = 1, size_workspace = 1;
for(int i = 0; i < input_x -> ndim; i++)
size_input_x *= input_x -> shape[i];
for(int i = 0; i < input_f -> ndim; i++)
size_input_f *= input_f -> shape[i];
for(int i = 0; i < output -> ndim; i++)
size_output *= output -> shape[i];
for(int i = 0; i < workspace_arr -> ndim; i++)
size_workspace *= workspace_arr -> shape[i];
p -> input_memory = 1.0 * (size_input_x + size_input_f) * sizeof(float) / 1024 / 1024;
p -> output_memory = 1.0 * size_output * sizeof(float) / 1024 / 1024;
p -> workspace_memory = 1.0 * size_workspace * sizeof(float) / 1024 / 1024;
}
return 0;
}
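// trans_im2col_kernel (col2im): scatters the im2col-format gradient back onto the input
// tensor with atomicAdd, since neighbouring output positions overlap on the same input pixel.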
__global__ void trans_im2col_kernel(int N, int C, int H, int W, int filter_outChannel, int filter_H, int filter_W, float *input_data_x, float *workspace_data, const int padding, const int stride, const int blocks) {
int block_id = blockIdx.x;
int thread_id = threadIdx.x;
int max_threads_per_block = blockDim.x;
int thread_index = block_id * max_threads_per_block + thread_id;
int out_H = (H + 2 * padding - filter_H) / stride + 1;
int out_W = (W + 2 * padding - filter_W) / stride + 1;
for (int i = thread_index; i < N * C * out_H * out_W; i += blocks * max_threads_per_block)
{
int N_i = i / (C * out_H * out_W);
int base_N = N_i * C * out_H * out_W;
int C_i = (i - base_N) / (out_H * out_W);
int base_C = C_i * out_H * out_W;
int out_H_i = (i - base_N - base_C) / out_W;
int out_W_i = i % out_W;
assert(base_N + base_C + out_H_i * out_W + out_W_i == i);
int in_x = out_H_i * stride - padding;
int in_y = out_W_i * stride - padding;
for (int x = in_x; x < in_x + filter_H; x++)
for(int y = in_y; y < in_y + filter_W; y++)
{
if( x<0 || x>=H || y<0 || y>=W)
workspace_data[(base_N + base_C) * filter_H * filter_W + ((x - in_x) * filter_W + (y - in_y)) * out_H * out_W + out_H_i * out_W + out_W_i] = 0;
else
atomicAdd(&input_data_x[ (N_i * C + C_i) * H * W + x * W + y],workspace_data[(base_N + base_C) * filter_H * filter_W + ((x - in_x) * filter_W + (y - in_y)) * out_H * out_W + out_H_i * out_W + out_W_i]);
// input_data_x[ (N_i * C + C_i) * H * W + x * W + y] += workspace_data[(base_N + base_C) * filter_H * filter_W + ((x - in_x) * filter_W + (y - in_y)) * out_H * out_W + out_H_i * out_W + out_W_i];
}
}
}
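// transA_gemm_kernel: computes C[i] = A^T * B[i] for every sample i in the batch
// (A is the rowA x colA filter matrix; B holds batch_size = rowB / rowA stacked slices).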
__global__ void transA_gemm_kernel(const float * A,const float * B, float *C, int rowA, int colA,int rowB, int colB){
size_t r = blockIdx.x * blockDim.x + threadIdx.x;
size_t c = blockIdx.y * blockDim.y + threadIdx.y;
if(r >= colA || c >= colB) return ;
assert (rowB % rowA == 0);
size_t batch_size = rowB / rowA;
// output shape(output_batch, filter_col_size, output_col_size)
for(int i = 0 ; i < batch_size; i++){
float tmp = 0;
// C[batch_size][colA][colB] -> C[i][r][c]
for(int j = 0; j < rowA; j++)
//A[j][r] * B[i][j][c]
tmp += A[j * colA + r] * B[i * rowA * colB + j * colB + c];
C[i * colA * colB + r * colB + c] = tmp;
}
}
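// batch_transB_gemm_kernel: per-sample C[i] = A[i] * B[i]^T, with colA == colB as the shared
// reduction dimension; used to form one filter-gradient matrix per sample.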
__global__ void batch_transB_gemm_kernel(const float * A,const float * B, float *C, int rowA, int colA,int rowB, int colB, int batch_size){
size_t r = blockIdx.x * blockDim.x + threadIdx.x;
size_t c = blockIdx.y * blockDim.y + threadIdx.y;
if(r >= rowA || c >= rowB) return ;
assert (colA == colB);
// output shape(batch_size, filter_row_size, filter_col_size)
for(int i = 0 ; i < batch_size; i++){
float tmp = 0;
// C[batch_size][rowA][rowB] -> C[i][r][c]
for(int j = 0; j < colA; j++)
//A[i][r][j] * B[i][c][j]
tmp += A[i * rowA * colB + r *colB + j] * B[i * rowB * colB + c * colB + j];
C[i * rowA * rowB + r * rowB + c] = tmp;
}
}
int DLGpuConv2d_Gradient_of_Data(const DLArrayHandle input_f, const DLArrayHandle gradient_y, DLArrayHandle gradient_x,DLArrayHandle workspace_im2col,const int padding, const int stride, DLStreamHandle stream_handle = NULL, ProfilerHandle p = NULL){
size_t input_N = gradient_x->shape[0];
size_t input_C = gradient_x->shape[1];
size_t input_H = gradient_x->shape[2];
size_t input_W = gradient_x->shape[3];
size_t filter_outChannel = input_f->shape[0];
size_t filter_inChannel = input_f->shape[1];
size_t filter_H = input_f->shape[2];
size_t filter_W = input_f->shape[3];
size_t output_N = gradient_y->shape[0];
size_t output_C = gradient_y->shape[1];
size_t output_H = gradient_y->shape[2];
size_t output_W = gradient_y->shape[3];
float *gradient_x_data = (float *)gradient_x -> data;
float *output_data = (float*)gradient_y -> data;
size_t output_batch = output_N;
size_t output_row_size = output_C;
size_t output_col_size = output_H * output_W;
const float *filter_data = (const float*)input_f -> data;
size_t filter_row_size = filter_outChannel;
size_t filter_col_size = filter_inChannel * filter_H * filter_W;
float *gradient_im2col_XX;
gradient_im2col_XX = (float *)workspace_im2col->data;
// output size (output_N, filter_C * filter_H * filter_W, output_H * output*W) == (output_batch, filter_col_size, output_col_size)
const int BLOCK_SIZE = 16;
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid((filter_col_size + BLOCK_SIZE - 1) / dimBlock.x, (output_col_size + BLOCK_SIZE - 1) / dimBlock.y);
if (stream_handle)
hipLaunchKernelGGL(( transA_gemm_kernel), dim3(dimGrid), dim3(dimBlock), 0, *(hipStream_t*)stream_handle->handle, filter_data, output_data, gradient_im2col_XX, filter_row_size, filter_col_size, output_batch * output_row_size, output_col_size);
else
hipLaunchKernelGGL(( transA_gemm_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, filter_data, output_data, gradient_im2col_XX, filter_row_size, filter_col_size, output_batch * output_row_size, output_col_size);
// get max threads and blocks
int dev_id = (input_f->ctx).device_id;
hipSetDevice(dev_id);
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev_id);
int threads = deviceProp.maxThreadsPerBlock;
int blocks = deviceProp.maxThreadsPerMultiProcessor/threads*deviceProp.multiProcessorCount;
// get the gradient of input_x
size_t numthread = input_N * input_C * input_H * input_W;
size_t numblocks = (numthread + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK;
if (stream_handle)
hipLaunchKernelGGL(( array_set_kernel), dim3(numblocks) , dim3(THREADS_PER_BLOCK) , 0, *(hipStream_t*)stream_handle->handle, gradient_x_data , 0 ,numthread);
else
hipLaunchKernelGGL(( array_set_kernel), dim3(numblocks) , dim3(THREADS_PER_BLOCK) , 0, 0, gradient_x_data , 0 ,numthread);
if (stream_handle)
hipLaunchKernelGGL(( trans_im2col_kernel), dim3(blocks), dim3(threads), 0, *(hipStream_t*)stream_handle->handle, input_N, input_C, input_H, input_W, filter_outChannel, filter_H, filter_W, gradient_x_data, gradient_im2col_XX, padding, stride, blocks);
else
hipLaunchKernelGGL(( trans_im2col_kernel), dim3(blocks), dim3(threads), 0, 0, input_N, input_C, input_H, input_W, filter_outChannel, filter_H, filter_W, gradient_x_data, gradient_im2col_XX, padding, stride, blocks);
if(p != NULL){
int size_input_f = 1, size_grad_y = 1, size_output = 1, size_workspace = 1;
for(int i = 0; i < input_f -> ndim; i++)
size_input_f *= input_f -> shape[i];
for(int i = 0; i < gradient_y -> ndim; i++)
size_grad_y *= gradient_y -> shape[i];
for(int i = 0; i < gradient_x -> ndim; i++)
size_output *= gradient_x -> shape[i];
for(int i = 0; i < workspace_im2col -> ndim; i++)
size_workspace *= workspace_im2col -> shape[i];
p -> input_memory = 1.0 * (size_input_f + size_grad_y) * sizeof(float) / 1024 / 1024;
p -> output_memory = 1.0 * size_output * sizeof(float) / 1024 / 1024;
p -> workspace_memory = 1.0 * size_workspace * sizeof(float) / 1024 / 1024;
}
return 0;
}
int DLGpuConv2d_Gradient_of_Filter(const DLArrayHandle input_x, const DLArrayHandle gradient_y, DLArrayHandle gradient_f ,DLArrayHandle workspace_im2col,DLArrayHandle workspace_batch_filter,const int padding, const int stride, DLStreamHandle stream_handle = NULL, ProfilerHandle p = NULL){
size_t input_N = input_x->shape[0];
size_t input_C = input_x->shape[1];
size_t input_H = input_x->shape[2];
size_t input_W = input_x->shape[3];
size_t filter_outChannel = gradient_f->shape[0];
size_t filter_inChannel = gradient_f->shape[1];
size_t filter_H = gradient_f->shape[2];
size_t filter_W = gradient_f->shape[3];
size_t output_N = gradient_y->shape[0];
size_t output_C = gradient_y->shape[1];
size_t output_H = gradient_y->shape[2];
size_t output_W = gradient_y->shape[3];
const float * input_x_data = (const float*)input_x ->data;
float *gradient_f_data = (float *)gradient_f -> data;
float *output_data = (float*)gradient_y -> data;
size_t output_batch = output_N;
size_t output_row_size = output_C;
size_t output_col_size = output_H * output_W;
size_t filter_row_size = filter_outChannel;
size_t filter_col_size = filter_inChannel * filter_H * filter_W;
// get max threads and blocks
int dev_id = (input_x->ctx).device_id;
hipSetDevice(dev_id);
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev_id);
int threads = deviceProp.maxThreadsPerBlock;
int blocks = deviceProp.maxThreadsPerMultiProcessor/threads*deviceProp.multiProcessorCount;
float *im2col_XX;
im2col_XX = (float *)workspace_im2col->data;
if (stream_handle)
hipLaunchKernelGGL(( im2col_kernel), dim3(blocks), dim3(threads), 0, *(hipStream_t*)stream_handle->handle, input_N, input_C, input_H, input_W, filter_outChannel, filter_H, filter_W, input_x_data, im2col_XX, padding, stride, blocks);
else
hipLaunchKernelGGL(( im2col_kernel), dim3(blocks), dim3(threads), 0, 0, input_N, input_C, input_H, input_W, filter_outChannel, filter_H, filter_W, input_x_data, im2col_XX, padding, stride, blocks);
size_t im2col_XX_row = filter_col_size;
size_t im2col_XX_col = output_col_size;
float *batch_filter;
// batch_filter = new float[input_N * filter_row_size * filter_col_size];
batch_filter = (float *)(workspace_batch_filter->data);
const int BLOCK_SIZE = 16;
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid((filter_row_size + BLOCK_SIZE - 1)/dimBlock.x, (filter_col_size + BLOCK_SIZE - 1)/dimBlock.y);
if (stream_handle)
hipLaunchKernelGGL(( batch_transB_gemm_kernel), dim3(dimGrid), dim3(dimBlock), 0, *(hipStream_t*)stream_handle->handle, output_data, im2col_XX, batch_filter, output_row_size, output_col_size, im2col_XX_row, im2col_XX_col,output_batch);
else
hipLaunchKernelGGL(( batch_transB_gemm_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, output_data, im2col_XX, batch_filter, output_row_size, output_col_size, im2col_XX_row, im2col_XX_col,output_batch);
size_t total = filter_row_size * filter_col_size;
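// Sum the per-sample filter gradients over the batch by repeated pairwise addition: each
// pass folds the upper half of batch_filter onto the lower half until one copy remains.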
while(output_batch !=1 ){
Float_Add(batch_filter, batch_filter + (output_batch + 1)/2*total, output_batch / 2*total, stream_handle);
output_batch = (output_batch + 1)/2;
}
size_t BLOCKS = (total + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
if (stream_handle)
hipLaunchKernelGGL(( float_memory_copy), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, *(hipStream_t*)stream_handle->handle, gradient_f_data, batch_filter, total);
else
hipLaunchKernelGGL(( float_memory_copy), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, gradient_f_data, batch_filter, total);
if(p != NULL){
int size_input_x = 1, size_grad_y = 1, size_output = 1, size_workspace_im2col = 1, size_workspace_batch_filter = 1;
for(int i = 0; i < input_x -> ndim; i++)
size_input_x *= input_x -> shape[i];
for(int i = 0; i < gradient_y -> ndim; i++)
size_grad_y *= gradient_y -> shape[i];
for(int i = 0; i < gradient_f -> ndim; i++)
size_output *= gradient_f -> shape[i];
for(int i = 0; i < workspace_im2col -> ndim; i++)
size_workspace_im2col *= workspace_im2col -> shape[i];
for(int i = 0; i < workspace_batch_filter -> ndim; i++)
size_workspace_batch_filter *= workspace_batch_filter -> shape[i];
p -> input_memory = 1.0 * (size_input_x + size_grad_y) * sizeof(float) / 1024 / 1024;
p -> output_memory = 1.0 * size_output * sizeof(float) / 1024 / 1024;
p -> workspace_memory = 1.0 * (size_workspace_im2col + size_workspace_batch_filter) * sizeof(float) / 1024 / 1024;
}
return 0;
}
// void print(const float *data, int n, int c, int h, int w) {
// std::vector<float> buffer(1 << 20);
// CUDA_CALL(hipMemcpy(
// buffer.data(), data,
// n * c * h * w * sizeof(float),
// hipMemcpyDeviceToHost));
// int a = 0;
// for (int i = 0; i < n; ++i) {
// for (int j = 0; j < c; ++j) {
// std::cout << "n=" << i << ", c=" << j << ":" << std::endl;
// for (int k = 0; k < h; ++k) {
// for (int l = 0; l < w; ++l) {
// std::cout << std::right << buffer[a]<<" ";
// ++a;
// }
// std::cout << std::endl;
// }
// }
// }
// std::cout << std::endl;
// }
| 814b4b88b25088a07b987051a6c7c9a0304c12e7.cu | #include "gpu_runtime.h"
extern __global__ void array_set_kernel(float *output, float value, size_t size);
extern __global__ void float_memory_copy(float *A, const float *B, size_t len);
extern int Float_Add(float *A, const float *B, int len, DLStreamHandle stream_handle);
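// im2col_kernel: each thread, via a grid-stride loop, handles one (n, c, out_y, out_x)
// output position and copies the matching filter_H x filter_W input patch into the im2col
// workspace, writing zeros where the patch falls in the padding region.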
__global__ void im2col_kernel(int N, int C, int H, int W, int filter_outChannel, int filter_H, int filter_W, const float *input_data_x, float *workspace_data, const int padding, const int stride, const int blocks) {
int block_id = blockIdx.x;
int thread_id = threadIdx.x;
int max_threads_per_block = blockDim.x;
int thread_index = block_id * max_threads_per_block + thread_id;
int out_H = (H + 2 * padding - filter_H) / stride + 1;
int out_W = (W + 2 * padding - filter_W) / stride + 1;
for (int i = thread_index; i < N * C * out_H * out_W; i += blocks * max_threads_per_block)
{
int N_i = i / (C * out_H * out_W);
int base_N = N_i * C * out_H * out_W;
int C_i = (i - base_N) / (out_H * out_W);
int base_C = C_i * out_H * out_W;
int out_H_i = (i - base_N - base_C) / out_W;
int out_W_i = i % out_W;
assert(base_N + base_C + out_H_i * out_W + out_W_i == i);
int in_x = out_H_i * stride - padding;
int in_y = out_W_i * stride - padding;
for (int x = in_x; x < in_x + filter_H; x++)
for(int y = in_y; y < in_y + filter_W; y++)
{
if( x<0 || x>=H || y<0 || y>=W)
workspace_data[(base_N + base_C) * filter_H * filter_W + ((x - in_x) * filter_W + (y - in_y)) * out_H * out_W + out_H_i * out_W + out_W_i] = 0;
else
workspace_data[(base_N + base_C) * filter_H * filter_W + ((x - in_x) * filter_W + (y - in_y)) * out_H * out_W + out_H_i * out_W + out_W_i] = input_data_x[ (N_i * C + C_i) * H * W + x * W + y];
}
}
}
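// gemm_kernel: batched matrix multiply with K = rowB / colA stacked right-hand sides; each
// thread produces one element of C[k] = A * B[k] per batch. In DLGpuConv2d, A is the
// (rowA x colA) filter matrix and each B[k] is one image's (colA x colB) im2col matrix.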
__global__ void gemm_kernel(const float *A, const float *B, float *C, int rowA, int colA, int rowB, int colB) {
int r = blockIdx.y * blockDim.y + threadIdx.y;
int c = blockIdx.x * blockDim.x + threadIdx.x;
assert (rowB % colA == 0);
int K = rowB / colA;
if (r >= rowA || c >= colB) return;
for (int k = 0; k < K; k++)
{
float Cvalue = 0.0;
for (int e = 0; e < colA; e++)
Cvalue += A[r * colA + e] * B[(e + k * colA) * colB + c];
C[ (r + k * rowA) * colB + c] = Cvalue;
}
}
int DLGpuConv2d(const DLArrayHandle input_x, const DLArrayHandle input_f, DLArrayHandle output, DLArrayHandle workspace_arr, const int padding, const int stride, DLStreamHandle stream_handle = NULL, ProfilerHandle p = NULL){
assert(input_x->ndim == 4);
assert(input_f->ndim == 4);
assert(input_x->shape[1] == input_f->shape[1]);
int N = input_x->shape[0];
int C = input_x->shape[1];
int H = input_x->shape[2];
int W = input_x->shape[3];
int filter_outChannel = input_f->shape[0];
// int filter_inChannel = input_f->shape[1];
int filter_H = input_f->shape[2];
int filter_W = input_f->shape[3];
assert((H + 2 * padding - filter_H) % stride == 0);
assert((W + 2 * padding - filter_W) % stride == 0);
int out_H = (H + 2 * padding - filter_H) / stride + 1;
int out_W = (W + 2 * padding - filter_W) / stride + 1;
int y_col_size = out_H * out_W;
int y_row_size = C * filter_H * filter_W;
const float *input_data_x = (const float *)input_x->data;
const float *input_data_f = (const float *)input_f->data;
float *output_data = (float *)output->data;
float *workspace_data = (float *)workspace_arr->data;
// get max threads and blocks
int dev_id = (input_x->ctx).device_id;
cudaSetDevice(dev_id);
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev_id);
int threads = deviceProp.maxThreadsPerBlock;
int blocks = deviceProp.maxThreadsPerMultiProcessor/threads*deviceProp.multiProcessorCount;
// im2col kernel
if (stream_handle)
im2col_kernel<<<blocks, threads, 0, *(cudaStream_t*)stream_handle->handle>>>(N, C, H, W, filter_outChannel, filter_H, filter_W, input_data_x, workspace_data, padding, stride, blocks);
else
im2col_kernel<<<blocks, threads>>>(N, C, H, W, filter_outChannel, filter_H, filter_W, input_data_x, workspace_data, padding, stride, blocks);
// sgemm
const int BLOCK_SIZE = 16;
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid((std::max(y_row_size, y_col_size) + dimBlock.x - 1) / dimBlock.x, (std::max(filter_outChannel, y_row_size) + dimBlock.y - 1) / dimBlock.y);
if (stream_handle)
gemm_kernel<<<dimGrid, dimBlock, 0, *(cudaStream_t*)stream_handle->handle>>>(input_data_f, workspace_data, output_data, filter_outChannel, y_row_size, N * y_row_size, y_col_size);
else
gemm_kernel<<<dimGrid, dimBlock>>>(input_data_f, workspace_data, output_data, filter_outChannel, y_row_size, N * y_row_size, y_col_size);
if(p != NULL){
int size_input_x = 1, size_input_f = 1, size_output = 1, size_workspace = 1;
for(int i = 0; i < input_x -> ndim; i++)
size_input_x *= input_x -> shape[i];
for(int i = 0; i < input_f -> ndim; i++)
size_input_f *= input_f -> shape[i];
for(int i = 0; i < output -> ndim; i++)
size_output *= output -> shape[i];
for(int i = 0; i < workspace_arr -> ndim; i++)
size_workspace *= workspace_arr -> shape[i];
p -> input_memory = 1.0 * (size_input_x + size_input_f) * sizeof(float) / 1024 / 1024;
p -> output_memory = 1.0 * size_output * sizeof(float) / 1024 / 1024;
p -> workspace_memory = 1.0 * size_workspace * sizeof(float) / 1024 / 1024;
}
return 0;
}
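// trans_im2col_kernel (col2im): scatters the im2col-format gradient back onto the input
// tensor with atomicAdd, since neighbouring output positions overlap on the same input pixel.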
__global__ void trans_im2col_kernel(int N, int C, int H, int W, int filter_outChannel, int filter_H, int filter_W, float *input_data_x, float *workspace_data, const int padding, const int stride, const int blocks) {
int block_id = blockIdx.x;
int thread_id = threadIdx.x;
int max_threads_per_block = blockDim.x;
int thread_index = block_id * max_threads_per_block + thread_id;
int out_H = (H + 2 * padding - filter_H) / stride + 1;
int out_W = (W + 2 * padding - filter_W) / stride + 1;
for (int i = thread_index; i < N * C * out_H * out_W; i += blocks * max_threads_per_block)
{
int N_i = i / (C * out_H * out_W);
int base_N = N_i * C * out_H * out_W;
int C_i = (i - base_N) / (out_H * out_W);
int base_C = C_i * out_H * out_W;
int out_H_i = (i - base_N - base_C) / out_W;
int out_W_i = i % out_W;
assert(base_N + base_C + out_H_i * out_W + out_W_i == i);
int in_x = out_H_i * stride - padding;
int in_y = out_W_i * stride - padding;
for (int x = in_x; x < in_x + filter_H; x++)
for(int y = in_y; y < in_y + filter_W; y++)
{
if( x<0 || x>=H || y<0 || y>=W)
workspace_data[(base_N + base_C) * filter_H * filter_W + ((x - in_x) * filter_W + (y - in_y)) * out_H * out_W + out_H_i * out_W + out_W_i] = 0;
else
atomicAdd(&input_data_x[ (N_i * C + C_i) * H * W + x * W + y],workspace_data[(base_N + base_C) * filter_H * filter_W + ((x - in_x) * filter_W + (y - in_y)) * out_H * out_W + out_H_i * out_W + out_W_i]);
// input_data_x[ (N_i * C + C_i) * H * W + x * W + y] += workspace_data[(base_N + base_C) * filter_H * filter_W + ((x - in_x) * filter_W + (y - in_y)) * out_H * out_W + out_H_i * out_W + out_W_i];
}
}
}
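// transA_gemm_kernel: computes C[i] = A^T * B[i] for every sample i in the batch
// (A is the rowA x colA filter matrix; B holds batch_size = rowB / rowA stacked slices).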
__global__ void transA_gemm_kernel(const float * A,const float * B, float *C, int rowA, int colA,int rowB, int colB){
size_t r = blockIdx.x * blockDim.x + threadIdx.x;
size_t c = blockIdx.y * blockDim.y + threadIdx.y;
if(r >= colA || c >= colB) return ;
assert (rowB % rowA == 0);
size_t batch_size = rowB / rowA;
// output shape(output_batch, filter_col_size, output_col_size)
for(int i = 0 ; i < batch_size; i++){
float tmp = 0;
// C[batch_size][colA][colB] -> C[i][r][c]
for(int j = 0; j < rowA; j++)
//A[j][r] * B[i][j][c]
tmp += A[j * colA + r] * B[i * rowA * colB + j * colB + c];
C[i * colA * colB + r * colB + c] = tmp;
}
}
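// batch_transB_gemm_kernel: per-sample C[i] = A[i] * B[i]^T, with colA == colB as the shared
// reduction dimension; used to form one filter-gradient matrix per sample.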
__global__ void batch_transB_gemm_kernel(const float * A,const float * B, float *C, int rowA, int colA,int rowB, int colB, int batch_size){
size_t r = blockIdx.x * blockDim.x + threadIdx.x;
size_t c = blockIdx.y * blockDim.y + threadIdx.y;
if(r >= rowA || c >= rowB) return ;
assert (colA == colB);
// output shape(batch_size, filter_row_size, filter_col_size)
for(int i = 0 ; i < batch_size; i++){
float tmp = 0;
// C[batch_size][rowA][rowB] -> C[i][r][c]
for(int j = 0; j < colA; j++)
//A[i][r][j] * B[i][c][j]
tmp += A[i * rowA * colB + r *colB + j] * B[i * rowB * colB + c * colB + j];
C[i * rowA * rowB + r * rowB + c] = tmp;
}
}
int DLGpuConv2d_Gradient_of_Data(const DLArrayHandle input_f, const DLArrayHandle gradient_y, DLArrayHandle gradient_x,DLArrayHandle workspace_im2col,const int padding, const int stride, DLStreamHandle stream_handle = NULL, ProfilerHandle p = NULL){
size_t input_N = gradient_x->shape[0];
size_t input_C = gradient_x->shape[1];
size_t input_H = gradient_x->shape[2];
size_t input_W = gradient_x->shape[3];
size_t filter_outChannel = input_f->shape[0];
size_t filter_inChannel = input_f->shape[1];
size_t filter_H = input_f->shape[2];
size_t filter_W = input_f->shape[3];
size_t output_N = gradient_y->shape[0];
size_t output_C = gradient_y->shape[1];
size_t output_H = gradient_y->shape[2];
size_t output_W = gradient_y->shape[3];
float *gradient_x_data = (float *)gradient_x -> data;
float *output_data = (float*)gradient_y -> data;
size_t output_batch = output_N;
size_t output_row_size = output_C;
size_t output_col_size = output_H * output_W;
const float *filter_data = (const float*)input_f -> data;
size_t filter_row_size = filter_outChannel;
size_t filter_col_size = filter_inChannel * filter_H * filter_W;
float *gradient_im2col_XX;
gradient_im2col_XX = (float *)workspace_im2col->data;
// output size (output_N, filter_C * filter_H * filter_W, output_H * output*W) == (output_batch, filter_col_size, output_col_size)
const int BLOCK_SIZE = 16;
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid((filter_col_size + BLOCK_SIZE - 1) / dimBlock.x, (output_col_size + BLOCK_SIZE - 1) / dimBlock.y);
if (stream_handle)
transA_gemm_kernel<<<dimGrid, dimBlock, 0, *(cudaStream_t*)stream_handle->handle>>>(filter_data, output_data, gradient_im2col_XX, filter_row_size, filter_col_size, output_batch * output_row_size, output_col_size);
else
transA_gemm_kernel<<<dimGrid, dimBlock>>>(filter_data, output_data, gradient_im2col_XX, filter_row_size, filter_col_size, output_batch * output_row_size, output_col_size);
// get max threads and blocks
int dev_id = (input_f->ctx).device_id;
cudaSetDevice(dev_id);
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev_id);
int threads = deviceProp.maxThreadsPerBlock;
int blocks = deviceProp.maxThreadsPerMultiProcessor/threads*deviceProp.multiProcessorCount;
// get the gradient of input_x
size_t numthread = input_N * input_C * input_H * input_W;
size_t numblocks = (numthread + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK;
if (stream_handle)
array_set_kernel<<<numblocks , THREADS_PER_BLOCK , 0, *(cudaStream_t*)stream_handle->handle>>>(gradient_x_data , 0 ,numthread);
else
array_set_kernel<<<numblocks , THREADS_PER_BLOCK >>>(gradient_x_data , 0 ,numthread);
if (stream_handle)
trans_im2col_kernel<<<blocks, threads, 0, *(cudaStream_t*)stream_handle->handle>>>(input_N, input_C, input_H, input_W, filter_outChannel, filter_H, filter_W, gradient_x_data, gradient_im2col_XX, padding, stride, blocks);
else
trans_im2col_kernel<<<blocks, threads>>>(input_N, input_C, input_H, input_W, filter_outChannel, filter_H, filter_W, gradient_x_data, gradient_im2col_XX, padding, stride, blocks);
if(p != NULL){
int size_input_f = 1, size_grad_y = 1, size_output = 1, size_workspace = 1;
for(int i = 0; i < input_f -> ndim; i++)
size_input_f *= input_f -> shape[i];
for(int i = 0; i < gradient_y -> ndim; i++)
size_grad_y *= gradient_y -> shape[i];
for(int i = 0; i < gradient_x -> ndim; i++)
size_output *= gradient_x -> shape[i];
for(int i = 0; i < workspace_im2col -> ndim; i++)
size_workspace *= workspace_im2col -> shape[i];
p -> input_memory = 1.0 * (size_input_f + size_grad_y) * sizeof(float) / 1024 / 1024;
p -> output_memory = 1.0 * size_output * sizeof(float) / 1024 / 1024;
p -> workspace_memory = 1.0 * size_workspace * sizeof(float) / 1024 / 1024;
}
return 0;
}
int DLGpuConv2d_Gradient_of_Filter(const DLArrayHandle input_x, const DLArrayHandle gradient_y, DLArrayHandle gradient_f ,DLArrayHandle workspace_im2col,DLArrayHandle workspace_batch_filter,const int padding, const int stride, DLStreamHandle stream_handle = NULL, ProfilerHandle p = NULL){
size_t input_N = input_x->shape[0];
size_t input_C = input_x->shape[1];
size_t input_H = input_x->shape[2];
size_t input_W = input_x->shape[3];
size_t filter_outChannel = gradient_f->shape[0];
size_t filter_inChannel = gradient_f->shape[1];
size_t filter_H = gradient_f->shape[2];
size_t filter_W = gradient_f->shape[3];
size_t output_N = gradient_y->shape[0];
size_t output_C = gradient_y->shape[1];
size_t output_H = gradient_y->shape[2];
size_t output_W = gradient_y->shape[3];
const float * input_x_data = (const float*)input_x ->data;
float *gradient_f_data = (float *)gradient_f -> data;
float *output_data = (float*)gradient_y -> data;
size_t output_batch = output_N;
size_t output_row_size = output_C;
size_t output_col_size = output_H * output_W;
size_t filter_row_size = filter_outChannel;
size_t filter_col_size = filter_inChannel * filter_H * filter_W;
// get max threads and blocks
int dev_id = (input_x->ctx).device_id;
cudaSetDevice(dev_id);
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev_id);
int threads = deviceProp.maxThreadsPerBlock;
int blocks = deviceProp.maxThreadsPerMultiProcessor/threads*deviceProp.multiProcessorCount;
float *im2col_XX;
im2col_XX = (float *)workspace_im2col->data;
if (stream_handle)
im2col_kernel<<<blocks, threads, 0, *(cudaStream_t*)stream_handle->handle>>>(input_N, input_C, input_H, input_W, filter_outChannel, filter_H, filter_W, input_x_data, im2col_XX, padding, stride, blocks);
else
im2col_kernel<<<blocks, threads>>>(input_N, input_C, input_H, input_W, filter_outChannel, filter_H, filter_W, input_x_data, im2col_XX, padding, stride, blocks);
size_t im2col_XX_row = filter_col_size;
size_t im2col_XX_col = output_col_size;
float *batch_filter;
// batch_filter = new float[input_N * filter_row_size * filter_col_size];
batch_filter = (float *)(workspace_batch_filter->data);
const int BLOCK_SIZE = 16;
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid((filter_row_size + BLOCK_SIZE - 1)/dimBlock.x, (filter_col_size + BLOCK_SIZE - 1)/dimBlock.y);
if (stream_handle)
batch_transB_gemm_kernel<<<dimGrid, dimBlock, 0, *(cudaStream_t*)stream_handle->handle>>>(output_data, im2col_XX, batch_filter, output_row_size, output_col_size, im2col_XX_row, im2col_XX_col,output_batch);
else
batch_transB_gemm_kernel<<<dimGrid, dimBlock>>>(output_data, im2col_XX, batch_filter, output_row_size, output_col_size, im2col_XX_row, im2col_XX_col,output_batch);
size_t total = filter_row_size * filter_col_size;
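// Sum the per-sample filter gradients over the batch by repeated pairwise addition: each
// pass folds the upper half of batch_filter onto the lower half until one copy remains.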
while(output_batch !=1 ){
Float_Add(batch_filter, batch_filter + (output_batch + 1)/2*total, output_batch / 2*total, stream_handle);
output_batch = (output_batch + 1)/2;
}
size_t BLOCKS = (total + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
if (stream_handle)
float_memory_copy<<<BLOCKS, THREADS_PER_BLOCK, 0, *(cudaStream_t*)stream_handle->handle>>>(gradient_f_data, batch_filter, total);
else
float_memory_copy<<<BLOCKS, THREADS_PER_BLOCK>>>(gradient_f_data, batch_filter, total);
if(p != NULL){
int size_input_x = 1, size_grad_y = 1, size_output = 1, size_workspace_im2col = 1, size_workspace_batch_filter = 1;
for(int i = 0; i < input_x -> ndim; i++)
size_input_x *= input_x -> shape[i];
for(int i = 0; i < gradient_y -> ndim; i++)
size_grad_y *= gradient_y -> shape[i];
for(int i = 0; i < gradient_f -> ndim; i++)
size_output *= gradient_f -> shape[i];
for(int i = 0; i < workspace_im2col -> ndim; i++)
size_workspace_im2col *= workspace_im2col -> shape[i];
for(int i = 0; i < workspace_batch_filter -> ndim; i++)
size_workspace_batch_filter *= workspace_batch_filter -> shape[i];
p -> input_memory = 1.0 * (size_input_x + size_grad_y) * sizeof(float) / 1024 / 1024;
p -> output_memory = 1.0 * size_output * sizeof(float) / 1024 / 1024;
p -> workspace_memory = 1.0 * (size_workspace_im2col + size_workspace_batch_filter) * sizeof(float) / 1024 / 1024;
}
return 0;
}
// void print(const float *data, int n, int c, int h, int w) {
// std::vector<float> buffer(1 << 20);
// CUDA_CALL(cudaMemcpy(
// buffer.data(), data,
// n * c * h * w * sizeof(float),
// cudaMemcpyDeviceToHost));
// int a = 0;
// for (int i = 0; i < n; ++i) {
// for (int j = 0; j < c; ++j) {
// std::cout << "n=" << i << ", c=" << j << ":" << std::endl;
// for (int k = 0; k < h; ++k) {
// for (int l = 0; l < w; ++l) {
// std::cout << std::right << buffer[a]<<" ";
// ++a;
// }
// std::cout << std::endl;
// }
// }
// }
// std::cout << std::endl;
// }
|
a511b2ccdb6c67f96dcdf2fb665f5017a2eba02b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <string>
#include "dnn.hpp"
using namespace std;
//Define the parameters if not defined externally
#ifndef Sy
#define Sy 1
#define Sx 1
#endif
#ifndef Tnn
//Tiling Sizes
#define Tnn 32
#define Tn 16
#define Ti 16
#define Ty 8
#define Tx 8
#endif
#define NYPAD (Ny+Ky-1)
#define NXPAD (Nx+Kx-1)
#define NYSCL (Ny/Sy)
#define NXSCL (Nx/Sx)
#define SYNAPSE_SIZE (1L*Ky*Kx*Nn*Ni)
VTYPE (*synapse)[Ky][Kx][Nn][Ni];
VTYPE (*neuron_i)[NYPAD][NXPAD][Ni];
VTYPE (*neuron_n)[NYSCL][NXSCL][Nn];
VTYPE (*neuron_n2)[NYSCL][NXSCL][Nn];
VTYPE (*neuron_cuda)[NYSCL][NXSCL][Nn];
void fill_convolution_shared_simple(VTYPE (&synapse)[Ky][Kx][Nn][Ni],
VTYPE (&neuron_i)[NYPAD][NXPAD][Ni]) {
for (int yy = 0; yy < Ky; ++yy) {
for (int xx = 0; xx < Kx; ++xx) {
for (int nn = 0; nn < Nn; ++nn) {
for (int ni = 0; ni < Ni; ++ni) {
synapse[yy][xx][nn][ni] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX) - 0.5f;
}
}
}
}
for (int yy = 0; yy < NYPAD; ++yy) {
for (int xx = 0; xx < NXPAD; ++xx) {
for (int ni = 0; ni < Ni; ++ni) {
neuron_i[yy][xx][ni] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX) - 0.5f;
}
}
}
}
std::pair<int, int> convolution_layer_blocked(
VTYPE (&synapse)[Ky][Kx][Nn][Ni],
VTYPE (&neuron_i)[NYPAD][NXPAD][Ni],
VTYPE (&neuron_n)[NYSCL][NXSCL][Nn]) {
int c1 = 0, c2 = 0;
VTYPE sum[Nn] = {0};
for (int yy = 0; yy < Ny; yy += Ty) {
for (int xx = 0; xx < Nx; xx += Tx) {
for (int nnn = 0; nnn < Nn; nnn += Tnn) {
int yout = yy / Sy;
for (int y = yy; y < yy + Ty; y += Sy) { // tiling for y;
int xout = xx / Sx;
for (int x = xx; x < xx + Tx; x += Sx) { // tiling for x;
for (int nn = nnn; nn < nnn + Tnn; nn += Tn) {
for (int n = nn; n < nn + Tn; n++) {
sum[n] = 0;
}
for (int ky = 0; ky < Ky; ky++) { // sliding window;
for (int kx = 0; kx < Kx; kx++) {
int ii = 0;
VTYPE sum_sc;
for (; ii < Ni - Ti + 1; ii += Ti) {
for (int n = nn; n < nn + Tn; n++) {
sum_sc = 0;
for (int i = ii; i < ii + Ti; i++) {
VTYPE sv = synapse[ky][kx][n][i];
VTYPE nv = neuron_i[ky + y][kx + x][i];
sum_sc += sv * nv;
}
sum[n] += sum_sc;
}
}
}
}
//transfer
for (int n = nn; n < nn + Tn; n++) {
neuron_n[yout][xout][n] = transfer(sum[n]);
}
}
xout++;
}
yout++;
}
}
}
}
return std::make_pair(c1, c2);
}
void convolution_layer(VTYPE (&synapse)[Ky][Kx][Nn][Ni],
VTYPE (&neuron_i)[NYPAD][NXPAD][Ni],
VTYPE (&neuron_n)[NYSCL][NXSCL][Nn]) {
VTYPE sum[Nn] = {0};
// Original code (excluding nn, ii loops)
int yout = 0;
for (int y = 0; y < Ny; y += Sy) { // tiling for y;
int xout = 0;
for (int x = 0; x < Nx; x += Sx) { // tiling for x;
for (int nn = 0; nn < Nn; nn += Tn) {
for (int n = nn; n < nn + Tn; n++) {
sum[n] = 0;
}
// sliding window;
for (int ky = 0; ky < Ky; ky++)
for (int kx = 0; kx < Kx; kx++)
for (int n = nn; n < nn + Tn; n++)
for (int i = 0; i < Ni; i++) {
VTYPE sv = synapse[ky][kx][n][i];
VTYPE nv = neuron_i[ky + y][kx + x][i];
sum[n] += sv * nv;
}
for (int n = nn; n < nn + Tn; n++) {
neuron_n[yout][xout][n] = transfer(sum[n]);
}
}
xout++;
}
yout++;
}
}
__global__ void convolution_layer_CUDA(VTYPE(&synapse)[Nn][Ni][Ky][Kx],
VTYPE(&neuron_i)[Ni][NYPAD][NXPAD],
VTYPE(&neuron_n)[Nn][NYSCL][NXSCL])
{
if (blockIdx.x * 1024 + threadIdx.x < (Ny*Nx))
{
int index_x = ((blockIdx.x * 1024) + threadIdx.x) % Nx;
int index_y = ((blockIdx.x * 1024) + threadIdx.x) / Nx;
for (int n=0; n<Nn; n++)
{
VTYPE acc = 0;
for (int y = index_y; y < index_y+3; y++)
{
for (int x = index_x; x < index_x+3; x++)
{
for (int z = 0; z<Ni; z++)
{
acc += neuron_i[z][y][x] * synapse[n][z][y - index_y][x - index_x];
}
}
}
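// Activation: negative sums are divided by 4, a leaky-ReLU-style transfer (presumably the
// same mapping as transfer() in dnn.hpp used by the host-side versions).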
neuron_n[n][index_y][index_x] = (acc < 0) ? (acc / 4) : acc;
}
}
}
int main(const int argc, const char** argv) {
cout << "allocating memory\n";
synapse = (VTYPE (*)[Ky][Kx][Nn][Ni]) aligned_malloc(64, SYNAPSE_SIZE * sizeof(VTYPE));
neuron_i = (VTYPE (*)[NYPAD][NXPAD][Ni])aligned_malloc(64, NYPAD * NXPAD * Ni * sizeof(VTYPE));
neuron_n = (VTYPE (*)[NYSCL][NXSCL][Nn])aligned_malloc(64, NYSCL * NXSCL * Nn * sizeof(VTYPE));
neuron_n2 = (VTYPE (*)[NYSCL][NXSCL][Nn])aligned_malloc(64, NYSCL * NXSCL * Nn * sizeof(VTYPE));
// Copy the data from the global buffers to the CUDA managed buffers.
VTYPE(*neuron_i_cuda)[Ni][NYPAD][NXPAD];
VTYPE(*neuron_n_cuda)[Nn][NYSCL][NXSCL];
VTYPE(*neuron_n_cuda_reset)[NYSCL][NXSCL][Nn] = (VTYPE (*)[NYSCL][NXSCL][Nn])aligned_malloc(64, NYSCL * NXSCL * Nn * sizeof(VTYPE));
VTYPE(*synapse_cuda)[Nn][Ni][Ky][Kx];
hipMallocManaged(&neuron_i_cuda, NYPAD * NXPAD * Ni * sizeof(VTYPE));
hipMallocManaged(&neuron_n_cuda, NYSCL * NXSCL * Nn * sizeof(VTYPE));
hipMallocManaged(&synapse_cuda, SYNAPSE_SIZE * sizeof(VTYPE));
cout << "initializing arrays\n";
fill_convolution_shared_simple(*synapse, *neuron_i);
for(int i=0; i<NYPAD; i++)
{
for (int j = 0; j < NXPAD; j++)
{
for (int k = 0; k < Ni; k++)
{
(*neuron_i_cuda)[k][i][j] = (*neuron_i)[i][j][k];
}
}
}
for (int i = 0; i<Ky; i++)
{
for (int j = 0; j < Kx; j++)
{
for (int k = 0; k < Ni; k++)
{
for (int l = 0; l < Nn; l++)
{
(*synapse_cuda)[k][l][i][j] = (*synapse)[i][j][k][l];
}
}
}
}
//hipMemcpy(neuron_i_cuda, &neuron_i, sizeof(VTYPE), hipMemcpyHostToDevice);
//hipMemcpy(synapse_cuda, &synapse, sizeof(VTYPE), hipMemcpyHostToDevice);
cout << "starting computation\n";
//Simple Version
begin_roi();
convolution_layer(*synapse, *neuron_i, *neuron_n);
end_roi();
cout << "simple version complete!\n";
//Blocked Version
begin_roi();
convolution_layer_blocked(*synapse, *neuron_i, *neuron_n2);
end_roi();
cout << "blocked computation complete!\n";
compare((VTYPE*)*neuron_n, (VTYPE*)*neuron_n2, NYSCL * NXSCL * Nn);
//Cuda version
begin_roi();
//TODO: Add cuda implementation of the layer.
hipLaunchKernelGGL(( convolution_layer_CUDA) , dim3((222*222/1024+1)), dim3(1024), 0, 0, *synapse_cuda, *neuron_i_cuda, *neuron_n_cuda);
hipDeviceSynchronize();
end_roi();
for(int i=0; i<NYSCL; i++)
{
for (int j = 0; j < NXSCL; j++)
{
for (int k = 0; k < Nn; k++)
{
(*neuron_n_cuda_reset)[i][j][k] = (*neuron_n_cuda)[k][i][j];
}
}
}
cout << "CUDA version complete!\n";
compare((VTYPE*)*neuron_n, (VTYPE*)*neuron_n_cuda_reset, NYSCL * NXSCL * Nn);
hipFree(neuron_i_cuda);
hipFree(synapse_cuda);
hipFree(neuron_n_cuda);
cout << "done\n";
}
| a511b2ccdb6c67f96dcdf2fb665f5017a2eba02b.cu | #include <iostream>
#include <string>
#include "dnn.hpp"
using namespace std;
//Define the parameters if not defined externally
#ifndef Sy
#define Sy 1
#define Sx 1
#endif
#ifndef Tnn
//Tiling Sizes
#define Tnn 32
#define Tn 16
#define Ti 16
#define Ty 8
#define Tx 8
#endif
#define NYPAD (Ny+Ky-1)
#define NXPAD (Nx+Kx-1)
#define NYSCL (Ny/Sy)
#define NXSCL (Nx/Sx)
#define SYNAPSE_SIZE (1L*Ky*Kx*Nn*Ni)
VTYPE (*synapse)[Ky][Kx][Nn][Ni];
VTYPE (*neuron_i)[NYPAD][NXPAD][Ni];
VTYPE (*neuron_n)[NYSCL][NXSCL][Nn];
VTYPE (*neuron_n2)[NYSCL][NXSCL][Nn];
VTYPE (*neuron_cuda)[NYSCL][NXSCL][Nn];
void fill_convolution_shared_simple(VTYPE (&synapse)[Ky][Kx][Nn][Ni],
VTYPE (&neuron_i)[NYPAD][NXPAD][Ni]) {
for (int yy = 0; yy < Ky; ++yy) {
for (int xx = 0; xx < Kx; ++xx) {
for (int nn = 0; nn < Nn; ++nn) {
for (int ni = 0; ni < Ni; ++ni) {
synapse[yy][xx][nn][ni] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX) - 0.5f;
}
}
}
}
for (int yy = 0; yy < NYPAD; ++yy) {
for (int xx = 0; xx < NXPAD; ++xx) {
for (int ni = 0; ni < Ni; ++ni) {
neuron_i[yy][xx][ni] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX) - 0.5f;
}
}
}
}
std::pair<int, int> convolution_layer_blocked(
VTYPE (&synapse)[Ky][Kx][Nn][Ni],
VTYPE (&neuron_i)[NYPAD][NXPAD][Ni],
VTYPE (&neuron_n)[NYSCL][NXSCL][Nn]) {
int c1 = 0, c2 = 0;
VTYPE sum[Nn] = {0};
for (int yy = 0; yy < Ny; yy += Ty) {
for (int xx = 0; xx < Nx; xx += Tx) {
for (int nnn = 0; nnn < Nn; nnn += Tnn) {
int yout = yy / Sy;
for (int y = yy; y < yy + Ty; y += Sy) { // tiling for y;
int xout = xx / Sx;
for (int x = xx; x < xx + Tx; x += Sx) { // tiling for x;
for (int nn = nnn; nn < nnn + Tnn; nn += Tn) {
for (int n = nn; n < nn + Tn; n++) {
sum[n] = 0;
}
for (int ky = 0; ky < Ky; ky++) { // sliding window;
for (int kx = 0; kx < Kx; kx++) {
int ii = 0;
VTYPE sum_sc;
for (; ii < Ni - Ti + 1; ii += Ti) {
for (int n = nn; n < nn + Tn; n++) {
sum_sc = 0;
for (int i = ii; i < ii + Ti; i++) {
VTYPE sv = synapse[ky][kx][n][i];
VTYPE nv = neuron_i[ky + y][kx + x][i];
sum_sc += sv * nv;
}
sum[n] += sum_sc;
}
}
}
}
//transfer
for (int n = nn; n < nn + Tn; n++) {
neuron_n[yout][xout][n] = transfer(sum[n]);
}
}
xout++;
}
yout++;
}
}
}
}
return std::make_pair(c1, c2);
}
void convolution_layer(VTYPE (&synapse)[Ky][Kx][Nn][Ni],
VTYPE (&neuron_i)[NYPAD][NXPAD][Ni],
VTYPE (&neuron_n)[NYSCL][NXSCL][Nn]) {
VTYPE sum[Nn] = {0};
// — Original code — (excluding nn, ii loops)
int yout = 0;
for (int y = 0; y < Ny; y += Sy) { // tiling for y;
int xout = 0;
for (int x = 0; x < Nx; x += Sx) { // tiling for x;
for (int nn = 0; nn < Nn; nn += Tn) {
for (int n = nn; n < nn + Tn; n++) {
sum[n] = 0;
}
// sliding window;
for (int ky = 0; ky < Ky; ky++)
for (int kx = 0; kx < Kx; kx++)
for (int n = nn; n < nn + Tn; n++)
for (int i = 0; i < Ni; i++) {
VTYPE sv = synapse[ky][kx][n][i];
VTYPE nv = neuron_i[ky + y][kx + x][i];
sum[n] += sv * nv;
}
for (int n = nn; n < nn + Tn; n++) {
neuron_n[yout][xout][n] = transfer(sum[n]);
}
}
xout++;
}
yout++;
}
}
__global__ void convolution_layer_CUDA(VTYPE(&synapse)[Nn][Ni][Ky][Kx],
VTYPE(&neuron_i)[Ni][NYPAD][NXPAD],
VTYPE(&neuron_n)[Nn][NYSCL][NXSCL])
{
if (blockIdx.x * 1024 + threadIdx.x < (Ny*Nx))
{
int index_x = ((blockIdx.x * 1024) + threadIdx.x) % Nx;
int index_y = ((blockIdx.x * 1024) + threadIdx.x) / Nx;
for (int n=0; n<Nn; n++)
{
VTYPE acc = 0;
for (int y = index_y; y < index_y+3; y++)
{
for (int x = index_x; x < index_x+3; x++)
{
for (int z = 0; z<Ni; z++)
{
acc += neuron_i[z][y][x] * synapse[n][z][y - index_y][x - index_x];
}
}
}
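// Activation: negative sums are divided by 4, a leaky-ReLU-style transfer (presumably the
// same mapping as transfer() in dnn.hpp used by the host-side versions).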
neuron_n[n][index_y][index_x] = (acc < 0) ? (acc / 4) : acc;
}
}
}
int main(const int argc, const char** argv) {
cout << "allocating memory\n";
synapse = (VTYPE (*)[Ky][Kx][Nn][Ni]) aligned_malloc(64, SYNAPSE_SIZE * sizeof(VTYPE));
neuron_i = (VTYPE (*)[NYPAD][NXPAD][Ni])aligned_malloc(64, NYPAD * NXPAD * Ni * sizeof(VTYPE));
neuron_n = (VTYPE (*)[NYSCL][NXSCL][Nn])aligned_malloc(64, NYSCL * NXSCL * Nn * sizeof(VTYPE));
neuron_n2 = (VTYPE (*)[NYSCL][NXSCL][Nn])aligned_malloc(64, NYSCL * NXSCL * Nn * sizeof(VTYPE));
// Copy the data from the global buffers to the CUDA managed buffers.
VTYPE(*neuron_i_cuda)[Ni][NYPAD][NXPAD];
VTYPE(*neuron_n_cuda)[Nn][NYSCL][NXSCL];
VTYPE(*neuron_n_cuda_reset)[NYSCL][NXSCL][Nn] = (VTYPE (*)[NYSCL][NXSCL][Nn])aligned_malloc(64, NYSCL * NXSCL * Nn * sizeof(VTYPE));
VTYPE(*synapse_cuda)[Nn][Ni][Ky][Kx];
cudaMallocManaged(&neuron_i_cuda, NYPAD * NXPAD * Ni * sizeof(VTYPE));
cudaMallocManaged(&neuron_n_cuda, NYSCL * NXSCL * Nn * sizeof(VTYPE));
cudaMallocManaged(&synapse_cuda, SYNAPSE_SIZE * sizeof(VTYPE));
cout << "initializing arrays\n";
fill_convolution_shared_simple(*synapse, *neuron_i);
for(int i=0; i<NYPAD; i++)
{
for (int j = 0; j < NXPAD; j++)
{
for (int k = 0; k < Ni; k++)
{
(*neuron_i_cuda)[k][i][j] = (*neuron_i)[i][j][k];
}
}
}
for (int i = 0; i<Ky; i++)
{
for (int j = 0; j < Kx; j++)
{
for (int k = 0; k < Ni; k++)
{
for (int l = 0; l < Nn; l++)
{
(*synapse_cuda)[k][l][i][j] = (*synapse)[i][j][k][l];
}
}
}
}
//cudaMemcpy(neuron_i_cuda, &neuron_i, sizeof(VTYPE), cudaMemcpyHostToDevice);
//cudaMemcpy(synapse_cuda, &synapse, sizeof(VTYPE), cudaMemcpyHostToDevice);
cout << "starting computation\n";
//Simple Version
begin_roi();
convolution_layer(*synapse, *neuron_i, *neuron_n);
end_roi();
cout << "simple version complete!\n";
//Blocked Version
begin_roi();
convolution_layer_blocked(*synapse, *neuron_i, *neuron_n2);
end_roi();
cout << "blocked computation complete!\n";
compare((VTYPE*)*neuron_n, (VTYPE*)*neuron_n2, NYSCL * NXSCL * Nn);
//Cuda version
begin_roi();
//TODO: Add cuda implementation of the layer.
convolution_layer_CUDA <<<(222*222/1024+1), 1024>>> (*synapse_cuda, *neuron_i_cuda, *neuron_n_cuda);
cudaDeviceSynchronize();
end_roi();
for(int i=0; i<NYSCL; i++)
{
for (int j = 0; j < NXSCL; j++)
{
for (int k = 0; k < Nn; k++)
{
(*neuron_n_cuda_reset)[i][j][k] = (*neuron_n_cuda)[k][i][j];
}
}
}
cout << "CUDA version complete!\n";
compare((VTYPE*)*neuron_n, (VTYPE*)*neuron_n_cuda_reset, NYSCL * NXSCL * Nn);
cudaFree(neuron_i_cuda);
cudaFree(synapse_cuda);
cudaFree(neuron_n_cuda);
cout << "done\n";
}
|
76323a18d2f1be0a0f697532d9bcdb9665413c62.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include<stdio.h>
#include<stdlib.h>
#include<string.h>
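// Each thread reverses one word: thread k (id = threadIdx.x + 1) scans for the spaces that
// bound its word in a, then writes that word's characters in reverse order into b.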
__global__ void reverseWord(char *a , char *b , int size)
{
int id = threadIdx.x+1;
int cur = 0;
int start = 0;
int end = size;
int j = 0;
for(j = 0;j<size;j++)
{
if(a[j] == ' ')
{
cur++;
if(cur == id)
{
end = j;
break;
}
else
{
start = j;
}
}
}
int i = 0;
if(start!=0)
{
b[start] = ' ';
j = start+1;
}
else
j = start;
i = end-1;
for(;j<=i;j++,i--)
{
b[j] = a[i];
b[i] = a[j];
}
}
int main()
{
int n;
int size;
char *a = (char*)malloc(sizeof(char)*(30));
printf("Enter the string \n");
scanf("%[^\n]%*c", a);
printf("Enter number of words \n");
scanf("%d",&n);
char *b = (char*)malloc(sizeof(char)*(30));
char *d_a , *d_b;
printf("Input String = %s \n",a);
size = strlen(a);
int size1 = sizeof(char)*(size+1);
int size2 = sizeof(char)*(size+1);
hipMalloc((void**)&d_a,size1);
hipMalloc((void**)&d_b,size2);
hipMemcpy(d_a,a,sizeof(char)*(size+1),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( reverseWord), dim3(1),dim3(n), 0, 0, d_a,d_b,size);
hipMemcpy(b,d_b,size2,hipMemcpyDeviceToHost);
printf("Output string = %s \n",b);
hipFree(d_a);
hipFree(d_b);
}
| 76323a18d2f1be0a0f697532d9bcdb9665413c62.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include<stdio.h>
#include<stdlib.h>
#include<string.h>
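// Each thread reverses one word: thread k (id = threadIdx.x + 1) scans for the spaces that
// bound its word in a, then writes that word's characters in reverse order into b.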
__global__ void reverseWord(char *a , char *b , int size)
{
int id = threadIdx.x+1;
int cur = 0;
int start = 0;
int end = size;
int j = 0;
for(j = 0;j<size;j++)
{
if(a[j] == ' ')
{
cur++;
if(cur == id)
{
end = j;
break;
}
else
{
start = j;
}
}
}
int i = 0;
if(start!=0)
{
b[start] = ' ';
j = start+1;
}
else
j = start;
i = end-1;
for(;j<=i;j++,i--)
{
b[j] = a[i];
b[i] = a[j];
}
}
int main()
{
int n;
int size;
char *a = (char*)malloc(sizeof(char)*(30));
printf("Enter the string \n");
scanf("%[^\n]%*c", a);
printf("Enter number of words \n");
scanf("%d",&n);
char *b = (char*)malloc(sizeof(char)*(30));
char *d_a , *d_b;
printf("Input String = %s \n",a);
size = strlen(a);
int size1 = sizeof(char)*(size+1);
int size2 = sizeof(char)*(size+1);
cudaMalloc((void**)&d_a,size1);
cudaMalloc((void**)&d_b,size2);
cudaMemcpy(d_a,a,sizeof(char)*(size+1),cudaMemcpyHostToDevice);
reverseWord<<<1,n>>>(d_a,d_b,size);
cudaMemcpy(b,d_b,size2,cudaMemcpyDeviceToHost);
printf("Output string = %s \n",b);
cudaFree(d_a);
cudaFree(d_b);
}
|
67623869eaab88757937c8139e1b5b4aef0acd31.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/NativeFunctions.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/Dispatch.h>
#include <ATen/ExpandUtils.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/TensorIterator.h>
namespace {
inline void lerp_cuda(at::Tensor& ret, const at::Tensor& self, const at::Tensor& end, const at::Tensor& weights) {
TORCH_CHECK(self.dtype() == end.dtype(), "expected dtype ", self.dtype(), " for `end` but got dtype ", end.dtype());
TORCH_CHECK(self.dtype() == weights.dtype(), "expected dtype ", self.dtype(), " for `weights` but got dtype ", weights.dtype());
at::TensorIterator iter = at::TensorIteratorConfig()
.add_output(ret)
.add_input(self)
.add_input(end)
.add_input(weights)
.build();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "lerp_cuda", [&]{
at::native::gpu_kernel(iter,
[] GPU_LAMBDA (
scalar_t self_val,
scalar_t end_val,
scalar_t weight_val) -> scalar_t {
return (weight_val < 0.5) ?
self_val + weight_val * (end_val - self_val) : end_val - (end_val - self_val) * (1 - weight_val);
});
});
}
template <typename scalar_t>
void lerp_scalar_cuda(at::Tensor& ret, const at::Tensor& self, const at::Tensor& end, scalar_t weight_val) {
TORCH_CHECK(self.dtype() == end.dtype(), "expected dtype ", self.dtype(), " for `end` but got dtype ", end.dtype());
at::TensorIterator iter = at::TensorIteratorConfig()
.add_output(ret)
.add_input(self)
.add_input(end)
.build();
at::native::gpu_kernel(iter,
[=] GPU_LAMBDA (scalar_t self_val, scalar_t end_val) {
return (weight_val < 0.5) ? self_val + weight_val * (end_val - self_val) : end_val - (end_val - self_val) * (1 - weight_val);
});
}
} // namespace
namespace at {
namespace native {
Tensor& lerp_cuda_tensor_out(Tensor& result, const Tensor& self,
const Tensor& end, const Tensor& weight) {
Tensor b_self, b_end, b_weight;
TORCH_CHECK(weight.dim() <= ::max(self.dim(), end.dim()),
"weight should be of dimension max(self.dim(), end.dim()) or lesser");
std::tie(b_self, b_end, b_weight) = expand_outplace(self, end, weight, "lerp_out_cuda");
lerp_cuda(result, b_self, b_end, b_weight);
return result;
}
Tensor& lerp_cuda_scalar_out(Tensor& result, const Tensor& self,
const Tensor& end, Scalar weight) {
Tensor b_self, b_end;
std::tie(b_self, b_end) = expand_outplace(self, end, "lerp_out_cuda");
AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.scalar_type(), "lerp_out_cuda", [&]{
lerp_scalar_cuda<scalar_t>(result, b_self, b_end, weight.to<scalar_t>());
});
return result;
}
Tensor& lerp_cuda_tensor_(Tensor& self, const Tensor& end, const Tensor& weight) {
Tensor b_self, b_end, b_weight;
std::tie(b_self, b_end, b_weight) = expand_outplace(self, end, weight, "lerp__cuda");
TORCH_CHECK(b_self.sizes() == self.sizes(),
"output with shape ", self.sizes(),
" doesn't match the broadcast shape ", b_self.sizes());
TORCH_CHECK(weight.dim() <= ::max(self.dim(), end.dim()),
"weight should be of dimension max(self.dim(), end.dim()) or lesser");
lerp_cuda(self, b_self, b_end, b_weight);
return self;
}
Tensor& lerp_cuda_scalar_(Tensor& self, const Tensor& end, Scalar weight) {
Tensor b_self, b_end;
std::tie(b_self, b_end) = expand_outplace(self, end, "lerp__cuda");
TORCH_CHECK(b_self.sizes() == self.sizes(),
"output with shape ", self.sizes(),
" doesn't match the broadcast shape ", b_self.sizes());
AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.scalar_type(), "lerp__cuda", [&]{
lerp_scalar_cuda<scalar_t>(self, b_self, b_end, weight.to<scalar_t>());
});
return self;
}
Tensor lerp_cuda_tensor(const Tensor& self, const Tensor& end, const Tensor& weight) {
Tensor b_self, b_end, b_weight;
TORCH_CHECK(weight.dim() <= ::max(self.dim(), end.dim()),
"weight should be of dimension max(self.dim(), end.dim()) or lesser");
std::tie(b_self, b_end, b_weight) = expand_outplace(self, end, weight, "lerp_cuda");
Tensor result = at::empty_like(b_self, b_self.suggest_memory_format());
lerp_cuda(result, b_self, b_end, b_weight);
return result;
}
Tensor lerp_cuda_scalar(const Tensor& self, const Tensor& end, Scalar weight) {
Tensor b_self, b_end;
std::tie(b_self, b_end) = expand_outplace(self, end, "lerp_cuda");
Tensor result = at::empty_like(b_self, b_self.suggest_memory_format());
AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.scalar_type(), "lerp_cuda", [&]{
lerp_scalar_cuda<scalar_t>(result, b_self, b_end, weight.to<scalar_t>());
});
return result;
}
} // namespace native
} // namespace at
| 67623869eaab88757937c8139e1b5b4aef0acd31.cu | #include <ATen/NativeFunctions.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/Dispatch.h>
#include <ATen/ExpandUtils.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/TensorIterator.h>
namespace {
inline void lerp_cuda(at::Tensor& ret, const at::Tensor& self, const at::Tensor& end, const at::Tensor& weights) {
TORCH_CHECK(self.dtype() == end.dtype(), "expected dtype ", self.dtype(), " for `end` but got dtype ", end.dtype());
TORCH_CHECK(self.dtype() == weights.dtype(), "expected dtype ", self.dtype(), " for `weights` but got dtype ", weights.dtype());
at::TensorIterator iter = at::TensorIteratorConfig()
.add_output(ret)
.add_input(self)
.add_input(end)
.add_input(weights)
.build();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "lerp_cuda", [&]{
at::native::gpu_kernel(iter,
[] GPU_LAMBDA (
scalar_t self_val,
scalar_t end_val,
scalar_t weight_val) -> scalar_t {
return (weight_val < 0.5) ?
self_val + weight_val * (end_val - self_val) : end_val - (end_val - self_val) * (1 - weight_val);
});
});
}
template <typename scalar_t>
void lerp_scalar_cuda(at::Tensor& ret, const at::Tensor& self, const at::Tensor& end, scalar_t weight_val) {
TORCH_CHECK(self.dtype() == end.dtype(), "expected dtype ", self.dtype(), " for `end` but got dtype ", end.dtype());
at::TensorIterator iter = at::TensorIteratorConfig()
.add_output(ret)
.add_input(self)
.add_input(end)
.build();
at::native::gpu_kernel(iter,
[=] GPU_LAMBDA (scalar_t self_val, scalar_t end_val) {
return (weight_val < 0.5) ? self_val + weight_val * (end_val - self_val) : end_val - (end_val - self_val) * (1 - weight_val);
});
}
} // namespace
namespace at {
namespace native {
Tensor& lerp_cuda_tensor_out(Tensor& result, const Tensor& self,
const Tensor& end, const Tensor& weight) {
Tensor b_self, b_end, b_weight;
TORCH_CHECK(weight.dim() <= std::max(self.dim(), end.dim()),
"weight should be of dimension max(self.dim(), end.dim()) or lesser");
std::tie(b_self, b_end, b_weight) = expand_outplace(self, end, weight, "lerp_out_cuda");
lerp_cuda(result, b_self, b_end, b_weight);
return result;
}
Tensor& lerp_cuda_scalar_out(Tensor& result, const Tensor& self,
const Tensor& end, Scalar weight) {
Tensor b_self, b_end;
std::tie(b_self, b_end) = expand_outplace(self, end, "lerp_out_cuda");
AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.scalar_type(), "lerp_out_cuda", [&]{
lerp_scalar_cuda<scalar_t>(result, b_self, b_end, weight.to<scalar_t>());
});
return result;
}
Tensor& lerp_cuda_tensor_(Tensor& self, const Tensor& end, const Tensor& weight) {
Tensor b_self, b_end, b_weight;
std::tie(b_self, b_end, b_weight) = expand_outplace(self, end, weight, "lerp__cuda");
TORCH_CHECK(b_self.sizes() == self.sizes(),
"output with shape ", self.sizes(),
" doesn't match the broadcast shape ", b_self.sizes());
TORCH_CHECK(weight.dim() <= std::max(self.dim(), end.dim()),
"weight should be of dimension max(self.dim(), end.dim()) or lesser");
lerp_cuda(self, b_self, b_end, b_weight);
return self;
}
Tensor& lerp_cuda_scalar_(Tensor& self, const Tensor& end, Scalar weight) {
Tensor b_self, b_end;
std::tie(b_self, b_end) = expand_outplace(self, end, "lerp__cuda");
TORCH_CHECK(b_self.sizes() == self.sizes(),
"output with shape ", self.sizes(),
" doesn't match the broadcast shape ", b_self.sizes());
AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.scalar_type(), "lerp__cuda", [&]{
lerp_scalar_cuda<scalar_t>(self, b_self, b_end, weight.to<scalar_t>());
});
return self;
}
Tensor lerp_cuda_tensor(const Tensor& self, const Tensor& end, const Tensor& weight) {
Tensor b_self, b_end, b_weight;
TORCH_CHECK(weight.dim() <= std::max(self.dim(), end.dim()),
"weight should be of dimension max(self.dim(), end.dim()) or lesser");
std::tie(b_self, b_end, b_weight) = expand_outplace(self, end, weight, "lerp_cuda");
Tensor result = at::empty_like(b_self, b_self.suggest_memory_format());
lerp_cuda(result, b_self, b_end, b_weight);
return result;
}
Tensor lerp_cuda_scalar(const Tensor& self, const Tensor& end, Scalar weight) {
Tensor b_self, b_end;
std::tie(b_self, b_end) = expand_outplace(self, end, "lerp_cuda");
Tensor result = at::empty_like(b_self, b_self.suggest_memory_format());
AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.scalar_type(), "lerp_cuda", [&]{
lerp_scalar_cuda<scalar_t>(result, b_self, b_end, weight.to<scalar_t>());
});
return result;
}
} // namespace native
} // namespace at
|
f137918a1296b44dd6a6aa833483fce68e06af20.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/diagonal_kernel.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/diagonal.h"
namespace phi {
using paddle::platform::PADDLE_CUDA_NUM_THREADS;
template <typename T, typename Context>
void DiagonalKernel(const Context& dev_ctx,
const DenseTensor& x,
int offset,
int axis1,
int axis2,
DenseTensor* out) {
auto* input = &x;
const auto* input_data = input->data<T>();
auto input_dim = input->dims().Get();
auto input_dim_size = input->dims().size();
std::vector<int64_t> res_in = vectorize(phi::stride(input->dims()));
DenseTensor input_stride_tensor;
paddle::framework::TensorFromVector<int64_t>(
res_in, dev_ctx, &input_stride_tensor);
int64_t* input_stride = input_stride_tensor.data<int64_t>();
auto* output = out;
auto* output_data = dev_ctx.template Alloc<T>(out);
auto output_dim = output->dims().Get();
auto output_dim_size = output->dims().size();
std::vector<int64_t> res_out = vectorize(phi::stride(output->dims()));
DenseTensor output_stride_tensor;
paddle::framework::TensorFromVector<int64_t>(
res_out, dev_ctx, &output_stride_tensor);
int64_t* output_stride = output_stride_tensor.data<int64_t>();
const int64_t offset_ = offset;
int64_t axis1_ = axis1 < 0 ? input_dim_size + axis1 : axis1;
int64_t axis2_ = axis2 < 0 ? input_dim_size + axis2 : axis2;
int64_t numel = input->numel();
int threads = PADDLE_CUDA_NUM_THREADS;
int blocks = (numel + threads - 1) / threads;
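// Dispatch on the input rank: funcs::DiagonalCuda<T, IN_RANK, OUT_RANK> is instantiated
// once per supported rank (2 through 9), with the output rank one less than the input rank.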
switch (input_dim_size) {
case 2:
hipLaunchKernelGGL(( funcs::DiagonalCuda<T, 2, 1>), dim3(blocks), dim3(threads), 0, 0, input_data,
output_data,
offset_,
axis1_,
axis2_,
input_stride,
output_stride,
numel,
false);
break;
case 3:
hipLaunchKernelGGL(( funcs::DiagonalCuda<T, 3, 2>), dim3(blocks), dim3(threads), 0, 0, input_data,
output_data,
offset_,
axis1_,
axis2_,
input_stride,
output_stride,
numel,
false);
break;
case 4:
hipLaunchKernelGGL(( funcs::DiagonalCuda<T, 4, 3>), dim3(blocks), dim3(threads), 0, 0, input_data,
output_data,
offset_,
axis1_,
axis2_,
input_stride,
output_stride,
numel,
false);
break;
case 5:
hipLaunchKernelGGL(( funcs::DiagonalCuda<T, 5, 4>), dim3(blocks), dim3(threads), 0, 0, input_data,
output_data,
offset_,
axis1_,
axis2_,
input_stride,
output_stride,
numel,
false);
break;
case 6:
hipLaunchKernelGGL(( funcs::DiagonalCuda<T, 6, 5>), dim3(blocks), dim3(threads), 0, 0, input_data,
output_data,
offset_,
axis1_,
axis2_,
input_stride,
output_stride,
numel,
false);
break;
case 7:
hipLaunchKernelGGL(( funcs::DiagonalCuda<T, 7, 6>), dim3(blocks), dim3(threads), 0, 0, input_data,
output_data,
offset_,
axis1_,
axis2_,
input_stride,
output_stride,
numel,
false);
break;
case 8:
hipLaunchKernelGGL(( funcs::DiagonalCuda<T, 8, 7>), dim3(blocks), dim3(threads), 0, 0, input_data,
output_data,
offset_,
axis1_,
axis2_,
input_stride,
output_stride,
numel,
false);
break;
case 9:
hipLaunchKernelGGL(( funcs::DiagonalCuda<T, 9, 8>), dim3(blocks), dim3(threads), 0, 0, input_data,
output_data,
offset_,
axis1_,
axis2_,
input_stride,
output_stride,
numel,
false);
break;
default:
PADDLE_THROW(errors::InvalidArgument(
"The rank of input should be less than 10, but received %d.",
input_dim_size));
}
}
} // namespace phi
PD_REGISTER_KERNEL(diagonal,
GPU,
ALL_LAYOUT,
phi::DiagonalKernel,
float,
double,
int,
int64_t,
bool) {}
| f137918a1296b44dd6a6aa833483fce68e06af20.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/diagonal_kernel.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/diagonal.h"
namespace phi {
using paddle::platform::PADDLE_CUDA_NUM_THREADS;
template <typename T, typename Context>
void DiagonalKernel(const Context& dev_ctx,
const DenseTensor& x,
int offset,
int axis1,
int axis2,
DenseTensor* out) {
auto* input = &x;
const auto* input_data = input->data<T>();
auto input_dim = input->dims().Get();
auto input_dim_size = input->dims().size();
std::vector<int64_t> res_in = vectorize(phi::stride(input->dims()));
DenseTensor input_stride_tensor;
paddle::framework::TensorFromVector<int64_t>(
res_in, dev_ctx, &input_stride_tensor);
int64_t* input_stride = input_stride_tensor.data<int64_t>();
auto* output = out;
auto* output_data = dev_ctx.template Alloc<T>(out);
auto output_dim = output->dims().Get();
auto output_dim_size = output->dims().size();
std::vector<int64_t> res_out = vectorize(phi::stride(output->dims()));
DenseTensor output_stride_tensor;
paddle::framework::TensorFromVector<int64_t>(
res_out, dev_ctx, &output_stride_tensor);
int64_t* output_stride = output_stride_tensor.data<int64_t>();
const int64_t offset_ = offset;
int64_t axis1_ = axis1 < 0 ? input_dim_size + axis1 : axis1;
int64_t axis2_ = axis2 < 0 ? input_dim_size + axis2 : axis2;
int64_t numel = input->numel();
int threads = PADDLE_CUDA_NUM_THREADS;
int blocks = (numel + threads - 1) / threads;
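// Dispatch on the input rank: funcs::DiagonalCuda<T, IN_RANK, OUT_RANK> is instantiated
// once per supported rank (2 through 9), with the output rank one less than the input rank.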
switch (input_dim_size) {
case 2:
funcs::DiagonalCuda<T, 2, 1><<<blocks, threads>>>(input_data,
output_data,
offset_,
axis1_,
axis2_,
input_stride,
output_stride,
numel,
false);
break;
case 3:
funcs::DiagonalCuda<T, 3, 2><<<blocks, threads>>>(input_data,
output_data,
offset_,
axis1_,
axis2_,
input_stride,
output_stride,
numel,
false);
break;
case 4:
funcs::DiagonalCuda<T, 4, 3><<<blocks, threads>>>(input_data,
output_data,
offset_,
axis1_,
axis2_,
input_stride,
output_stride,
numel,
false);
break;
case 5:
funcs::DiagonalCuda<T, 5, 4><<<blocks, threads>>>(input_data,
output_data,
offset_,
axis1_,
axis2_,
input_stride,
output_stride,
numel,
false);
break;
case 6:
funcs::DiagonalCuda<T, 6, 5><<<blocks, threads>>>(input_data,
output_data,
offset_,
axis1_,
axis2_,
input_stride,
output_stride,
numel,
false);
break;
case 7:
funcs::DiagonalCuda<T, 7, 6><<<blocks, threads>>>(input_data,
output_data,
offset_,
axis1_,
axis2_,
input_stride,
output_stride,
numel,
false);
break;
case 8:
funcs::DiagonalCuda<T, 8, 7><<<blocks, threads>>>(input_data,
output_data,
offset_,
axis1_,
axis2_,
input_stride,
output_stride,
numel,
false);
break;
case 9:
funcs::DiagonalCuda<T, 9, 8><<<blocks, threads>>>(input_data,
output_data,
offset_,
axis1_,
axis2_,
input_stride,
output_stride,
numel,
false);
break;
default:
PADDLE_THROW(errors::InvalidArgument(
"The rank of input should be less than 10, but received %d.",
input_dim_size));
}
}
} // namespace phi
PD_REGISTER_KERNEL(diagonal,
GPU,
ALL_LAYOUT,
phi::DiagonalKernel,
float,
double,
int,
int64_t,
bool) {}
|
2e30757f0a5e4a61a60cb98acdb9ff5ad2797706.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
// CUDA kernel that adds each element's index to it: x[i] = x[i] + i
__global__
void add(int n, float *x)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
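// Grid-stride loop: each thread handles elements index, index + stride, index + 2*stride, ...
// so the kernel covers any N regardless of the launch configuration.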
for (int i = index; i < n; i += stride)
x[i] = x[i] + i;
}
int main(void)
{
int N = 16*1024+8*1024;
float *x, *y;
hipMallocManaged(&x, N*sizeof(float));
hipMallocManaged(&y, N*sizeof(float));
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
}
int blockSize = 32;
int numBlocks = (N + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( add), dim3(numBlocks), dim3(blockSize), 0, 0, N, x);
for(int j = 0; j < 100000; j++)
for (int i = 0; i < N; i++) {
x[i] += 2.0f;
}
hipLaunchKernelGGL(( add), dim3(numBlocks), dim3(blockSize), 0, 0, N, x);
// Wait for GPU to finish before accessing on host
hipDeviceSynchronize();
// Optional host-side check (left commented out; y is never written in this variant)
// float maxError = 0.0f;
// for (int i = 0; i < N; i++)
// maxError = fmax(maxError, fabs(y[i]-3.0f));
// std::cout << "Max error: " << maxError << std::endl;
// Free memory
hipFree(x);
hipFree(y);
return 0;
} | 2e30757f0a5e4a61a60cb98acdb9ff5ad2797706.cu | #include <iostream>
#include <math.h>
// CUDA kernel that adds each element's index to it: x[i] = x[i] + i
__global__
void add(int n, float *x)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
x[i] = x[i] + i;
}
int main(void)
{
int N = 16*1024+8*1024;;
float *x, *y;
cudaMallocManaged(&x, N*sizeof(float));
cudaMallocManaged(&y, N*sizeof(float));
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
}
int blockSize = 32;
int numBlocks = (N + blockSize - 1) / blockSize;
add<<<numBlocks, blockSize>>>(N, x);
for(int j = 0; j < 100000; j++)
for (int i = 0; i < N; i++) {
x[i] += 2.0f;
}
add<<<numBlocks, blockSize>>>(N, x);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
// Optional host-side check (left commented out; y is never written in this variant)
// float maxError = 0.0f;
// for (int i = 0; i < N; i++)
// maxError = fmax(maxError, fabs(y[i]-3.0f));
// std::cout << "Max error: " << maxError << std::endl;
// Free memory
cudaFree(x);
cudaFree(y);
return 0;
} |
e3f9e96b07775c75b425cccb617c392c66fe59c2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "mixed_tentusscher_myo_epi_2004_S2_17.h"
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu)
{
print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium GPU model\n\n");
// execution configuration
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
check_cuda_error(hipMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
check_cuda_error(hipMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
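// sv is allocated pitched with NEQ rows of num_volumes states each; the pitch is copied
// to the device-side symbol `pitch` so the kernels can index individual state variables.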
// Get the mapping array
uint32_t *mapping = NULL;
uint32_t *mapping_device = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
check_cuda_error(hipMalloc((void **)&mapping_device, extra_data_bytes_size));
check_cuda_error(hipMemcpy(mapping_device, mapping, extra_data_bytes_size, hipMemcpyHostToDevice));
}
hipLaunchKernelGGL(( kernel_set_model_inital_conditions) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, *sv, mapping_device, num_volumes);
check_cuda_error( hipPeekAtLastError() );
hipDeviceSynchronize();
check_cuda_error(hipFree(mapping_device));
return pitch_h;
}
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu)
{
// execution configuration
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(hipMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(hipMemcpy(stims_currents_device, stim_currents, stim_currents_size, hipMemcpyHostToDevice));
// The cells_to_solve array is passed when we are using an adaptive mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL)
{
check_cuda_error(hipMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(hipMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, hipMemcpyHostToDevice));
}
// Get the mapping array
uint32_t *mapping = NULL;
uint32_t *mapping_device = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
check_cuda_error(hipMalloc((void **)&mapping_device, extra_data_bytes_size));
check_cuda_error(hipMemcpy(mapping_device, mapping, extra_data_bytes_size, hipMemcpyHostToDevice));
}
else
{
print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
}
hipLaunchKernelGGL(( solve_gpu) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, dt, sv, stims_currents_device, cells_to_solve_device, mapping_device, num_cells_to_solve, num_steps);
check_cuda_error( hipPeekAtLastError() );
check_cuda_error(hipFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(hipFree(cells_to_solve_device));
if(mapping_device) check_cuda_error(hipFree(mapping_device));
}
__global__ void kernel_set_model_inital_conditions(real *sv, uint32_t *mapping, int num_volumes)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if (threadID < num_volumes)
{
// Initial conditions for TenTusscher 2004 myocardium
if (mapping[threadID] == 0)
{
// Default initial conditions
/*
*((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real * )((char *) sv + pitch * 1) + threadID) = 0.f; //M
*((real * )((char *) sv + pitch * 2) + threadID) = 0.75; //H
*((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; //J
*((real * )((char *) sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real * )((char *) sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real * )((char *) sv + pitch * 6) + threadID) = 0.f; //Xs
*((real * )((char *) sv + pitch * 7) + threadID) = 1.f; //S
*((real * )((char *) sv + pitch * 8) + threadID) = 0.f; //R
*((real * )((char *) sv + pitch * 9) + threadID) = 0.f; //D
*((real * )((char *) sv + pitch * 10) + threadID) = 1.f; //F
*((real * )((char *) sv + pitch * 11) + threadID) = 1.f; //FCa
*((real * )((char *) sv + pitch * 12) + threadID) = 1.f; //G
*((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real * )((char *) sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real * )((char *) sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
for (uint32_t i = 0; i < NEQ; i++)
*((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i];
}
// Initial conditions for TenTusscher 2004 epicardium
else
{
// Default initial conditions
/*
*((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real * )((char *) sv + pitch * 1) + threadID) = 0.f; //M
*((real * )((char *) sv + pitch * 2) + threadID) = 0.75; //H
*((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; //J
*((real * )((char *) sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real * )((char *) sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real * )((char *) sv + pitch * 6) + threadID) = 0.f; //Xs
*((real * )((char *) sv + pitch * 7) + threadID) = 1.f; //S
*((real * )((char *) sv + pitch * 8) + threadID) = 0.f; //R
*((real * )((char *) sv + pitch * 9) + threadID) = 0.f; //D
*((real * )((char *) sv + pitch * 10) + threadID) = 1.f; //F
*((real * )((char *) sv + pitch * 11) + threadID) = 1.f; //FCa
*((real * )((char *) sv + pitch * 12) + threadID) = 1.f; //G
*((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real * )((char *) sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real * )((char *) sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.5285006584511,0.00130106729313035,0.778730090563051,0.778532170509002,0.000175864034699588,0.484676327494511,0.00294864118836231,0.999998334805594,1.94635926887894e-08,1.90111810990968e-05,0.999770708859905,1.00748136518757,0.999998809936904,3.60224813237435e-05,1.18254991511234,9.21308723760909,140.066635187809};
for (uint32_t i = 0; i < NEQ; i++)
*((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i];
}
}
}
// Solving the model for each cell in the tissue matrix ni x nj
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
uint32_t *cells_to_solve, uint32_t *mapping, uint32_t num_cells_to_solve,
int num_steps)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int sv_id;
// Each thread solves one cell model
if(threadID < num_cells_to_solve)
{
if(cells_to_solve)
sv_id = cells_to_solve[threadID];
else
sv_id = threadID;
real rDY[NEQ];
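// mapping[sv_id] selects the cell type (0 = myocardium, otherwise epicardium);
// each of the NEQ state entries is then advanced as sv = sv + dt * rDY[i].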
for (int n = 0; n < num_steps; ++n)
{
if (mapping[sv_id] == 0)
{
RHS_gpu_myo(sv, rDY, stim_currents[threadID], sv_id, dt);
for(int i = 0; i < NEQ; i++)
{
*((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id);
}
}
else
{
RHS_gpu_epi(sv, rDY, stim_currents[threadID], sv_id, dt);
for (int i = 0; i < NEQ; i++)
{
*((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id);
}
}
}
}
}
inline __device__ void RHS_gpu_myo (real *sv_, real *rDY_, real stim_current, int threadID_, real dt)
{
// State variables
real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Myocardium cell
real Gks=0.062;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Myocardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
Irel=A*sd*sg;
Ileak=0.00008f*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
// [!] Myocardium cell
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
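// Gating variables follow the Rush-Larsen form on these lines: each gate relaxes
// exponentially toward its steady-state value with its voltage-dependent time constant.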
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
inline __device__ void RHS_gpu_epi (real *sv_, real *rDY_, real stim_current, int threadID_, real dt)
{
// State variables
real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Epicardium cell
real Gks=0.245;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Epicardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
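// Scenario-specific parameter set: the values below overwrite the default conductances
// and rates declared above for the epicardium cell model.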
real parameters []={13.7219011711698,0.000373800660274715,0.000150569617335446,0.000654485626385041,0.257379206595380,0.173802542474158,0.132458241657246,3.93296187661537,0.0158924919170214,2.50168625879054,1095.95864752453,0.000511327811652900,0.243193135425503,0.0192821673745436,0.00636346797017134,9.00104876078144e-06};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
| e3f9e96b07775c75b425cccb617c392c66fe59c2.cu | #include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "mixed_tentusscher_myo_epi_2004_S2_17.h"
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu)
{
print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium GPU model\n\n");
// execution configuration
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
check_cuda_error(cudaMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
check_cuda_error(cudaMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
// Get the mapping array
uint32_t *mapping = NULL;
uint32_t *mapping_device = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
check_cuda_error(cudaMalloc((void **)&mapping_device, extra_data_bytes_size));
check_cuda_error(cudaMemcpy(mapping_device, mapping, extra_data_bytes_size, cudaMemcpyHostToDevice));
}
kernel_set_model_inital_conditions <<<GRID, BLOCK_SIZE>>>(*sv, mapping_device, num_volumes);
check_cuda_error( cudaPeekAtLastError() );
cudaDeviceSynchronize();
check_cuda_error(cudaFree(mapping_device));
return pitch_h;
}
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu)
{
// execution configuration
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(cudaMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(cudaMemcpy(stims_currents_device, stim_currents, stim_currents_size, cudaMemcpyHostToDevice));
// The cells_to_solve array is passed when we are using an adaptive mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL)
{
check_cuda_error(cudaMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(cudaMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, cudaMemcpyHostToDevice));
}
// Get the mapping array
uint32_t *mapping = NULL;
uint32_t *mapping_device = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
check_cuda_error(cudaMalloc((void **)&mapping_device, extra_data_bytes_size));
check_cuda_error(cudaMemcpy(mapping_device, mapping, extra_data_bytes_size, cudaMemcpyHostToDevice));
}
else
{
print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
}
solve_gpu <<<GRID, BLOCK_SIZE>>>(dt, sv, stims_currents_device, cells_to_solve_device, mapping_device, num_cells_to_solve, num_steps);
check_cuda_error( cudaPeekAtLastError() );
check_cuda_error(cudaFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(cudaFree(cells_to_solve_device));
if(mapping_device) check_cuda_error(cudaFree(mapping_device));
}
__global__ void kernel_set_model_inital_conditions(real *sv, uint32_t *mapping, int num_volumes)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if (threadID < num_volumes)
{
// Initial conditions for TenTusscher 2004 myocardium
if (mapping[threadID] == 0)
{
// Default initial conditions
/*
*((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real * )((char *) sv + pitch * 1) + threadID) = 0.f; //M
*((real * )((char *) sv + pitch * 2) + threadID) = 0.75; //H
*((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; //J
*((real * )((char *) sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real * )((char *) sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real * )((char *) sv + pitch * 6) + threadID) = 0.f; //Xs
*((real * )((char *) sv + pitch * 7) + threadID) = 1.f; //S
*((real * )((char *) sv + pitch * 8) + threadID) = 0.f; //R
*((real * )((char *) sv + pitch * 9) + threadID) = 0.f; //D
*((real * )((char *) sv + pitch * 10) + threadID) = 1.f; //F
*((real * )((char *) sv + pitch * 11) + threadID) = 1.f; //FCa
*((real * )((char *) sv + pitch * 12) + threadID) = 1.f; //G
*((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real * )((char *) sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real * )((char *) sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
for (uint32_t i = 0; i < NEQ; i++)
*((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i];
}
// Initial conditions for TenTusscher 2004 epicardium
else
{
// Default initial conditions
/*
*((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real * )((char *) sv + pitch * 1) + threadID) = 0.f; //M
*((real * )((char *) sv + pitch * 2) + threadID) = 0.75; //H
*((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; //J
*((real * )((char *) sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real * )((char *) sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real * )((char *) sv + pitch * 6) + threadID) = 0.f; //Xs
*((real * )((char *) sv + pitch * 7) + threadID) = 1.f; //S
*((real * )((char *) sv + pitch * 8) + threadID) = 0.f; //R
*((real * )((char *) sv + pitch * 9) + threadID) = 0.f; //D
*((real * )((char *) sv + pitch * 10) + threadID) = 1.f; //F
*((real * )((char *) sv + pitch * 11) + threadID) = 1.f; //FCa
*((real * )((char *) sv + pitch * 12) + threadID) = 1.f; //G
*((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real * )((char *) sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real * )((char *) sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.5285006584511,0.00130106729313035,0.778730090563051,0.778532170509002,0.000175864034699588,0.484676327494511,0.00294864118836231,0.999998334805594,1.94635926887894e-08,1.90111810990968e-05,0.999770708859905,1.00748136518757,0.999998809936904,3.60224813237435e-05,1.18254991511234,9.21308723760909,140.066635187809};
for (uint32_t i = 0; i < NEQ; i++)
*((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i];
}
}
}
// Solving the model for each cell in the tissue matrix ni x nj
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
uint32_t *cells_to_solve, uint32_t *mapping, uint32_t num_cells_to_solve,
int num_steps)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int sv_id;
// Each thread solves one cell model
if(threadID < num_cells_to_solve)
{
if(cells_to_solve)
sv_id = cells_to_solve[threadID];
else
sv_id = threadID;
real rDY[NEQ];
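// mapping[sv_id] selects the cell type (0 = myocardium, otherwise epicardium);
// each of the NEQ state entries is then advanced as sv = sv + dt * rDY[i].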
for (int n = 0; n < num_steps; ++n)
{
if (mapping[sv_id] == 0)
{
RHS_gpu_myo(sv, rDY, stim_currents[threadID], sv_id, dt);
for(int i = 0; i < NEQ; i++)
{
*((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id);
}
}
else
{
RHS_gpu_epi(sv, rDY, stim_currents[threadID], sv_id, dt);
for (int i = 0; i < NEQ; i++)
{
*((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id);
}
}
}
}
}
inline __device__ void RHS_gpu_myo (real *sv_, real *rDY_, real stim_current, int threadID_, real dt)
{
// State variables
real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Myocardium cell
real Gks=0.062;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Myocardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
Irel=A*sd*sg;
Ileak=0.00008f*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
// [!] Myocardium cell
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
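// Gating variables follow the Rush-Larsen form on these lines: each gate relaxes
// exponentially toward its steady-state value with its voltage-dependent time constant.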
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
inline __device__ void RHS_gpu_epi (real *sv_, real *rDY_, real stim_current, int threadID_, real dt)
{
// State variables
real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Epicardium cell
real Gks=0.245;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Epicardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real parameters []={13.7219011711698,0.000373800660274715,0.000150569617335446,0.000654485626385041,0.257379206595380,0.173802542474158,0.132458241657246,3.93296187661537,0.0158924919170214,2.50168625879054,1095.95864752453,0.000511327811652900,0.243193135425503,0.0192821673745436,0.00636346797017134,9.00104876078144e-06};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
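// Gates use the Rush-Larsen update x_new = x_inf - (x_inf - x_old)*exp(-dt/tau_x),
// i.e. the exact solution of the locally linearized gate ODE over one step.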
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
1afbf642d7df1ed6704ab837f36adc1771123bdc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "histogram_utils.cuh"
#include <hip/hip_cooperative_groups.h>
#include <catboost/cuda/cuda_lib/kernel/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <contrib/libs/cub/cub/warp/warp_scan.cuh>
using namespace cooperative_groups;
namespace NKernel
{
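// CopyHistogramsImpl: blockIdx.y indexes a (left, right) leaf pair; the full
// per-leaf histogram of binFeaturesInHist * numStats floats is copied from the
// left leaf's slot into the right leaf's slot with a grid-stride loop.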
__global__ void CopyHistogramsImpl(const ui32* leftLeaves,
const ui32* rightLeaves,
ui32 numStats,
ui32 binFeaturesInHist,
float* histograms) {
const ui32 leftLeafId = __ldg(leftLeaves + blockIdx.y);
const ui32 rightLeafId = __ldg(rightLeaves + blockIdx.y);
ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
float* srcHist = histograms + leftLeafId * binFeaturesInHist * numStats;
float* dstHist = histograms + rightLeafId * binFeaturesInHist * numStats;
const ui32 histSize = binFeaturesInHist * numStats;
while (i < histSize) {
WriteThrough(dstHist + i, __ldg(srcHist + i));
i += gridDim.x * blockDim.x;
}
}
void CopyHistograms(const ui32* leftLeaves,
const ui32* rightLeaves,
const ui32 leavesCount,
ui32 numStats,
ui32 binFeaturesInHist,
float* histograms,
TCudaStream stream
) {
const ui32 histSize = numStats * binFeaturesInHist;
ui32 blockSize = 256;
dim3 numBlocks;
numBlocks.z = 1;
numBlocks.y = leavesCount;
numBlocks.x = CeilDivide(histSize, blockSize);
if (numBlocks.x) {
hipLaunchKernelGGL(( CopyHistogramsImpl), dim3(numBlocks), dim3(blockSize), 0, stream, leftLeaves, rightLeaves, numStats, binFeaturesInHist, histograms);
}
}
__global__ void CopyHistogramImpl(const ui32 leftLeafId,
const ui32 rightLeafId,
ui32 numStats,
ui32 binFeaturesInHist,
float* histograms) {
ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
float* srcHist = histograms + leftLeafId * binFeaturesInHist * numStats;
float* dstHist = histograms + rightLeafId * binFeaturesInHist * numStats;
const ui32 histSize = binFeaturesInHist * numStats;
while (i < histSize) {
WriteThrough(dstHist + i, __ldg(srcHist + i));
i += gridDim.x * blockDim.x;
}
}
void CopyHistogram(const ui32 leftLeaves,
const ui32 rightLeaves,
ui32 numStats,
ui32 binFeaturesInHist,
float* histograms,
TCudaStream stream
) {
const ui32 histSize = numStats * binFeaturesInHist;
ui32 blockSize = 256;
dim3 numBlocks;
numBlocks.z = 1;
numBlocks.y = 1;
numBlocks.x = CeilDivide(histSize, blockSize);
if (numBlocks.x) {
hipLaunchKernelGGL(( CopyHistogramImpl), dim3(numBlocks), dim3(blockSize), 0, stream, leftLeaves, rightLeaves, numStats, binFeaturesInHist, histograms);
}
}
//write histogram block to histograms
__global__ void WriteReducesHistogramsImpl(int histBlockOffset,
int binFeaturesInBlock,
const ui32* histogramIds,
const float* blockHistogram,
const int binFeatureCount,
float* dstHistogram) {
const int binFeatureId = blockIdx.x * blockDim.x + threadIdx.x;
const int leafId = blockIdx.y;
const int statId = blockIdx.z;
const size_t statCount = gridDim.z;
const int dstId = histogramIds[blockIdx.y];
if (binFeatureId < binFeaturesInBlock) {
blockHistogram += binFeatureId;
blockHistogram += binFeaturesInBlock * statId;
blockHistogram += leafId * binFeaturesInBlock * statCount;
const float val = __ldg(blockHistogram);
dstHistogram += dstId * binFeatureCount * statCount;
dstHistogram += statId * binFeatureCount;
dstHistogram += histBlockOffset + binFeatureId;
dstHistogram[0] = val;
}
}
void WriteReducesHistograms(int blockOffset,
int histBlockSize,
const ui32* histogramIds,
ui32 leafCount,
ui32 statCount,
const float* blockHistogram,
const int binFeatureCount,
float* dstHistogram,
TCudaStream stream) {
const int blockSize = 128;
dim3 numBlocks;
numBlocks.x = CeilDivide(histBlockSize, blockSize);
numBlocks.y = leafCount;
numBlocks.z = statCount;
if (histBlockSize && leafCount && statCount) {
hipLaunchKernelGGL(( WriteReducesHistogramsImpl), dim3(numBlocks), dim3(blockSize), 0, stream, blockOffset,
histBlockSize,
histogramIds,
blockHistogram,
binFeatureCount,
dstHistogram);
}
}
//write histogram block to histograms
__global__ void WriteReducesHistogramImpl(int histBlockOffset,
int binFeaturesInBlock,
const ui32 dstId,
const float* blockHistogram,
const int binFeatureCount,
float* dstHistogram) {
const int binFeatureId = blockIdx.x * blockDim.x + threadIdx.x;
const int leafId = 0;
const int statId = blockIdx.z;
const size_t statCount = gridDim.z;
if (binFeatureId < binFeaturesInBlock) {
blockHistogram += binFeatureId;
blockHistogram += binFeaturesInBlock * statId;
blockHistogram += leafId * binFeaturesInBlock * statCount;
const float val = __ldg(blockHistogram);
dstHistogram += dstId * binFeatureCount * statCount;
dstHistogram += statId * binFeatureCount;
dstHistogram += histBlockOffset + binFeatureId;
dstHistogram[0] = val;
}
}
void WriteReducesHistogram(int blockOffset,
int histBlockSize,
const ui32 histogramId,
ui32 statCount,
const float* blockHistogram,
const int binFeatureCount,
float* dstHistogram,
TCudaStream stream) {
const int blockSize = 128;
dim3 numBlocks;
numBlocks.x = CeilDivide(histBlockSize, blockSize);
numBlocks.y = 1;
numBlocks.z = statCount;
if (histBlockSize && statCount) {
hipLaunchKernelGGL(( WriteReducesHistogramImpl), dim3(numBlocks), dim3(blockSize), 0, stream, blockOffset,
histBlockSize,
histogramId,
blockHistogram,
binFeatureCount,
dstHistogram);
}
}
//zero the histograms of the leaves listed in histIds
__global__ void ZeroHistogramsImpl(const ui32* histIds,
const int binFeatureCount,
float* dstHistogram) {
const int binFeatureId = blockIdx.x * blockDim.x + threadIdx.x;
const int statId = blockIdx.z;
const size_t statCount = gridDim.z;
const int dstHist = histIds[blockIdx.y];
if (binFeatureId < binFeatureCount) {
dstHistogram += dstHist * binFeatureCount * statCount;
dstHistogram += statId * binFeatureCount;
WriteThrough(dstHistogram + binFeatureId, 0.0f);
}
}
void ZeroHistograms(const ui32* histIds,
ui32 idsCount,
ui32 statCount,
const int binFeatureCount,
float* dstHistogram,
TCudaStream stream) {
const int blockSize = 256;
dim3 numBlocks;
numBlocks.x = CeilDivide(binFeatureCount, blockSize);
numBlocks.y = idsCount;
numBlocks.z = statCount;
if (idsCount && statCount) {
hipLaunchKernelGGL(( ZeroHistogramsImpl), dim3(numBlocks), dim3(blockSize), 0, stream, histIds,
binFeatureCount,
dstHistogram);
}
}
//zero the histogram of a single leaf
__global__ void ZeroHistogramImpl(const ui32 dstHist,
const int binFeatureCount,
float* dstHistogram) {
const int binFeatureId = blockIdx.x * blockDim.x + threadIdx.x;
const int statId = blockIdx.z;
const size_t statCount = gridDim.z;
if (binFeatureId < binFeatureCount) {
dstHistogram += dstHist * binFeatureCount * statCount;
dstHistogram += statId * binFeatureCount;
WriteThrough(dstHistogram + binFeatureId, 0.0f);
}
}
void ZeroHistogram(const ui32 histId,
ui32 statCount,
const int binFeatureCount,
float* dstHistogram,
TCudaStream stream) {
const int blockSize = 256;
dim3 numBlocks;
numBlocks.x = CeilDivide(binFeatureCount, blockSize);
numBlocks.y = 1;
numBlocks.z = statCount;
if (statCount) {
hipLaunchKernelGGL(( ZeroHistogramImpl), dim3(numBlocks), dim3(blockSize), 0, stream, histId,
binFeatureCount,
dstHistogram);
}
}
//in-place subtraction: histogram[fromIds[i]] -= histogram[whatIds[i]], clamping stat 0 at zero
__global__ void SubstractHistogramsImpl(const ui32* fromIds,
const ui32* whatIds,
const int binFeatureCount,
float* histogram) {
const int binFeatureId = blockIdx.x * blockDim.x + threadIdx.x;
const int fromId = __ldg(fromIds + blockIdx.y);
const int whatId = __ldg(whatIds + blockIdx.y);
const int statId = blockIdx.z;
const size_t statCount = gridDim.z;
histogram += binFeatureId;
if (binFeatureId < binFeatureCount) {
const ui64 fromOffset = fromId * binFeatureCount * statCount + statId * binFeatureCount;
const ui64 whatOffset = whatId * binFeatureCount * statCount + statId * binFeatureCount;
float newVal = __ldg(histogram + fromOffset) - __ldg(histogram + whatOffset);
if (statId == 0) {
newVal = max(newVal, 0.0f);
}
WriteThrough(histogram + fromOffset, newVal);
}
}
void SubstractHistgorams(const ui32* fromIds,
const ui32* whatIds,
const int idsCount,
const int statCount,
const int binFeatureCount,
float* dstHistogram,
TCudaStream stream) {
const int blockSize = 256;
dim3 numBlocks;
numBlocks.x = CeilDivide(binFeatureCount, blockSize);
numBlocks.y = idsCount;
numBlocks.z = statCount;
if (idsCount && statCount) {
hipLaunchKernelGGL(( SubstractHistogramsImpl), dim3(numBlocks), dim3(blockSize), 0, stream, fromIds, whatIds, binFeatureCount, dstHistogram);
}
}
//in-place subtraction for one pair: histogram[fromId] -= histogram[whatId], clamping stat 0 at zero
__global__ void SubstractHistogramImpl(const ui32 fromId,
const ui32 whatId,
const int binFeatureCount,
float* histogram) {
const int binFeatureId = blockIdx.x * blockDim.x + threadIdx.x;
const int statId = blockIdx.z;
const size_t statCount = gridDim.z;
histogram += binFeatureId;
if (binFeatureId < binFeatureCount) {
const ui64 fromOffset = fromId * binFeatureCount * statCount + statId * binFeatureCount;
const ui64 whatOffset = whatId * binFeatureCount * statCount + statId * binFeatureCount;
float newVal = __ldg(histogram + fromOffset) - __ldg(histogram + whatOffset);
if (statId == 0) {
newVal = max(newVal, 0.0f);
}
WriteThrough(histogram + fromOffset, newVal);
}
}
void SubstractHistgoram(const ui32 fromIds,
const ui32 whatIds,
const int statCount,
const int binFeatureCount,
float* dstHistogram,
TCudaStream stream) {
const int blockSize = 256;
dim3 numBlocks;
numBlocks.x = CeilDivide(binFeatureCount, blockSize);
numBlocks.y = 1;
numBlocks.z = statCount;
if (statCount) {
hipLaunchKernelGGL(( SubstractHistogramImpl), dim3(numBlocks), dim3(blockSize), 0, stream, fromIds, whatIds, binFeatureCount, dstHistogram);
}
}
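// ScanHistogramsImpl / ScanHistogramImpl: one warp per feature; each non one-hot
// feature's per-fold counts are turned into an inclusive prefix sum with
// cub::WarpScan in double precision, carrying the running total across
// consecutive 32-fold chunks. One-hot and single-fold features are skipped.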
template <int BlockSize>
__global__ void ScanHistogramsImpl(const TBinarizedFeature* features, int featureCount,
const ui32* histIds,
const int binFeatureCount,
float* histograms) {
const int featuresPerBlock = BlockSize / 32;
using WarpScan = cub::WarpScan<double>;
__shared__ typename WarpScan::TempStorage tempStorage[featuresPerBlock];
const int warpId = threadIdx.x / 32;
const int threadIdInWarp = threadIdx.x & 31;
const int featureId = blockIdx.x * featuresPerBlock + warpId;
const int histId = histIds[blockIdx.y];
const int statId = blockIdx.z;
const ui64 statCount = gridDim.z;
if (featureId < featureCount) {
features += featureId;
const bool skipFeature = features->OneHotFeature || (features->Folds <= 1);
if (!skipFeature) {
histograms += histId * binFeatureCount * statCount + statId * binFeatureCount + features->FirstFoldIndex;
const int folds = features->Folds;
const int n = ((folds + 31) / 32) * 32;
double prefixSum = 0;
for (int binOffset = 0; binOffset < n; binOffset += 32) {
const double val = (binOffset + threadIdInWarp) < folds
? histograms[(binOffset + threadIdInWarp)]
: 0.0f;
double sum = 0;
__syncwarp();
WarpScan(tempStorage[warpId]).InclusiveSum(val, sum);
__syncwarp();
sum += prefixSum;
if ((binOffset + threadIdInWarp) < folds) {
histograms[binOffset + threadIdInWarp] = sum;
}
if ((binOffset + 32) < n) {
prefixSum = cub::ShuffleIndex<32, double>(sum, 31, 0xffffffff);
}
}
}
}
};
void ScanHistograms(
const TBinarizedFeature* features, int fCount,
const ui32* ids,
const int idsCount,
const int statCount,
const int binFeatureCount,
float* histograms,
TCudaStream stream) {
const int blockSize = 256;
dim3 numBlocks;
numBlocks.x = CeilDivide(fCount * 32, blockSize);
numBlocks.y = idsCount;
numBlocks.z = statCount;
if (idsCount && statCount) {
hipLaunchKernelGGL(( ScanHistogramsImpl<blockSize>), dim3(numBlocks), dim3(blockSize), 0, stream, features, fCount, ids, binFeatureCount, histograms);
}
}
template <int BlockSize>
__global__ void ScanHistogramImpl(const TBinarizedFeature* features, int featureCount,
ui32 histId,
const int binFeatureCount,
float* histograms) {
const int featuresPerBlock = BlockSize / 32;
using WarpScan = cub::WarpScan<double>;
__shared__ typename WarpScan::TempStorage tempStorage[featuresPerBlock];
const int warpId = threadIdx.x / 32;
const int threadIdInWarp = threadIdx.x & 31;
const int featureId = blockIdx.x * featuresPerBlock + warpId;
const int statId = blockIdx.z;
const ui64 statCount = gridDim.z;
if (featureId < featureCount) {
features += featureId;
const bool skipFeature = features->OneHotFeature || (features->Folds <= 1);
if (!skipFeature) {
histograms += histId * binFeatureCount * statCount + statId * binFeatureCount + features->FirstFoldIndex;
const int folds = features->Folds;
const int n = ((folds + 31) / 32) * 32;
double prefixSum = 0;
for (int binOffset = 0; binOffset < n; binOffset += 32) {
const double val = (binOffset + threadIdInWarp) < folds
? histograms[(binOffset + threadIdInWarp)]
: 0.0f;
double sum = 0;
__syncwarp();
WarpScan(tempStorage[warpId]).InclusiveSum(val, sum);
__syncwarp();
sum += prefixSum;
if ((binOffset + threadIdInWarp) < folds) {
histograms[binOffset + threadIdInWarp] = sum;
}
if ((binOffset + 32) < n) {
prefixSum = cub::ShuffleIndex<32, double>(sum, 31, 0xffffffff);
}
}
}
}
};
void ScanHistogram(
const TBinarizedFeature* features, int fCount,
ui32 id,
const int statCount,
const int binFeatureCount,
float* histograms,
TCudaStream stream) {
const int blockSize = 256;
dim3 numBlocks;
numBlocks.x = CeilDivide(fCount * 32, blockSize);
numBlocks.y = 1;
numBlocks.z = statCount;
if (statCount) {
hipLaunchKernelGGL(( ScanHistogramImpl<blockSize>), dim3(numBlocks), dim3(blockSize), 0, stream, features, fCount, id, binFeatureCount, histograms);
}
}
}
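// Illustrative host-side sequence showing one plausible way these helpers fit
// together after a split (not part of the original source; leftIds, rightIds,
// features, leafCount, featureCount, statCount, binFeatureCount,
// binFeaturesInHist, histograms and stream are hypothetical placeholders):
//
//     // copy the parent histograms, currently stored in the left-leaf slots,
//     // into the new right-leaf slots,
//     NKernel::CopyHistograms(leftIds, rightIds, leafCount, statCount,
//                             binFeaturesInHist, histograms, stream);
//     // ... recompute histograms for the (smaller) left leaves elsewhere ...
//     // obtain the right-leaf histograms as parent - left,
//     NKernel::SubstractHistgorams(rightIds, leftIds, leafCount, statCount,
//                                  binFeatureCount, histograms, stream);
//     // and convert per-fold counts into prefix sums for split scoring.
//     NKernel::ScanHistograms(features, featureCount, rightIds, leafCount,
//                             statCount, binFeatureCount, histograms, stream);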
| 1afbf642d7df1ed6704ab837f36adc1771123bdc.cu | #include "histogram_utils.cuh"
#include <cooperative_groups.h>
#include <catboost/cuda/cuda_lib/kernel/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <contrib/libs/cub/cub/warp/warp_scan.cuh>
using namespace cooperative_groups;
namespace NKernel
{
__global__ void CopyHistogramsImpl(const ui32* leftLeaves,
const ui32* rightLeaves,
ui32 numStats,
ui32 binFeaturesInHist,
float* histograms) {
const ui32 leftLeafId = __ldg(leftLeaves + blockIdx.y);
const ui32 rightLeafId = __ldg(rightLeaves + blockIdx.y);
ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
float* srcHist = histograms + leftLeafId * binFeaturesInHist * numStats;
float* dstHist = histograms + rightLeafId * binFeaturesInHist * numStats;
const ui32 histSize = binFeaturesInHist * numStats;
while (i < histSize) {
WriteThrough(dstHist + i, __ldg(srcHist + i));
i += gridDim.x * blockDim.x;
}
}
void CopyHistograms(const ui32* leftLeaves,
const ui32* rightLeaves,
const ui32 leavesCount,
ui32 numStats,
ui32 binFeaturesInHist,
float* histograms,
TCudaStream stream
) {
const ui32 histSize = numStats * binFeaturesInHist;
ui32 blockSize = 256;
dim3 numBlocks;
numBlocks.z = 1;
numBlocks.y = leavesCount;
numBlocks.x = CeilDivide(histSize, blockSize);
if (numBlocks.x) {
CopyHistogramsImpl<<<numBlocks, blockSize, 0, stream>>>(leftLeaves, rightLeaves, numStats, binFeaturesInHist, histograms);
}
}
__global__ void CopyHistogramImpl(const ui32 leftLeafId,
const ui32 rightLeafId,
ui32 numStats,
ui32 binFeaturesInHist,
float* histograms) {
ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
float* srcHist = histograms + leftLeafId * binFeaturesInHist * numStats;
float* dstHist = histograms + rightLeafId * binFeaturesInHist * numStats;
const ui32 histSize = binFeaturesInHist * numStats;
while (i < histSize) {
WriteThrough(dstHist + i, __ldg(srcHist + i));
i += gridDim.x * blockDim.x;
}
}
void CopyHistogram(const ui32 leftLeaves,
const ui32 rightLeaves,
ui32 numStats,
ui32 binFeaturesInHist,
float* histograms,
TCudaStream stream
) {
const ui32 histSize = numStats * binFeaturesInHist;
ui32 blockSize = 256;
dim3 numBlocks;
numBlocks.z = 1;
numBlocks.y = 1;
numBlocks.x = CeilDivide(histSize, blockSize);
if (numBlocks.x) {
CopyHistogramImpl<<<numBlocks, blockSize, 0, stream>>>(leftLeaves, rightLeaves, numStats, binFeaturesInHist, histograms);
}
}
//write histogram block to histograms
__global__ void WriteReducesHistogramsImpl(int histBlockOffset,
int binFeaturesInBlock,
const ui32* histogramIds,
const float* blockHistogram,
const int binFeatureCount,
float* dstHistogram) {
const int binFeatureId = blockIdx.x * blockDim.x + threadIdx.x;
const int leafId = blockIdx.y;
const int statId = blockIdx.z;
const size_t statCount = gridDim.z;
const int dstId = histogramIds[blockIdx.y];
if (binFeatureId < binFeaturesInBlock) {
blockHistogram += binFeatureId;
blockHistogram += binFeaturesInBlock * statId;
blockHistogram += leafId * binFeaturesInBlock * statCount;
const float val = __ldg(blockHistogram);
dstHistogram += dstId * binFeatureCount * statCount;
dstHistogram += statId * binFeatureCount;
dstHistogram += histBlockOffset + binFeatureId;
dstHistogram[0] = val;
}
}
void WriteReducesHistograms(int blockOffset,
int histBlockSize,
const ui32* histogramIds,
ui32 leafCount,
ui32 statCount,
const float* blockHistogram,
const int binFeatureCount,
float* dstHistogram,
TCudaStream stream) {
const int blockSize = 128;
dim3 numBlocks;
numBlocks.x = CeilDivide(histBlockSize, blockSize);
numBlocks.y = leafCount;
numBlocks.z = statCount;
if (histBlockSize && leafCount && statCount) {
WriteReducesHistogramsImpl<<<numBlocks, blockSize, 0, stream>>>(blockOffset,
histBlockSize,
histogramIds,
blockHistogram,
binFeatureCount,
dstHistogram);
}
}
//write histogram block to histograms
__global__ void WriteReducesHistogramImpl(int histBlockOffset,
int binFeaturesInBlock,
const ui32 dstId,
const float* blockHistogram,
const int binFeatureCount,
float* dstHistogram) {
const int binFeatureId = blockIdx.x * blockDim.x + threadIdx.x;
const int leafId = 0;
const int statId = blockIdx.z;
const size_t statCount = gridDim.z;
if (binFeatureId < binFeaturesInBlock) {
blockHistogram += binFeatureId;
blockHistogram += binFeaturesInBlock * statId;
blockHistogram += leafId * binFeaturesInBlock * statCount;
const float val = __ldg(blockHistogram);
dstHistogram += dstId * binFeatureCount * statCount;
dstHistogram += statId * binFeatureCount;
dstHistogram += histBlockOffset + binFeatureId;
dstHistogram[0] = val;
}
}
void WriteReducesHistogram(int blockOffset,
int histBlockSize,
const ui32 histogramId,
ui32 statCount,
const float* blockHistogram,
const int binFeatureCount,
float* dstHistogram,
TCudaStream stream) {
const int blockSize = 128;
dim3 numBlocks;
numBlocks.x = CeilDivide(histBlockSize, blockSize);
numBlocks.y = 1;
numBlocks.z = statCount;
if (histBlockSize && statCount) {
WriteReducesHistogramImpl<<<numBlocks, blockSize, 0, stream>>>(blockOffset,
histBlockSize,
histogramId,
blockHistogram,
binFeatureCount,
dstHistogram);
}
}
//zero the histograms of the leaves listed in histIds
__global__ void ZeroHistogramsImpl(const ui32* histIds,
const int binFeatureCount,
float* dstHistogram) {
const int binFeatureId = blockIdx.x * blockDim.x + threadIdx.x;
const int statId = blockIdx.z;
const size_t statCount = gridDim.z;
const int dstHist = histIds[blockIdx.y];
if (binFeatureId < binFeatureCount) {
dstHistogram += dstHist * binFeatureCount * statCount;
dstHistogram += statId * binFeatureCount;
WriteThrough(dstHistogram + binFeatureId, 0.0f);
}
}
void ZeroHistograms(const ui32* histIds,
ui32 idsCount,
ui32 statCount,
const int binFeatureCount,
float* dstHistogram,
TCudaStream stream) {
const int blockSize = 256;
dim3 numBlocks;
numBlocks.x = CeilDivide(binFeatureCount, blockSize);
numBlocks.y = idsCount;
numBlocks.z = statCount;
if (idsCount && statCount) {
ZeroHistogramsImpl<<<numBlocks, blockSize, 0, stream>>>(histIds,
binFeatureCount,
dstHistogram);
}
}
//zero the histogram of a single leaf
__global__ void ZeroHistogramImpl(const ui32 dstHist,
const int binFeatureCount,
float* dstHistogram) {
const int binFeatureId = blockIdx.x * blockDim.x + threadIdx.x;
const int statId = blockIdx.z;
const size_t statCount = gridDim.z;
if (binFeatureId < binFeatureCount) {
dstHistogram += dstHist * binFeatureCount * statCount;
dstHistogram += statId * binFeatureCount;
WriteThrough(dstHistogram + binFeatureId, 0.0f);
}
}
void ZeroHistogram(const ui32 histId,
ui32 statCount,
const int binFeatureCount,
float* dstHistogram,
TCudaStream stream) {
const int blockSize = 256;
dim3 numBlocks;
numBlocks.x = CeilDivide(binFeatureCount, blockSize);
numBlocks.y = 1;
numBlocks.z = statCount;
if (statCount) {
ZeroHistogramImpl<<<numBlocks, blockSize, 0, stream>>>(histId,
binFeatureCount,
dstHistogram);
}
}
//in-place subtraction: histogram[fromIds[i]] -= histogram[whatIds[i]], clamping stat 0 at zero
__global__ void SubstractHistogramsImpl(const ui32* fromIds,
const ui32* whatIds,
const int binFeatureCount,
float* histogram) {
const int binFeatureId = blockIdx.x * blockDim.x + threadIdx.x;
const int fromId = __ldg(fromIds + blockIdx.y);
const int whatId = __ldg(whatIds + blockIdx.y);
const int statId = blockIdx.z;
const size_t statCount = gridDim.z;
histogram += binFeatureId;
if (binFeatureId < binFeatureCount) {
const ui64 fromOffset = fromId * binFeatureCount * statCount + statId * binFeatureCount;
const ui64 whatOffset = whatId * binFeatureCount * statCount + statId * binFeatureCount;
float newVal = __ldg(histogram + fromOffset) - __ldg(histogram + whatOffset);
if (statId == 0) {
newVal = max(newVal, 0.0f);
}
WriteThrough(histogram + fromOffset, newVal);
}
}
void SubstractHistgorams(const ui32* fromIds,
const ui32* whatIds,
const int idsCount,
const int statCount,
const int binFeatureCount,
float* dstHistogram,
TCudaStream stream) {
const int blockSize = 256;
dim3 numBlocks;
numBlocks.x = CeilDivide(binFeatureCount, blockSize);
numBlocks.y = idsCount;
numBlocks.z = statCount;
if (idsCount && statCount) {
SubstractHistogramsImpl<<<numBlocks, blockSize, 0, stream>>>(fromIds, whatIds, binFeatureCount, dstHistogram);
}
}
//in-place subtraction for one pair: histogram[fromId] -= histogram[whatId], clamping stat 0 at zero
__global__ void SubstractHistogramImpl(const ui32 fromId,
const ui32 whatId,
const int binFeatureCount,
float* histogram) {
const int binFeatureId = blockIdx.x * blockDim.x + threadIdx.x;
const int statId = blockIdx.z;
const size_t statCount = gridDim.z;
histogram += binFeatureId;
if (binFeatureId < binFeatureCount) {
const ui64 fromOffset = fromId * binFeatureCount * statCount + statId * binFeatureCount;
const ui64 whatOffset = whatId * binFeatureCount * statCount + statId * binFeatureCount;
float newVal = __ldg(histogram + fromOffset) - __ldg(histogram + whatOffset);
if (statId == 0) {
newVal = max(newVal, 0.0f);
}
WriteThrough(histogram + fromOffset, newVal);
}
}
void SubstractHistgoram(const ui32 fromIds,
const ui32 whatIds,
const int statCount,
const int binFeatureCount,
float* dstHistogram,
TCudaStream stream) {
const int blockSize = 256;
dim3 numBlocks;
numBlocks.x = CeilDivide(binFeatureCount, blockSize);
numBlocks.y = 1;
numBlocks.z = statCount;
if (statCount) {
SubstractHistogramImpl<<<numBlocks, blockSize, 0, stream>>>(fromIds, whatIds, binFeatureCount, dstHistogram);
}
}
template <int BlockSize>
__global__ void ScanHistogramsImpl(const TBinarizedFeature* features, int featureCount,
const ui32* histIds,
const int binFeatureCount,
float* histograms) {
const int featuresPerBlock = BlockSize / 32;
using WarpScan = cub::WarpScan<double>;
__shared__ typename WarpScan::TempStorage tempStorage[featuresPerBlock];
const int warpId = threadIdx.x / 32;
const int threadIdInWarp = threadIdx.x & 31;
const int featureId = blockIdx.x * featuresPerBlock + warpId;
const int histId = histIds[blockIdx.y];
const int statId = blockIdx.z;
const ui64 statCount = gridDim.z;
if (featureId < featureCount) {
features += featureId;
const bool skipFeature = features->OneHotFeature || (features->Folds <= 1);
if (!skipFeature) {
histograms += histId * binFeatureCount * statCount + statId * binFeatureCount + features->FirstFoldIndex;
const int folds = features->Folds;
const int n = ((folds + 31) / 32) * 32;
double prefixSum = 0;
for (int binOffset = 0; binOffset < n; binOffset += 32) {
const double val = (binOffset + threadIdInWarp) < folds
? histograms[(binOffset + threadIdInWarp)]
: 0.0f;
double sum = 0;
__syncwarp();
WarpScan(tempStorage[warpId]).InclusiveSum(val, sum);
__syncwarp();
sum += prefixSum;
if ((binOffset + threadIdInWarp) < folds) {
histograms[binOffset + threadIdInWarp] = sum;
}
if ((binOffset + 32) < n) {
prefixSum = cub::ShuffleIndex<32, double>(sum, 31, 0xffffffff);
}
}
}
}
};
void ScanHistograms(
const TBinarizedFeature* features, int fCount,
const ui32* ids,
const int idsCount,
const int statCount,
const int binFeatureCount,
float* histograms,
TCudaStream stream) {
const int blockSize = 256;
dim3 numBlocks;
numBlocks.x = CeilDivide(fCount * 32, blockSize);
numBlocks.y = idsCount;
numBlocks.z = statCount;
if (idsCount && statCount) {
ScanHistogramsImpl<blockSize><<<numBlocks, blockSize, 0, stream>>>(features, fCount, ids, binFeatureCount, histograms);
}
}
template <int BlockSize>
__global__ void ScanHistogramImpl(const TBinarizedFeature* features, int featureCount,
ui32 histId,
const int binFeatureCount,
float* histograms) {
const int featuresPerBlock = BlockSize / 32;
using WarpScan = cub::WarpScan<double>;
__shared__ typename WarpScan::TempStorage tempStorage[featuresPerBlock];
const int warpId = threadIdx.x / 32;
const int threadIdInWarp = threadIdx.x & 31;
const int featureId = blockIdx.x * featuresPerBlock + warpId;
const int statId = blockIdx.z;
const ui64 statCount = gridDim.z;
if (featureId < featureCount) {
features += featureId;
const bool skipFeature = features->OneHotFeature || (features->Folds <= 1);
if (!skipFeature) {
histograms += histId * binFeatureCount * statCount + statId * binFeatureCount + features->FirstFoldIndex;
const int folds = features->Folds;
const int n = ((folds + 31) / 32) * 32;
double prefixSum = 0;
for (int binOffset = 0; binOffset < n; binOffset += 32) {
const double val = (binOffset + threadIdInWarp) < folds
? histograms[(binOffset + threadIdInWarp)]
: 0.0f;
double sum = 0;
__syncwarp();
WarpScan(tempStorage[warpId]).InclusiveSum(val, sum);
__syncwarp();
sum += prefixSum;
if ((binOffset + threadIdInWarp) < folds) {
histograms[binOffset + threadIdInWarp] = sum;
}
if ((binOffset + 32) < n) {
prefixSum = cub::ShuffleIndex<32, double>(sum, 31, 0xffffffff);
}
}
}
}
};
void ScanHistogram(
const TBinarizedFeature* features, int fCount,
ui32 id,
const int statCount,
const int binFeatureCount,
float* histograms,
TCudaStream stream) {
const int blockSize = 256;
dim3 numBlocks;
numBlocks.x = CeilDivide(fCount * 32, blockSize);
numBlocks.y = 1;
numBlocks.z = statCount;
if (statCount) {
ScanHistogramImpl<blockSize><<<numBlocks, blockSize, 0, stream>>>(features, fCount, id, binFeatureCount, histograms);
}
}
}
|
dbf927e2ec187abdb1a8a994c8de01e4b6f739ba.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@generated from magmablas/ztrtri_diag_vbatched.cu, normal z -> c, Thu Oct 8 23:05:38 2020
@author Peng Du
@author Tingxing Dong
@author Mark Gates
@author Azzam Haidar
File named ctrtri_diag.cu to avoid name conflict with src/ctrtri.o
in the library. The actual kernels are in ctrtri_lower.cu and ctrtri_upper.cu
*/
#include "magma_internal.h"
#define TRTRI_BATCHED
#include "ctrtri.cuh"
/***************************************************************************//**
Purpose
-------
ctrtri_diag inverts the NB x NB diagonal blocks of A.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, uplo specifies whether the matrix A is an upper or
lower triangular matrix as follows:
- = MagmaUpper: A is an upper triangular matrix.
- = MagmaLower: A is a lower triangular matrix.
@param[in]
diag magma_diag_t.
On entry, diag specifies whether or not A is unit triangular
as follows:
- = MagmaUnit: A is assumed to be unit triangular.
- = MagmaNonUnit: A is not assumed to be unit triangular.
@param[in]
nmax INTEGER.
maximum value of n.
@param[in]
n INTEGER array, dimension(batchCount)
On entry, each entry specifies the order of the corresponding matrix A. N >= 0.
@param[in]
dA_array COMPLEX array of dimension ( ldda, n )
The triangular matrix A.
\n
If UPLO = 'U', the leading N-by-N upper triangular part of A
contains the upper triangular matrix, and the strictly lower
triangular part of A is not referenced.
\n
If UPLO = 'L', the leading N-by-N lower triangular part of A
contains the lower triangular matrix, and the strictly upper
triangular part of A is not referenced.
\n
If DIAG = 'U', the diagonal elements of A are also not referenced
and are assumed to be 1.
@param[in]
ldda INTEGER.
The leading dimension of the array A. LDDA >= max(1,N).
@param[out]
dinvA_array COMPLEX array of dimension (NB, ceil(n/NB)*NB),
where NB = 128.
On exit, contains inverses of the NB-by-NB diagonal blocks of A.
@param[in]
resetozero INTEGER
If not zero, each array dinvA will be reset to all zeros
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_trtri_batched
*******************************************************************************/
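// Illustrative call (not from the original source; dA_array, d_n, d_ldda,
// dinvA_array, batchCount and queue are hypothetical placeholders the caller
// is assumed to have prepared on the device/queue):
//
//     // a matrix with n = 300 needs a dinvA buffer of NB x (ceil(300/NB)*NB)
//     // = 128 x 384 elements, since NB = 128
//     magmablas_ctrtri_diag_vbatched( MagmaLower, MagmaNonUnit,
//                                     nmax, d_n,
//                                     dA_array, d_ldda,
//                                     dinvA_array,
//                                     1, batchCount, queue );   // resetozero = 1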
extern "C" void
magmablas_ctrtri_diag_vbatched(
magma_uplo_t uplo, magma_diag_t diag, magma_int_t nmax, magma_int_t *n,
magmaFloatComplex const * const *dA_array, magma_int_t *ldda,
magmaFloatComplex **dinvA_array,
magma_int_t resetozero, magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t info = 0;
if (uplo != MagmaLower && uplo != MagmaUpper)
info = -1;
else if (diag != MagmaNonUnit && diag != MagmaUnit)
info = -2;
else if (nmax < 0)
info = -3;
//else if (ldda < n)
// info = -5;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info
}
// allocate temp buffers for dimensions
magma_int_t *mm, *nn;
magma_imalloc( &mm, batchCount );
magma_imalloc( &nn, batchCount );
int nblocks = magma_ceildiv( nmax, IB );
if ( resetozero ) {
// roundup dimensions in 'n' and write it to 'mm' : magma_roundup( n, NB )
magma_ivec_roundup( batchCount, n, NB, mm, queue);
// set vector 'nn' to NB
magma_ivec_setc( batchCount, nn, NB, queue);
magma_int_t max_m = magma_roundup( nmax, NB );
magma_int_t max_n = NB;
//magmablas_claset_batched (MagmaFull, magma_roundup( n, NB ), NB, MAGMA_C_ZERO, MAGMA_C_ZERO, dinvA_array, magma_roundup( n, NB ), batchCount, queue);
magmablas_claset_vbatched(MagmaFull, max_m, max_n, mm, nn, MAGMA_C_ZERO, MAGMA_C_ZERO, dinvA_array, mm, batchCount, queue);
}
// Note: using cudaMemset here instead would require clearing the whole dinvA
// vectors at their originally allocated size, which would mean passing
// dinvA_length as an extra input parameter; this was tested and was slower.
// If the buffer being reused is not the largest size computed by the
// high-level getrf_batched API, skipping the reset is a bug, so
// magmablas_claset_vbatched must be used.
magma_int_t max_batchCount = queue->get_maxBatch();
for(magma_int_t i = 0; i < batchCount; i+=max_batchCount) {
magma_int_t ibatch = min(max_batchCount, batchCount-i);
if ( uplo == MagmaLower ) {
// invert diagonal IB x IB inner blocks
dim3 diaggrid( nblocks, 1, ibatch ); // emulate 3D grid
hipLaunchKernelGGL(( ctrtri_diag_lower_kernel_vbatched), dim3(diaggrid), dim3(IB), 0, queue->cuda_stream() , diag, n+i, dA_array+i, ldda+i, dinvA_array+i );
// build up NB x NB blocks (assuming IB=16 here):
// use 16 x 16 blocks to build 32 x 32 blocks, 1 x (1 x npages) grid, 4 x 4 threads;
// then 32 x 32 blocks to build 64 x 64 blocks, 1 x (2 x npages) grid, 8 x 4 threads;
// then 64 x 64 blocks to build 128 x 128 blocks, 1 x (4 x npages) grid, 16 x 4 threads;
// then 128 x 128 blocks to build 256 x 256 blocks, 2 x (8 x npages) grid, 16 x 4 threads.
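// Illustrative arithmetic for nmax = 256 (not from the original comments):
// jb=16 -> kb=32, npages=8, 4x4 threads, grid 1 x 8 x ibatch;
// jb=32 -> kb=64, npages=4, 8x4 threads, grid 1 x 8 x ibatch;
// jb=64 -> kb=128, npages=2, 16x4 threads, grid 1 x 8 x ibatch, after which
// jb reaches NB = 128 and the loop stops.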
for( int jb=IB; jb < NB; jb *= 2 ) {
int kb = jb*2;
int npages = magma_ceildiv( nmax, kb );
dim3 threads( (jb <= 32 ? jb/4 : 16), 4 );
dim3 grid( jb/(threads.x*threads.y), npages*(jb/16), ibatch ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x
//printf( "n %d, jb %d, grid %d x %d (%d x %d)\n", n, jb, grid.x, grid.y, grid.y / npages, npages );
switch (jb) {
case 16:
hipLaunchKernelGGL(( triple_cgemm16_part1_lower_kernel_vbatched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n+i, dA_array+i, ldda+i, dinvA_array+i, jb, npages );
hipLaunchKernelGGL(( triple_cgemm16_part2_lower_kernel_vbatched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n+i, dA_array+i, ldda+i, dinvA_array+i, jb, npages );
break;
case 32:
hipLaunchKernelGGL(( triple_cgemm32_part1_lower_kernel_vbatched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n+i, dA_array+i, ldda+i, dinvA_array+i, jb, npages );
hipLaunchKernelGGL(( triple_cgemm32_part2_lower_kernel_vbatched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n+i, dA_array+i, ldda+i, dinvA_array+i, jb, npages );
break;
case 64:
hipLaunchKernelGGL(( triple_cgemm64_part1_lower_kernel_vbatched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n+i, dA_array+i, ldda+i, dinvA_array+i, jb, npages );
hipLaunchKernelGGL(( triple_cgemm64_part2_lower_kernel_vbatched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n+i, dA_array+i, ldda+i, dinvA_array+i, jb, npages );
break;
default:
hipLaunchKernelGGL(( triple_cgemm_above64_part1_lower_kernel_vbatched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n+i, dA_array+i, ldda+i, dinvA_array+i, jb, npages );
hipLaunchKernelGGL(( triple_cgemm_above64_part2_lower_kernel_vbatched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n+i, dA_array+i, ldda+i, dinvA_array+i, jb, npages );
hipLaunchKernelGGL(( triple_cgemm_above64_part3_lower_kernel_vbatched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n+i, dA_array+i, ldda+i, dinvA_array+i, jb, npages );
break;
}
if ( kb >= nmax ) break;
}
}
else {
dim3 diaggrid( nblocks, 1, ibatch ); // emulate 3D grid
hipLaunchKernelGGL(( ctrtri_diag_upper_kernel_vbatched), dim3(diaggrid), dim3(IB), 0, queue->cuda_stream() , diag, n+i, dA_array+i, ldda+i, dinvA_array+i );
// update the inverse up to the size of IB
for( int jb=IB; jb < NB; jb*=2 ) {
int kb = jb*2;
int npages = magma_ceildiv( nmax, kb );
dim3 threads( (jb <= 32 ? jb/4 : 16), 4 );
dim3 grid( jb/(threads.x*threads.y), npages*(jb/16), ibatch ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x
switch (jb) {
case 16:
hipLaunchKernelGGL(( triple_cgemm16_part1_upper_kernel_vbatched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n+i, dA_array+i, ldda+i, dinvA_array+i, jb, npages );
hipLaunchKernelGGL(( triple_cgemm16_part2_upper_kernel_vbatched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n+i, dA_array+i, ldda+i, dinvA_array+i, jb, npages );
break;
case 32:
hipLaunchKernelGGL(( triple_cgemm32_part1_upper_kernel_vbatched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n+i, dA_array+i, ldda+i, dinvA_array+i, jb, npages );
hipLaunchKernelGGL(( triple_cgemm32_part2_upper_kernel_vbatched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n+i, dA_array+i, ldda+i, dinvA_array+i, jb, npages );
break;
case 64:
hipLaunchKernelGGL(( triple_cgemm64_part1_upper_kernel_vbatched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n+i, dA_array+i, ldda+i, dinvA_array+i, jb, npages );
hipLaunchKernelGGL(( triple_cgemm64_part2_upper_kernel_vbatched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n+i, dA_array+i, ldda+i, dinvA_array+i, jb, npages );
break;
default:
hipLaunchKernelGGL(( triple_cgemm_above64_part1_upper_kernel_vbatched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n+i, dA_array+i, ldda+i, dinvA_array+i, jb, npages );
hipLaunchKernelGGL(( triple_cgemm_above64_part2_upper_kernel_vbatched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n+i, dA_array+i, ldda+i, dinvA_array+i, jb, npages );
hipLaunchKernelGGL(( triple_cgemm_above64_part3_upper_kernel_vbatched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n+i, dA_array+i, ldda+i, dinvA_array+i, jb, npages );
break;
}
if ( kb >= nmax ) break;
}
}
}
// free allocated buffers
magma_free(mm);
magma_free(nn);
}
| dbf927e2ec187abdb1a8a994c8de01e4b6f739ba.cu | /*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@generated from magmablas/ztrtri_diag_vbatched.cu, normal z -> c, Thu Oct 8 23:05:38 2020
@author Peng Du
@author Tingxing Dong
@author Mark Gates
@author Azzam Haidar
File named ctrtri_diag.cu to avoid name conflict with src/ctrtri.o
in the library. The actual kernels are in ctrtri_lower.cu and ctrtri_upper.cu
*/
#include "magma_internal.h"
#define TRTRI_BATCHED
#include "ctrtri.cuh"
/***************************************************************************//**
Purpose
-------
ctrtri_diag inverts the NB x NB diagonal blocks of A.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, uplo specifies whether the matrix A is an upper or
lower triangular matrix as follows:
- = MagmaUpper: A is an upper triangular matrix.
- = MagmaLower: A is a lower triangular matrix.
@param[in]
diag magma_diag_t.
On entry, diag specifies whether or not A is unit triangular
as follows:
- = MagmaUnit: A is assumed to be unit triangular.
- = MagmaNonUnit: A is not assumed to be unit triangular.
@param[in]
nmax INTEGER.
maximum value of n.
@param[in]
n INTEGER array, dimension(batchCount)
On entry, each entry specifies the order of the corresponding matrix A. N >= 0.
@param[in]
dA_array COMPLEX array of dimension ( ldda, n )
The triangular matrix A.
\n
If UPLO = 'U', the leading N-by-N upper triangular part of A
contains the upper triangular matrix, and the strictly lower
triangular part of A is not referenced.
\n
If UPLO = 'L', the leading N-by-N lower triangular part of A
contains the lower triangular matrix, and the strictly upper
triangular part of A is not referenced.
\n
If DIAG = 'U', the diagonal elements of A are also not referenced
and are assumed to be 1.
@param[in]
ldda INTEGER.
The leading dimension of the array A. LDDA >= max(1,N).
@param[out]
dinvA_array COMPLEX array of dimension (NB, ceil(n/NB)*NB),
where NB = 128.
On exit, contains inverses of the NB-by-NB diagonal blocks of A.
@param[in]
resetozero INTEGER
If not zero, each array dinvA will be reset to all zeros
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_trtri_batched
*******************************************************************************/
extern "C" void
magmablas_ctrtri_diag_vbatched(
magma_uplo_t uplo, magma_diag_t diag, magma_int_t nmax, magma_int_t *n,
magmaFloatComplex const * const *dA_array, magma_int_t *ldda,
magmaFloatComplex **dinvA_array,
magma_int_t resetozero, magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t info = 0;
if (uplo != MagmaLower && uplo != MagmaUpper)
info = -1;
else if (diag != MagmaNonUnit && diag != MagmaUnit)
info = -2;
else if (nmax < 0)
info = -3;
//else if (ldda < n)
// info = -5;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info
}
// allocate temp buffers for dimensions
magma_int_t *mm, *nn;
magma_imalloc( &mm, batchCount );
magma_imalloc( &nn, batchCount );
int nblocks = magma_ceildiv( nmax, IB );
if ( resetozero ) {
// roundup dimensions in 'n' and write it to 'mm' : magma_roundup( n, NB )
magma_ivec_roundup( batchCount, n, NB, mm, queue);
// set vector 'nn' to NB
magma_ivec_setc( batchCount, nn, NB, queue);
magma_int_t max_m = magma_roundup( nmax, NB );
magma_int_t max_n = NB;
//magmablas_claset_batched (MagmaFull, magma_roundup( n, NB ), NB, MAGMA_C_ZERO, MAGMA_C_ZERO, dinvA_array, magma_roundup( n, NB ), batchCount, queue);
magmablas_claset_vbatched(MagmaFull, max_m, max_n, mm, nn, MAGMA_C_ZERO, MAGMA_C_ZERO, dinvA_array, mm, batchCount, queue);
}
// Note: using cudaMemset here instead would require clearing the whole dinvA
// vectors at their originally allocated size, which would mean passing
// dinvA_length as an extra input parameter; this was tested and was slower.
// If the buffer being reused is not the largest size computed by the
// high-level getrf_batched API, skipping the reset is a bug, so
// magmablas_claset_vbatched must be used.
magma_int_t max_batchCount = queue->get_maxBatch();
for(magma_int_t i = 0; i < batchCount; i+=max_batchCount) {
magma_int_t ibatch = min(max_batchCount, batchCount-i);
if ( uplo == MagmaLower ) {
// invert diagonal IB x IB inner blocks
dim3 diaggrid( nblocks, 1, ibatch ); // emulate 3D grid
ctrtri_diag_lower_kernel_vbatched<<< diaggrid, IB, 0, queue->cuda_stream() >>>( diag, n+i, dA_array+i, ldda+i, dinvA_array+i );
// build up NB x NB blocks (assuming IB=16 here):
// use 16 x 16 blocks to build 32 x 32 blocks, 1 x (1 x npages) grid, 4 x 4 threads;
// then 32 x 32 blocks to build 64 x 64 blocks, 1 x (2 x npages) grid, 8 x 4 threads;
// then 64 x 64 blocks to build 128 x 128 blocks, 1 x (4 x npages) grid, 16 x 4 threads;
// then 128 x 128 blocks to build 256 x 256 blocks, 2 x (8 x npages) grid, 16 x 4 threads.
for( int jb=IB; jb < NB; jb *= 2 ) {
int kb = jb*2;
int npages = magma_ceildiv( nmax, kb );
dim3 threads( (jb <= 32 ? jb/4 : 16), 4 );
dim3 grid( jb/(threads.x*threads.y), npages*(jb/16), ibatch ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x
//printf( "n %d, jb %d, grid %d x %d (%d x %d)\n", n, jb, grid.x, grid.y, grid.y / npages, npages );
switch (jb) {
case 16:
triple_cgemm16_part1_lower_kernel_vbatched<<< grid, threads, 0, queue->cuda_stream() >>>( n+i, dA_array+i, ldda+i, dinvA_array+i, jb, npages );
triple_cgemm16_part2_lower_kernel_vbatched<<< grid, threads, 0, queue->cuda_stream() >>>( n+i, dA_array+i, ldda+i, dinvA_array+i, jb, npages );
break;
case 32:
triple_cgemm32_part1_lower_kernel_vbatched<<< grid, threads, 0, queue->cuda_stream() >>>( n+i, dA_array+i, ldda+i, dinvA_array+i, jb, npages );
triple_cgemm32_part2_lower_kernel_vbatched<<< grid, threads, 0, queue->cuda_stream() >>>( n+i, dA_array+i, ldda+i, dinvA_array+i, jb, npages );
break;
case 64:
triple_cgemm64_part1_lower_kernel_vbatched<<< grid, threads, 0, queue->cuda_stream() >>>( n+i, dA_array+i, ldda+i, dinvA_array+i, jb, npages );
triple_cgemm64_part2_lower_kernel_vbatched<<< grid, threads, 0, queue->cuda_stream() >>>( n+i, dA_array+i, ldda+i, dinvA_array+i, jb, npages );
break;
default:
triple_cgemm_above64_part1_lower_kernel_vbatched<<< grid, threads, 0, queue->cuda_stream() >>>( n+i, dA_array+i, ldda+i, dinvA_array+i, jb, npages );
triple_cgemm_above64_part2_lower_kernel_vbatched<<< grid, threads, 0, queue->cuda_stream() >>>( n+i, dA_array+i, ldda+i, dinvA_array+i, jb, npages );
triple_cgemm_above64_part3_lower_kernel_vbatched<<< grid, threads, 0, queue->cuda_stream() >>>( n+i, dA_array+i, ldda+i, dinvA_array+i, jb, npages );
break;
}
if ( kb >= nmax ) break;
}
}
else {
dim3 diaggrid( nblocks, 1, ibatch ); // emulate 3D grid
ctrtri_diag_upper_kernel_vbatched<<< diaggrid, IB, 0, queue->cuda_stream() >>>( diag, n+i, dA_array+i, ldda+i, dinvA_array+i );
// update the inverse up to the size of IB
for( int jb=IB; jb < NB; jb*=2 ) {
int kb = jb*2;
int npages = magma_ceildiv( nmax, kb );
dim3 threads( (jb <= 32 ? jb/4 : 16), 4 );
dim3 grid( jb/(threads.x*threads.y), npages*(jb/16), ibatch ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x
switch (jb) {
case 16:
triple_cgemm16_part1_upper_kernel_vbatched<<< grid, threads, 0, queue->cuda_stream() >>>( n+i, dA_array+i, ldda+i, dinvA_array+i, jb, npages );
triple_cgemm16_part2_upper_kernel_vbatched<<< grid, threads, 0, queue->cuda_stream() >>>( n+i, dA_array+i, ldda+i, dinvA_array+i, jb, npages );
break;
case 32:
triple_cgemm32_part1_upper_kernel_vbatched<<< grid, threads, 0, queue->cuda_stream() >>>( n+i, dA_array+i, ldda+i, dinvA_array+i, jb, npages );
triple_cgemm32_part2_upper_kernel_vbatched<<< grid, threads, 0, queue->cuda_stream() >>>( n+i, dA_array+i, ldda+i, dinvA_array+i, jb, npages );
break;
case 64:
triple_cgemm64_part1_upper_kernel_vbatched<<< grid, threads, 0, queue->cuda_stream() >>>( n+i, dA_array+i, ldda+i, dinvA_array+i, jb, npages );
triple_cgemm64_part2_upper_kernel_vbatched<<< grid, threads, 0, queue->cuda_stream() >>>( n+i, dA_array+i, ldda+i, dinvA_array+i, jb, npages );
break;
default:
triple_cgemm_above64_part1_upper_kernel_vbatched<<< grid, threads, 0, queue->cuda_stream() >>>( n+i, dA_array+i, ldda+i, dinvA_array+i, jb, npages );
triple_cgemm_above64_part2_upper_kernel_vbatched<<< grid, threads, 0, queue->cuda_stream() >>>( n+i, dA_array+i, ldda+i, dinvA_array+i, jb, npages );
triple_cgemm_above64_part3_upper_kernel_vbatched<<< grid, threads, 0, queue->cuda_stream() >>>( n+i, dA_array+i, ldda+i, dinvA_array+i, jb, npages );
break;
}
if ( kb >= nmax ) break;
}
}
}
// free allocated buffers
magma_free(mm);
magma_free(nn);
}
|
e952a241797e392ab594438ed8c44e05be13700f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define STB_IMAGE_IMPLEMENTATION
#include "stb/stb_image.h"
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb/stb_image_write.h"
#include <iostream>
#include <stdio.h>
__device__ int getMax(int value1, int value2)
{
return value1 > value2 ? value1 : value2;
}
__device__ int getMin(int value1, int value2)
{
return value1 < value2 ? value1 : value2;
}
__device__ int clamp(int value, int minValue, int maxValue)
{
return getMax( getMin( value, maxValue ), minValue );
}
__device__ int getPosition(int x, int y, int width, int margin, int pixelPosition)
{
return (x + (y * width)) * margin + pixelPosition;
}
__device__ void getMaxIndex(int array[], int size, int &maxValue, int &maxIndex)
{
maxValue = -1; maxIndex = -1;
for( int i = 0; i <= size; i++ )
{
if( maxValue < array[i] )
{
maxValue = array[i];
maxIndex = i;
}
}
}
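// setEdgeDetection: each block covers a horizontal band of rows and each thread a
// band of columns; a 3x3 diagonal-difference kernel is applied per channel and the
// inverted, scaled response is written out, so edge responses appear dark on white.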
__global__ void setEdgeDetection( unsigned char* output_img, const unsigned char* input_img, int width, int height, int nbBlocks )
{
int margin = 3;
int lengthY = (int)(height/nbBlocks)+1;
int startY = blockIdx.x * lengthY;
int endY = blockIdx.x * lengthY + lengthY;
if( endY > height )
endY = height;
int lengthX = (int)(width/blockDim.x)+1;
int startX = threadIdx.x * lengthX;
int endX = threadIdx.x * lengthX + lengthX;
if( endX > width )
endX = width;
float kernel[9] = {
1.0, 0.0, -1.0,
0.0, 0.0, 0.0,
-1.0, 0.0, 1.0
};
float kernelDiv = 1.0f;
for( int x = startX; x < endX; x++ )
{
for( int y = startY; y < endY; y++ )
{
int currentIndex = getPosition(x, y, width, margin, 0);
float countR = 0;
float countG = 0;
float countB = 0;
for( int i = -1; i <= 1; i++ )
{
if( y+i < 0 || y+i >= height )
continue;
for( int j = -1; j <= 1; j++)
{
if( x+j < 0 || x+j >= width )
continue;
// index the 3x3 kernel by offset so skipped border neighbours keep the weights aligned
int n = (i+1)*3 + (j+1);
int currentIndex2 = getPosition(x+j, y+i, width, margin, 0);
countR += input_img[currentIndex2] / 255.0f * kernel[n];
countG += input_img[currentIndex2+1] / 255.0f * kernel[n];
countB += input_img[currentIndex2+2] / 255.0f * kernel[n];
}
}
countR *= kernelDiv;
countG *= kernelDiv;
countB *= kernelDiv;
output_img[currentIndex] = clamp(255 - countR * 255 * 20, 0, 255);
output_img[currentIndex+1] = clamp(255 - countG * 255 * 20, 0,255);
output_img[currentIndex+2] = clamp(255 - countB * 255 * 20, 0, 255);
}
}
}
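// setOilFilter: oil-painting effect - for every pixel, the (2*radius+1)^2 window is
// quantized into `intensity` brightness levels and the output takes the average
// colour of the most populated level.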
__global__ void setOilFilter(unsigned char* output_img, const unsigned char* input_img, int width, int height, int radius, int intensity, int nbBlocks)
{
int margin = 3;
int lengthY = (int)(height/nbBlocks)+1;
int startY = blockIdx.x * lengthY;
int endY = blockIdx.x * lengthY + lengthY;
if( endY > height )
endY = height;
int lengthX = (int)(width/blockDim.x)+1;
int startX = threadIdx.x * lengthX;
int endX = threadIdx.x * lengthX + lengthX;
if( endX > width )
endX = width;
for( int x = startX; x < endX; x++ )
{
for( int y = startY; y < endY; y++)
{
int currentIndex = getPosition(x, y, width, margin, 0);
int intensityCount[255] = {0};
int intensityR[255] = {0};
int intensityG[255] = {0};
int intensityB[255] = {0};
for( int i = -radius; i <= radius; i++ )
{
if( y+i < 0 || y+i >= height )
continue;
for( int j = -radius; j <= radius; j++ )
{
if( x+j < 0 || x+j >= width )
continue;
int currentIndex2 = getPosition(x+j, y+i, width, margin, 0);
int R = input_img[currentIndex2];
int G = input_img[currentIndex2+1];
int B = input_img[currentIndex2+2];
int currentIntensity = (((R+G+B)/3.0)*intensity)/255.0;
intensityCount[currentIntensity]++;
intensityR[currentIntensity] += R;
intensityG[currentIntensity] += G;
intensityB[currentIntensity] += B;
}
}
int maxValue = 0; int maxIndex = 0;
getMaxIndex(intensityCount, intensity, maxValue, maxIndex);
output_img[currentIndex] = clamp(intensityR[maxIndex]/maxValue, 0, 255);
output_img[currentIndex+1] = clamp(intensityG[maxIndex]/maxValue, 0, 255);
output_img[currentIndex+2] = clamp(intensityB[maxIndex]/maxValue, 0, 255);
}
}
}
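// addEffect: as used in main below, input_img is the edge map and output_img already
// holds the oil-filtered image; wherever the edge map is nearly black (mean channel
// value < 20) a 9x9 block of black "ink" is stamped into the output to draw outlines.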
__global__ void addEffect( unsigned char* output_img, unsigned char* input_img, int width, int height, int nbBlocks)
{
int lengthY = (int)(height/nbBlocks)+1;
int startY = blockIdx.x * lengthY;
int endY = blockIdx.x * lengthY + lengthY;
if( endY > height )
endY = height;
int lengthX = (int)(width/blockDim.x)+1;
int startX = threadIdx.x * lengthX;
int endX = threadIdx.x * lengthX + lengthX;
if( endX > width )
endX = width;
for( int x = startX; x < endX; x++ )
{
for( int y = startY; y < endY; y++ )
{
int currentIndex = getPosition(x, y, width, 3, 0);
if( (input_img[currentIndex] + input_img[currentIndex+1] + input_img[currentIndex+2])/3 < 20)
{
output_img[currentIndex] = input_img[currentIndex];
output_img[currentIndex+1] = input_img[currentIndex+1];
output_img[currentIndex+2] = input_img[currentIndex+2];
for( int i = -4; i <= 4; i++ )
{
for( int j = -4; j <= 4; j++ )
{
if( x+i < 0 || x+i >= width || y+j < 0 || y+j >= height )
continue;
int neighbourIndex = getPosition( x+i, y+j, width, 3, 0);
if( neighbourIndex < 0 || neighbourIndex + 2 > width*height*3)
continue;
output_img[neighbourIndex] = 0;
output_img[neighbourIndex+1] = 0;
output_img[neighbourIndex+2] = 0;
}
}
}
}
}
}
int main()
{
int width, height, n;
unsigned char* sourceImg = stbi_load("Photos/01.jpg", &width, &height, &n, 3); n = 3; // data is returned with 3 channels regardless of the file's native count
int nbBlocks = 13; int nbThreads = 1024;
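// fixed launch shape: 13 blocks each covering roughly height/13 rows and 1024
// threads each covering roughly width/1024 columns (matching the band
// decomposition inside the kernels above)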
unsigned char* inputImg, *inputImg2, *outputImg, *tmpOutput;
hipMalloc((void**) &inputImg, width * height * n * sizeof(unsigned char));
hipMemcpy(inputImg, sourceImg, width * height * n * sizeof(unsigned char), hipMemcpyHostToDevice);
hipMallocManaged(&outputImg, width * height * n * sizeof(unsigned char));
hipMallocManaged(&tmpOutput, width * height * n * sizeof(unsigned char));
// OIL FILTER
hipLaunchKernelGGL(( setOilFilter), dim3(nbBlocks),dim3(nbThreads), 0, 0, outputImg, inputImg, width, height, 10, 20, nbBlocks);
hipDeviceSynchronize();
hipMalloc((void**) &inputImg2, width * height * n * sizeof(unsigned char));
hipMemcpy(inputImg2, outputImg, width * height * n * sizeof(unsigned char), hipMemcpyDeviceToDevice);
// EDGE DETECTION
hipLaunchKernelGGL(( setEdgeDetection), dim3(nbBlocks),dim3(nbThreads), 0, 0, tmpOutput, inputImg2, width, height, nbBlocks);
hipDeviceSynchronize();
// FUSION
hipLaunchKernelGGL(( addEffect), dim3(nbBlocks),dim3(nbThreads), 0, 0, outputImg, tmpOutput, width, height, nbBlocks);
hipDeviceSynchronize();
stbi_write_png("exempleCuda.png", width, height, n, outputImg, n*width);
hipFree(inputImg2);
hipFree(tmpOutput);
hipFree(outputImg);
hipFree(inputImg);
return 0;
}
| e952a241797e392ab594438ed8c44e05be13700f.cu | #define STB_IMAGE_IMPLEMENTATION
#include "stb/stb_image.h"
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb/stb_image_write.h"
#include <iostream>
#include <stdio.h>
__device__ int getMax(int value1, int value2)
{
return value1 > value2 ? value1 : value2;
}
__device__ int getMin(int value1, int value2)
{
return value1 < value2 ? value1 : value2;
}
__device__ int clamp(int value, int minValue, int maxValue)
{
return getMax( getMin( value, maxValue ), minValue );
}
__device__ int getPosition(int x, int y, int width, int margin, int pixelPosition)
{
return (x + (y * width)) * margin + pixelPosition;
}
__device__ void getMaxIndex(int array[], int size, int &maxValue, int &maxIndex)
{
maxValue = -1; maxIndex = -1;
for( int i = 0; i <= size; i++ )
{
if( maxValue < array[i] )
{
maxValue = array[i];
maxIndex = i;
}
}
}
__global__ void setEdgeDetection( unsigned char* output_img, const unsigned char* input_img, int width, int height, int nbBlocks )
{
int margin = 3;
int lengthY = (int)(height/nbBlocks)+1;
int startY = blockIdx.x * lengthY;
int endY = blockIdx.x * lengthY + lengthY;
if( endY > height )
endY = height;
int lengthX = (int)(width/blockDim.x)+1;
int startX = threadIdx.x * lengthX;
int endX = threadIdx.x * lengthX + lengthX;
if( endX > width )
endX = width;
float kernel[9] = {
1.0, 0.0, -1.0,
0.0, 0.0, 0.0,
-1.0, 0.0, 1.0
};
float kernelDiv = 1.0f;
for( int x = startX; x < endX; x++ )
{
for( int y = startY; y < endY; y++ )
{
int currentIndex = getPosition(x, y, width, margin, 0);
float countR = 0;
float countG = 0;
float countB = 0;
int n = 0;
for( int i = -1; i <= 1; i++ )
{
if( y+i < 0 || y+i >= height )
continue;
for( int j = -1; j <= 1; j++)
{
                    if( x+j < 0 || x+j >= width )
continue;
int currentIndex2 = getPosition(x+j, y+i, width, margin, 0);
countR += input_img[currentIndex2] / 255.0f * kernel[n];
countG += input_img[currentIndex2+1] / 255.0f * kernel[n];
countB += input_img[currentIndex2+2] / 255.0f * kernel[n];
n++;
}
}
countR *= kernelDiv;
countG *= kernelDiv;
countB *= kernelDiv;
output_img[currentIndex] = clamp(255 - countR * 255 * 20, 0, 255);
output_img[currentIndex+1] = clamp(255 - countG * 255 * 20, 0,255);
output_img[currentIndex+2] = clamp(255 - countB * 255 * 20, 0, 255);
}
}
}
__global__ void setOilFilter(unsigned char* output_img, const unsigned char* input_img, int width, int height, int radius, int intensity, int nbBlocks)
{
int margin = 3;
int lengthY = (int)(height/nbBlocks)+1;
int startY = blockIdx.x * lengthY;
int endY = blockIdx.x * lengthY + lengthY;
if( endY > height )
endY = height;
int lengthX = (int)(width/blockDim.x)+1;
int startX = threadIdx.x * lengthX;
int endX = threadIdx.x * lengthX + lengthX;
if( endX > width )
endX = width;
for( int x = startX; x < endX; x++ )
{
for( int y = startY; y < endY; y++)
{
int currentIndex = getPosition(x, y, width, margin, 0);
int intensityCount[255] = {0};
int intensityR[255] = {0};
int intensityG[255] = {0};
int intensityB[255] = {0};
for( int i = -radius; i <= radius; i++ )
{
if( y+i < 0 || y+i >= height )
continue;
for( int j = -radius; j <= radius; j++ )
{
if( x+j < 0 || x+j >= width )
continue;
int currentIndex2 = getPosition(x+j, y+i, width, margin, 0);
int R = input_img[currentIndex2];
int G = input_img[currentIndex2+1];
int B = input_img[currentIndex2+2];
int currentIntensity = (((R+G+B)/3.0)*intensity)/255.0;
intensityCount[currentIntensity]++;
intensityR[currentIntensity] += R;
intensityG[currentIntensity] += G;
intensityB[currentIntensity] += B;
}
}
int maxValue = 0; int maxIndex = 0;
getMaxIndex(intensityCount, intensity, maxValue, maxIndex);
output_img[currentIndex] = clamp(intensityR[maxIndex]/maxValue, 0, 255);
output_img[currentIndex+1] = clamp(intensityG[maxIndex]/maxValue, 0, 255);
output_img[currentIndex+2] = clamp(intensityB[maxIndex]/maxValue, 0, 255);
}
}
}
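// Fusion pass: wherever the input image is nearly black (average channel value
// below 20, which in this pipeline marks a strong edge in the edge map), stamp
// a 9x9 black square into the output image, which already holds the
// oil-filtered result, producing thick dark contours on top of the painting.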
__global__ void addEffect( unsigned char* output_img, unsigned char* input_img, int width, int height, int nbBlocks)
{
int lengthY = (int)(height/nbBlocks)+1;
int startY = blockIdx.x * lengthY;
int endY = blockIdx.x * lengthY + lengthY;
if( endY > height )
endY = height;
int lengthX = (int)(width/blockDim.x)+1;
int startX = threadIdx.x * lengthX;
int endX = threadIdx.x * lengthX + lengthX;
if( endX > width )
endX = width;
for( int x = startX; x < endX; x++ )
{
for( int y = startY; y < endY; y++ )
{
int currentIndex = getPosition(x, y, width, 3, 0);
if( (input_img[currentIndex] + input_img[currentIndex+1] + input_img[currentIndex+2])/3 < 20)
{
output_img[currentIndex] = input_img[currentIndex];
output_img[currentIndex+1] = input_img[currentIndex+1];
output_img[currentIndex+2] = input_img[currentIndex+2];
for( int i = -4; i <= 4; i++ )
{
for( int j = -4; j <= 4; j++ )
{
                        if( x+i < 0 || x+i >= width || y+j < 0 || y+j >= height )
continue;
int neighbourIndex = getPosition( x+i, y+j, width, 3, 0);
if( neighbourIndex < 0 || neighbourIndex + 2 > width*height*3)
continue;
output_img[neighbourIndex] = 0;
output_img[neighbourIndex+1] = 0;
output_img[neighbourIndex+2] = 0;
}
}
}
}
}
}
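// Host driver: load the source JPG, run the oil-painting filter, run edge
// detection on the filtered image, fuse the edge map back onto the painted
// image, write the result as a PNG and release the device buffers.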
int main()
{
int width, height, n;
unsigned char* sourceImg = stbi_load("Photos/01.jpg", &width, &height, &n, 3);
int nbBlocks = 13; int nbThreads = 1024;
unsigned char* inputImg, *inputImg2, *outputImg, *tmpOutput;
cudaMalloc((void**) &inputImg, width * height * n * sizeof(unsigned char));
cudaMemcpy(inputImg, sourceImg, width * height * n * sizeof(unsigned char), cudaMemcpyHostToDevice);
cudaMallocManaged(&outputImg, width * height * n * sizeof(unsigned char));
cudaMallocManaged(&tmpOutput, width * height * n * sizeof(unsigned char));
// OIL FILTER
setOilFilter<<<nbBlocks,nbThreads>>>(outputImg, inputImg, width, height, 10, 20, nbBlocks);
cudaDeviceSynchronize();
cudaMalloc((void**) &inputImg2, width * height * n * sizeof(unsigned char));
cudaMemcpy(inputImg2, outputImg, width * height * n * sizeof(unsigned char), cudaMemcpyDeviceToDevice);
// EDGE DETECTION
setEdgeDetection<<<nbBlocks,nbThreads>>>(tmpOutput, inputImg2, width, height, nbBlocks);
cudaDeviceSynchronize();
// FUSION
addEffect<<<nbBlocks,nbThreads>>>(outputImg, tmpOutput, width, height, nbBlocks);
cudaDeviceSynchronize();
stbi_write_png("exempleCuda.png", width, height, n, outputImg, n*width);
cudaFree(inputImg2);
cudaFree(tmpOutput);
cudaFree(outputImg);
cudaFree(inputImg);
return 0;
}
|
db118859e8bd72edd15a2f0f7ea4c055bc8fdfd5.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <cstdlib>
#include <cfloat>
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <rocblas.h>
#include <cusolverDn.h>
#include "struct.h"
#include "constants.h"
void assignObjfcnStructMemory(long long &, fcndata &, double *);
void objfcn(double *, double *, fcndata &);
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[])
{
mxInitGPU();
fcndata fcnObj = {0};
mxGPUArray const *ctlGrpStk;
mxGPUArray const *cenIniMat, *difIniMat, *grpNdeVec, *wgtGrpVec, *tgtNdeMat;
ctlGrpStk = mxGPUCreateFromMxArray(prhs[ 0]);
cenIniMat = mxGPUCreateFromMxArray(prhs[ 1]);
difIniMat = mxGPUCreateFromMxArray(prhs[ 2]);
grpNdeVec = mxGPUCreateFromMxArray(prhs[ 3]);
wgtGrpVec = mxGPUCreateFromMxArray(prhs[ 4]);
tgtNdeMat = mxGPUCreateFromMxArray(prhs[ 5]);
fcnObj.prm.knlOrder = mxGetScalar(prhs[ 6]);
fcnObj.prm.knlWidth = mxGetScalar(prhs[ 7]);
fcnObj.prm.knlEps = mxGetScalar(prhs[ 8]);
fcnObj.prm.timeStp = mxGetScalar(prhs[ 9]);
fcnObj.prm.timeNum = mxGetScalar(prhs[10]);
fcnObj.prm.tgtWgt = mxGetScalar(prhs[11]);
// ---
double *d_ctlGrpStk = (double *) mxGPUGetDataReadOnly(ctlGrpStk);
fcnObj.prm.d_cenIniMat = (double *) mxGPUGetDataReadOnly(cenIniMat);
fcnObj.prm.d_difIniMat = (double *) mxGPUGetDataReadOnly(difIniMat);
fcnObj.prm.d_grpNdeVec = (int *) mxGPUGetDataReadOnly(grpNdeVec);
fcnObj.prm.d_wgtGrpVec = (double *) mxGPUGetDataReadOnly(wgtGrpVec);
fcnObj.tgt.d_tgtNdeMat = (double *) mxGPUGetDataReadOnly(tgtNdeMat);
fcnObj.prm.rgdGrpNum = mxGPUGetNumberOfElements(wgtGrpVec);
fcnObj.prm.rgdNdeNum = mxGPUGetNumberOfElements(grpNdeVec);
// ---
int rgdGrpNum = fcnObj.prm.rgdGrpNum;
int rgdNdeNum = fcnObj.prm.rgdNdeNum;
int timeNum = fcnObj.prm.timeNum;
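  // Total number of doubles needed by all device work arrays; allocated below
  // as a single contiguous hipMalloc block and partitioned by
  // assignObjfcnStructMemory, which reports back how much it actually used.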
long long gpuAloDblMemCnt = rgdGrpNum * (timeNum - 1)
+ rgdNdeNum * ( rgdNdeNum * 2 + DIMNUM + DIMNUM * timeNum + DIMNUM * (timeNum - 1) * 2
+ RGDDOF * (timeNum - 1) + RGDDOF * timeNum)
+ SUMBLKDIM;
double *gpuDblSpace;
hipError_t error = hipMalloc((void **) &gpuDblSpace, sizeof(double) * gpuAloDblMemCnt);
if ( error != hipSuccess )
mexErrMsgIdAndTxt("objfcn2D:hipMalloc", "Fail to allocate device memory.");
hipMalloc((void **) &(fcnObj.d_status), sizeof(int));
long long gpuAsgDblMemCnt;
assignObjfcnStructMemory(gpuAsgDblMemCnt, fcnObj, gpuDblSpace);
if ( gpuAsgDblMemCnt != gpuAloDblMemCnt )
{
mexErrMsgIdAndTxt("objfcn2D:memAssign",
"Assigned device double memory (%lld) mismatches the allocated memory (%lld).",
gpuAsgDblMemCnt, gpuAloDblMemCnt);
}
// ---
hipblasCreate(&(fcnObj.blasHdl));
hipsolverDnCreate(&(fcnObj.solvHdl));
hipsolverDnDpotrf_bufferSize(fcnObj.solvHdl, HIPBLAS_FILL_MODE_LOWER,
fcnObj.prm.rgdNdeNum, fcnObj.d_rgdKnlMat,
fcnObj.prm.rgdNdeNum, &(fcnObj.h_Lwork));
hipMalloc((void **) &(fcnObj.d_workspace), sizeof(double) * fcnObj.h_Lwork);
// ---
double h_objVal;
objfcn(&h_objVal, d_ctlGrpStk, fcnObj);
plhs[0] = mxCreateDoubleScalar(h_objVal);
// ---
//
mxGPUDestroyGPUArray(ctlGrpStk);
mxGPUDestroyGPUArray(cenIniMat);
mxGPUDestroyGPUArray(difIniMat);
mxGPUDestroyGPUArray(grpNdeVec);
mxGPUDestroyGPUArray(wgtGrpVec);
mxGPUDestroyGPUArray(tgtNdeMat);
hipFree(gpuDblSpace);
hipFree(fcnObj.d_status);
hipFree(fcnObj.d_workspace);
hipblasDestroy(fcnObj.blasHdl);
hipsolverDnDestroy(fcnObj.solvHdl);
return;
}
| db118859e8bd72edd15a2f0f7ea4c055bc8fdfd5.cu | #include <cstdio>
#include <cstdlib>
#include <cfloat>
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <cublas_v2.h>
#include <cusolverDn.h>
#include "struct.h"
#include "constants.h"
void assignObjfcnStructMemory(long long &, fcndata &, double *);
void objfcn(double *, double *, fcndata &);
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[])
{
mxInitGPU();
fcndata fcnObj = {0};
mxGPUArray const *ctlGrpStk;
mxGPUArray const *cenIniMat, *difIniMat, *grpNdeVec, *wgtGrpVec, *tgtNdeMat;
ctlGrpStk = mxGPUCreateFromMxArray(prhs[ 0]);
cenIniMat = mxGPUCreateFromMxArray(prhs[ 1]);
difIniMat = mxGPUCreateFromMxArray(prhs[ 2]);
grpNdeVec = mxGPUCreateFromMxArray(prhs[ 3]);
wgtGrpVec = mxGPUCreateFromMxArray(prhs[ 4]);
tgtNdeMat = mxGPUCreateFromMxArray(prhs[ 5]);
fcnObj.prm.knlOrder = mxGetScalar(prhs[ 6]);
fcnObj.prm.knlWidth = mxGetScalar(prhs[ 7]);
fcnObj.prm.knlEps = mxGetScalar(prhs[ 8]);
fcnObj.prm.timeStp = mxGetScalar(prhs[ 9]);
fcnObj.prm.timeNum = mxGetScalar(prhs[10]);
fcnObj.prm.tgtWgt = mxGetScalar(prhs[11]);
// ---
double *d_ctlGrpStk = (double *) mxGPUGetDataReadOnly(ctlGrpStk);
fcnObj.prm.d_cenIniMat = (double *) mxGPUGetDataReadOnly(cenIniMat);
fcnObj.prm.d_difIniMat = (double *) mxGPUGetDataReadOnly(difIniMat);
fcnObj.prm.d_grpNdeVec = (int *) mxGPUGetDataReadOnly(grpNdeVec);
fcnObj.prm.d_wgtGrpVec = (double *) mxGPUGetDataReadOnly(wgtGrpVec);
fcnObj.tgt.d_tgtNdeMat = (double *) mxGPUGetDataReadOnly(tgtNdeMat);
fcnObj.prm.rgdGrpNum = mxGPUGetNumberOfElements(wgtGrpVec);
fcnObj.prm.rgdNdeNum = mxGPUGetNumberOfElements(grpNdeVec);
// ---
int rgdGrpNum = fcnObj.prm.rgdGrpNum;
int rgdNdeNum = fcnObj.prm.rgdNdeNum;
int timeNum = fcnObj.prm.timeNum;
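  // Total number of doubles needed by all device work arrays; allocated below
  // as a single contiguous cudaMalloc block and partitioned by
  // assignObjfcnStructMemory, which reports back how much it actually used.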
long long gpuAloDblMemCnt = rgdGrpNum * (timeNum - 1)
+ rgdNdeNum * ( rgdNdeNum * 2 + DIMNUM + DIMNUM * timeNum + DIMNUM * (timeNum - 1) * 2
+ RGDDOF * (timeNum - 1) + RGDDOF * timeNum)
+ SUMBLKDIM;
double *gpuDblSpace;
cudaError_t error = cudaMalloc((void **) &gpuDblSpace, sizeof(double) * gpuAloDblMemCnt);
if ( error != cudaSuccess )
mexErrMsgIdAndTxt("objfcn2D:cudaMalloc", "Fail to allocate device memory.");
cudaMalloc((void **) &(fcnObj.d_status), sizeof(int));
long long gpuAsgDblMemCnt;
assignObjfcnStructMemory(gpuAsgDblMemCnt, fcnObj, gpuDblSpace);
if ( gpuAsgDblMemCnt != gpuAloDblMemCnt )
{
mexErrMsgIdAndTxt("objfcn2D:memAssign",
"Assigned device double memory (%lld) mismatches the allocated memory (%lld).",
gpuAsgDblMemCnt, gpuAloDblMemCnt);
}
// ---
cublasCreate(&(fcnObj.blasHdl));
cusolverDnCreate(&(fcnObj.solvHdl));
cusolverDnDpotrf_bufferSize(fcnObj.solvHdl, CUBLAS_FILL_MODE_LOWER,
fcnObj.prm.rgdNdeNum, fcnObj.d_rgdKnlMat,
fcnObj.prm.rgdNdeNum, &(fcnObj.h_Lwork));
cudaMalloc((void **) &(fcnObj.d_workspace), sizeof(double) * fcnObj.h_Lwork);
// ---
double h_objVal;
objfcn(&h_objVal, d_ctlGrpStk, fcnObj);
plhs[0] = mxCreateDoubleScalar(h_objVal);
// ---
//
mxGPUDestroyGPUArray(ctlGrpStk);
mxGPUDestroyGPUArray(cenIniMat);
mxGPUDestroyGPUArray(difIniMat);
mxGPUDestroyGPUArray(grpNdeVec);
mxGPUDestroyGPUArray(wgtGrpVec);
mxGPUDestroyGPUArray(tgtNdeMat);
cudaFree(gpuDblSpace);
cudaFree(fcnObj.d_status);
cudaFree(fcnObj.d_workspace);
cublasDestroy(fcnObj.blasHdl);
cusolverDnDestroy(fcnObj.solvHdl);
return;
}
|
cd6509f662fbfc07a248645ff14076abfdd0baea.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// a simple CUDA kernel to add two vectors
extern "C"
{
__global__ void vadd(const float *a, const float *b, float *c)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
c[i] = a[i] + b[i];
}
} // extern "C"
| cd6509f662fbfc07a248645ff14076abfdd0baea.cu | // a simple CUDA kernel to add two vectors
extern "C"
{
__global__ void vadd(const float *a, const float *b, float *c)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
c[i] = a[i] + b[i];
}
} // extern "C"
|
b75d4108331e92719b7b141bfbdb816d307f3825.hip | // !!! This is a file automatically generated by hipify!!!
/* -*- mode: C++; c-file-style: "bsd"; c-basic-offset: 2; indent-tabs-mode: nil -*- */
#include "buildKKRMatrix.hpp"
#include <stdio.h>
#include "Complex.hpp"
#include "Matrix.hpp"
#include <vector>
#include "Accelerator/DeviceStorage.hpp"
#include <hip/hip_runtime.h>
#include <hip/hip_complex.h>
#include "cuComplexOperators.hpp"
#include <rocblas.h>
#include <cusolverDn.h>
#include "linearSolvers.hpp"
// we might want to distinguish between systems where all lmax (and consequently kkrsz_ns) are the same
// and systems with potentially different lmax on different atoms and l steps
// #define COMPARE_ORIGINAL 1
// Fortran layout for matrix
// #define IDX(i, j, lDim) (((j)*(lDim))+(i))
#define IDX3(i, j, k, lDim, mDim) (((k)*(lDim)*(mDim)) + ((j)*(lDim)) + (i))
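// Single-thread device helper: builds the scaled spherical Hankel factors
// hfn[l] for l = 0..lend by upward three-term recurrence; the normalization
// applied at the end is documented in the inline comment below.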
__device__
__inline__
void calculateHankelCuda(hipDoubleComplex prel, double r, int lend, hipDoubleComplex *ilp1, hipDoubleComplex *hfn)
{
if(threadIdx.x == 0)
{
const hipDoubleComplex sqrtm1 = make_cuDoubleComplex(0.0, 1.0);
hipDoubleComplex z = prel * make_cuDoubleComplex(r,0.0);
hfn[0] = make_cuDoubleComplex(0.0, -1.0); //-sqrtm1;
hfn[1] = -1.0 - sqrtm1/z;
for(int l=1; l<lend; l++)
{
hfn[l+1] = ((2.0*l+1.0) * hfn[l]/z) - hfn[l-1];
}
    //  hfn[l] = -i^(l+1) * h_l(k*R_ij) * sqrt(E)
z = exp(sqrtm1*z)/r;
for(int l=0; l<=lend; l++)
{
hfn[l] = ((-hfn[l]) * z) * ilp1[l];
}
}
// __syncthreads();
}
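// Device helper: computes cos(m*phi) and sin(m*phi) for m = 0..lend from the
// x/y components of rij via the angle-addition recurrence, with a tolerance
// guard for vectors (nearly) parallel to the z axis.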
__device__
__inline__
void calculateSinCosPowersCuda(Real *rij, int lend, Real *sinmp, Real *cosmp)
{
const Real ptol = 1.0e-6;
Real pmag = std::sqrt(rij[0]*rij[0]+rij[1]*rij[1]);
cosmp[0] = 1.0;
sinmp[0] = 0.0;
if(pmag>ptol)
{
cosmp[1] = rij[0]/pmag;
sinmp[1] = rij[1]/pmag;
} else {
cosmp[1] = 0.0;
sinmp[1] = 0.0;
}
for(int m=2; m<=lend; m++)
{
cosmp[m] = cosmp[m-1]*cosmp[1] - sinmp[m-1]*sinmp[1];
sinmp[m] = sinmp[m-1]*cosmp[1] + cosmp[m-1]*sinmp[1];
}
}
// __device__ __inline__ int plmIdxDev(int l, int m)
// { return l*(l+1)/2+m; }
#define PLM_IDX(l,m) (((l)*((l)+1))/2 + (m))
__device__
__inline__
void associatedLegendreFunctionNormalizedCuda(Real x, int lmax, Real *Plm)
{
const Real pi = std::acos(-1.0);
// y = \sqrt{1-x^2}
Real y = std::sqrt(1.0-x*x);
// initialize the first entry
// Plm[0]=std::sqrt(R(1)/(R(2)*pi));
Plm[0]=std::sqrt(1.0/(4.0*pi));
if(lmax<1) return;
for(int m=1; m<=lmax; m++)
{
// \bar{P}_{mm} = - \sqrt{\frac{2m+1}{2m}} y \bar{P}_{m-1, m-1}
Plm[PLM_IDX(m,m)] = - std::sqrt(Real(2*m+1)/Real(2*m)) * y * Plm[PLM_IDX(m-1,m-1)];
// \bar{P}_{mm-1} = \sqrt{2 m + 1} x \bar{P}_{m-1, m-1}
Plm[PLM_IDX(m,m-1)] = std::sqrt(Real(2*m+1)) * x * Plm[PLM_IDX(m-1,m-1)];
}
for(int m=0; m<lmax; m++)
{
for(int l=m+2; l<=lmax; l++)
{
      // \bar{P}_{lm} = a_{lm} (x \bar{P}_{l-1, m} - b_{lm} \bar{P}_{l-2, m})
      // a_{lm} = \sqrt{\frac{4 l^2 - 1}{l^2 - m^2}}
      // b_{lm} = \sqrt{\frac{(l-1)^2 - m^2}{4 (l-1)^2 - 1}}
Real a_lm = std::sqrt(Real(4*l*l-1)/Real(l*l - m*m));
Real b_lm = std::sqrt(Real((l-1)*(l-1) - m*m)/Real(4*(l-1)*(l-1)-1));
Plm[PLM_IDX(l,m)] = a_lm * (x * Plm[PLM_IDX(l-1,m)] - b_lm * Plm[PLM_IDX(l-2,m)]);
}
}
}
__device__
__inline__
hipDoubleComplex dlmFunction(hipDoubleComplex *hfn, double *cosmp, double *sinmp, double *plm, int l, int m)
{
int mAbs = abs(m);
hipDoubleComplex dlm = hfn[l]*plm[PLM_IDX(l,mAbs)];
if(m==0) return dlm;
if(m<0)
{
dlm = dlm * make_cuDoubleComplex(cosmp[mAbs],sinmp[mAbs]);
    if((mAbs & 0x01) != 0) // m is odd
dlm = -dlm;
} else {
dlm = dlm * make_cuDoubleComplex(cosmp[mAbs],-sinmp[mAbs]);
}
return dlm;
}
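// Computes the dynamic shared-memory layout used by buildGijCudaKernel:
// byte offsets for the hfn, sinmp, cosmp and plm scratch arrays and the
// total number of bytes to request at launch.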
size_t sharedMemoryBGijCuda(LSMSSystemParameters &lsms, size_t *hfnOffset, size_t *sinmpOffset, size_t *cosmpOffset,
size_t *plmOffset, size_t *dlmOffset)
{
size_t size = 0;
*hfnOffset = size;
size += sizeof(hipDoubleComplex) * (2*lsms.maxlmax + 1);
*sinmpOffset = size;
size += sizeof(double) * (2*lsms.maxlmax + 1);
*cosmpOffset = size;
size += sizeof(double) * (2*lsms.maxlmax + 1);
*plmOffset = size;
size += sizeof(double) * (lsms.angularMomentumIndices.ndlm);
// *dlmOffset = size;
// size += sizeof(hipDoubleComplex) * (lsms.angularMomentumIndices.ndlj);
return size;
}
__global__
void setBGijCuda(bool fullRelativity, int n_spin_cant, int *LIZlmax,
int *offsets, size_t nrmat_ns, hipDoubleComplex *devBgij)
{
if(n_spin_cant == 1) return;
int ir1 = blockIdx.x;
int ir2 = blockIdx.y;
int iOffset = offsets[ir1];
int jOffset = offsets[ir2];
int kkri=(LIZlmax[ir1]+1)*(LIZlmax[ir1]+1);
int kkrj=(LIZlmax[ir2]+1)*(LIZlmax[ir2]+1);
if(!fullRelativity) //(lsms.relativity != full)
{
for(int ij=threadIdx.x; ij < kkri*kkrj; ij += blockDim.x)
{
int i = ij % kkri;
int j = ij / kkri;
/*
for(int i=0; i<kkri; i++)
for(int j=0; j<kkrj; j++)
{
*/
devBgij[IDX(iOffset + kkri + i, jOffset + j, nrmat_ns)] = make_cuDoubleComplex(0.0, 0.0); // bgij(iOffset + i, jOffset + j);
        devBgij[IDX(iOffset + i, jOffset + kkrj + j, nrmat_ns)] = make_cuDoubleComplex(0.0, 0.0); // bgij(iOffset + i, jOffset + j);
devBgij[IDX(iOffset + kkri + i, jOffset + kkrj + j, nrmat_ns)] = devBgij[IDX(iOffset + i, jOffset + j, nrmat_ns)];
}
} else {
/*
call relmtrx(gij,bgij,kkr1,kkr2)
fac=psq/ce
do i=1,kkr1_ns
do j=1,kkr2_ns
bgij(i,j)=fac*bgij(i,j)
end do
end do
*/
printf("Fully relativistic calculation not yet implemented in 'MultipleScattering/buildKKRMatrix.cpp : setBGijCPU'\n");
// exit(1);
}
}
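// One thread block per (ir1, ir2) pair of LIZ sites: assembles the real-space
// structure-constant block g_ij(E) into devBgij from the Hankel factors,
// normalized associated Legendre functions, sin/cos powers and Gaunt
// coefficients held in shared memory.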
__global__
void buildGijCudaKernel(Real *LIZPos, int *LIZlmax, int *lofk, int *mofk, hipDoubleComplex *ilp1, hipDoubleComplex *illp, Real *cgnt,
int ndlj_illp, int lmaxp1_cgnt, int ndlj_cgnt,
size_t hfnOffset, size_t sinmpOffset, size_t cosmpOffset, size_t plmOffset, size_t dlmOffset,
#if !defined(COMPARE_ORIGINAL)
hipDoubleComplex energy, hipDoubleComplex prel, int *offsets, size_t nrmat_ns, hipDoubleComplex *devBgij)
#else
hipDoubleComplex energy, hipDoubleComplex prel, int *offsets, size_t nrmat_ns, hipDoubleComplex *devBgij, char *testSM)
#endif
// void buildBGijCPU(LSMSSystemParameters &lsms, AtomData &atom, int ir1, int ir2, Real *rij,
// Complex energy, Complex prel, int iOffset, int jOffset, Matrix<Complex> &bgij)
{
int ir1 = blockIdx.x;
int ir2 = blockIdx.y;
extern char __shared__ sharedMemory[];
if(ir1 != ir2)
{
int iOffset = offsets[ir1];
// int iOffset = ir1 * kkrsz_ns;
int jOffset = offsets[ir2];
// int jOffset = ir2 * kkrsz_ns;
Real rij[3];
rij[0] = LIZPos[3*ir1 + 0] - LIZPos[3*ir2 + 0];
rij[1] = LIZPos[3*ir1 + 1] - LIZPos[3*ir2 + 1];
rij[2] = LIZPos[3*ir1 + 2] - LIZPos[3*ir2 + 2];
// Complex hfn[2*lsms.maxlmax + 1];
hipDoubleComplex *hfn = (hipDoubleComplex *) (sharedMemory + hfnOffset);
// Real sinmp[2*lsms.maxlmax + 1];
Real *sinmp = (Real *) (sharedMemory + sinmpOffset);
// Real cosmp[2*lsms.maxlmax + 1];
Real *cosmp = (Real *) (sharedMemory + cosmpOffset);
// Real plm[lsms.angularMomentumIndices.ndlm];
Real *plm = (Real *) (sharedMemory + plmOffset);
// Complex dlm[lsms.angularMomentumIndices.ndlj];
// hipDoubleComplex *dlm = (hipDoubleComplex *) (sharedMemory + dlmOffset);
#if defined(COMPARE_ORIGINAL)
hipDoubleComplex *testHfn = (hipDoubleComplex *) (testSM + hfnOffset);
Real *testSinmp = (Real *) (testSM + sinmpOffset);
Real *testCosmp = (Real *) (testSM + cosmpOffset);
Real *testPlm = (Real *) (testSM + plmOffset);
hipDoubleComplex *testDlm = (hipDoubleComplex *) (testSM + dlmOffset);
#endif
Real r = std::sqrt(rij[0]*rij[0] + rij[1]*rij[1] + rij[2]*rij[2]);
int lmax1 = LIZlmax[ir1];
int lmax2 = LIZlmax[ir2];
int kkri=(lmax1+1)*(lmax1+1);
int kkrj=(lmax2+1)*(lmax2+1);
int lend = lmax1 + lmax2;
Real pi4=4.0*2.0*std::asin(1.0);
Real cosTheta = rij[2]/r;
if(threadIdx.x == 0)
{
calculateHankelCuda(prel, r, lend, ilp1, hfn);
associatedLegendreFunctionNormalizedCuda(cosTheta, lend, plm);
// for associatedLegendreFunctionNormalized all clm[i] == 1.0
// for(int j=0;j<ndlm_local;j++)
// plm[j]=clm[j]*plm[j];
// calculate cos(phi) and sin(phi) .................................
// needs to be serial
calculateSinCosPowersCuda(rij, lend, sinmp, cosmp);
}
__syncthreads();
/*
// can be parallel
int j;
int ll;
// for(int l = threadIdx.x; l<=lend; l += blockDim.x)
if(threadIdx.x == 0)
{
for(int l = 0; l<=lend; l++)
{
// int ll = l*(l+1);
// j = ll;
// ll = ll/2;
j = l*(l+1);
ll = j/2;
double m1m = 1.0;
dlm[j] = hfn[l]*plm[ll];
for(int m=1; m<=l; m++)
{
m1m = -m1m;
hipDoubleComplex fac = plm[ll+m] * make_cuDoubleComplex(cosmp[m],sinmp[m]);
dlm[j-m] = hfn[l]*m1m*fac;
dlm[j+m] = hfn[l]*cuConj(fac);
}
}
}
__syncthreads();
*/
#if defined(COMPARE_ORIGINAL)
if(ir1 == 0 && ir2 == 1 && threadIdx.x == 0)
{
for(int l = 0; l<=lend; l++)
{
testHfn[l] = hfn[l];
testSinmp[l] = sinmp[l];
testCosmp[l] = cosmp[l];
}
}
#endif
// ================================================================
// calculate g(R_ij)...............................................
// for(int i=0; i<kkri*kkrj; i++) gij[i]=0.0;
// for(int i=0; i<kkri; i++)
// for(int j=0; j<kkrj; j++)
// for(int ij=0; ij < kkri*kkrj; ij++)
for(int ij=threadIdx.x; ij < kkri*kkrj; ij += blockDim.x)
{
int lm2 = ij % kkri;
int lm1 = ij / kkri;
devBgij[IDX(iOffset + lm2, jOffset + lm1, nrmat_ns)] = make_cuDoubleComplex(0.0, 0.0);
// bgij(iOffset + lm2, jOffset + lm1) = 0.0;
// }
// loop over l1,m1............................................
// for(int lm1=0; lm1<kkrj; lm1++)
// {
int l1=lofk[lm1];
int m1=mofk[lm1];
// loop over l2,m2..............................................
// for(int lm2=0; lm2<kkri; lm2++)
// {
int l2=lofk[lm2];
int m2=mofk[lm2];
// ==========================================================
          // illp(lm2,lm1) = i^(l2-l1)
          //
// perform sum over l3 with gaunt # ......................
// ==========================================================
int m3=m2-m1;
int llow=max(abs(m3), abs(l1-l2));
if(cuCabs(prel)==0.0) llow=l1+l2;
for(int l3=l1+l2; l3>=llow; l3-=2)
{
int j=l3*(l3+1)+m3;
// gij[lm2+lm1*kkri] = gij[lm2+lm1*kkri]+cgnt(l3/2,lm1,lm2)*dlm[j];
devBgij[IDX(iOffset + lm2, jOffset + lm1, nrmat_ns)] = devBgij[IDX(iOffset + lm2, jOffset + lm1, nrmat_ns)]
+ cgnt[IDX3(l3/2,lm1,lm2,lmaxp1_cgnt,ndlj_cgnt)]
* dlmFunction(hfn, cosmp, sinmp, plm, l3, m3); //dlm[j];
}
// gij[lm2+lm1*kkri]=pi4*illp(lm2,lm1)*gij[lm2+lm1*kkri];
devBgij[IDX(iOffset + lm2, jOffset + lm1, nrmat_ns)] = devBgij[IDX(iOffset + lm2, jOffset + lm1, nrmat_ns)]
* pi4 * illp[IDX(lm2, lm1, ndlj_illp)];
}
}
}
__device__
void buildTmatNCuda(int ispin, int n_spin_pola, int n_spin_cant, int iie, int blkSizeTmatStore, int tmatStoreLDim,
int kkr1, int kkr2, int lizStoreIdx,
hipDoubleComplex *devTmatStore, int kkrsz_ns, hipDoubleComplex *tmat_n)
{
// Matrix<Complex> tmat_n(lsms.n_spin_cant*atom.kkrsz, lsms.n_spin_cant*atom.kkrsz);
if(threadIdx.x == 0)
{
int im=0;
if(n_spin_pola == n_spin_cant) // non polarized or spin canted
{
int kkrsz = kkrsz_ns/n_spin_cant;
for(int js=0; js<n_spin_cant; js++)
{
int jsm = kkrsz*kkrsz_ns*js;
for(int j=0; j<kkr1; j++)
{
for(int is=0; is<n_spin_cant; is++)
{
int jm=jsm+kkrsz_ns*j+kkrsz*is;
// int one=1;
// BLAS::zcopy_(&kkr1,&local.tmatStore(iie*local.blkSizeTmatStore+jm,atom.LIZStoreIdx[ir1]),&one,&tmat_n[im],&one);
for(int i=0; i<kkr1; i++)
{
tmat_n[im+i] = devTmatStore[IDX(iie*blkSizeTmatStore+jm+i, lizStoreIdx, tmatStoreLDim)];
}
im+=kkr1;
}
}
}
} else { // spin polarized colinear version for ispin
int kkrsz = kkrsz_ns/n_spin_cant;
// int ispin=0;
printf("warning: cant't test building kkrMatrix for collinear spin polarized yet!\n");
// exit(1);
int jsm = kkrsz*kkrsz*ispin; // copy spin up or down?
for(int j=0; j<kkr1; j++)
{
int jm=jsm+kkrsz_ns*j;
// int one=1;
// BLAS::zcopy_(&kkr1,&local.tmatStore(iie*local.blkSizeTmatStore+jm,atom.LIZStoreIdx[ir1]),&one,&tmat_n[im],&one);
for(int i=0; i<kkr1; i++)
{
tmat_n[im+i] = devTmatStore[IDX(iie*blkSizeTmatStore+jm+i, lizStoreIdx, tmatStoreLDim)];
}
im+=kkr1;
}
}
}
__syncthreads();
}
__global__
void buildKKRMatrixMultiplyKernelCuda(int *LIZlmax, int *LIZStoreIdx, int *offsets, int kkrsz_ns,
int ispin, int n_spin_pola, int n_spin_cant, int iie, int blkSizeTmatStore, int tmatStoreLDim,
hipDoubleComplex *devTmatStore, int nrmat_ns, hipDoubleComplex *devBgij, hipDoubleComplex *devM)
{
int ir1 = blockIdx.x;
int ir2 = blockIdx.y;
// extern hipDoubleComplex __shared__ *tmat_n;
hipDoubleComplex *tmat_n;
int iOffset = offsets[ir1];
int jOffset = offsets[ir2];
if(ir1 != ir2)
{
int lmax1 = LIZlmax[ir1];
int lmax2 = LIZlmax[ir2];
int kkr1=(lmax1+1)*(lmax1+1);
int kkr2=(lmax2+1)*(lmax2+1);
int kkr1_ns = kkr1 * n_spin_cant;
int kkr2_ns = kkr2 * n_spin_cant;
// BLAS::zgemm_("n", "n", &kkr1_ns, &kkr2_ns, &kkr1_ns, &cmone,
// &local.tmatStore(iie*local.blkSizeTmatStore, devAtom.LIZStoreIdx[ir1]), &kkr1_ns,
// // &tmat_n(0, 0), &kkr1_ns,
// &bgij(iOffset, jOffset), &nrmat_ns, &czero,
// // &bgijSmall(0, 0), &kkrsz_ns, &czero,
// &m(iOffset, jOffset), &nrmat_ns);
for(int j=0; j<kkr2_ns; j++)
{
}
// for(int i=0; i<kkr1_ns; i++)
// for(int j=0; j<kkr2_ns; j++)
// buildTmatNCuda(ispin, n_spin_pola, n_spin_cant, iie, blkSizeTmatStore, tmatStoreLDim,
// kkr1, kkr2, LIZStoreIdx[ir1], devTmatStore, kkrsz_ns, tmat_n);
tmat_n = &devTmatStore[IDX(iie*blkSizeTmatStore, LIZStoreIdx[ir1], tmatStoreLDim)];
for(int ij=threadIdx.x; ij < kkr1_ns*kkr2_ns; ij += blockDim.x)
{
int i = ij % kkr1_ns;
int j = ij / kkr1_ns;
devM[IDX(iOffset + i, jOffset + j, nrmat_ns)] = make_cuDoubleComplex(0.0,0.0);
for(int k=0; k<kkr1_ns ; k++)
devM[IDX(iOffset + i, jOffset + j, nrmat_ns)] = devM[IDX(iOffset + i, jOffset + j, nrmat_ns)] -
tmat_n[IDX(i,k,kkr1_ns)] * // tmat_n(i, k) * // local.tmatStore(iie*local.blkSizeTmatStore + , atom.LIZStoreIdx[ir1]) *
devBgij[IDX(iOffset + k, jOffset + j, nrmat_ns)];
}
}
}
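// Host driver for the common case where every LIZ atom carries the same lmax:
// set M to the identity, zero Bgij, build the g_ij blocks on the GPU, expand
// them to the spin-canted block structure, then form M = 1 - t*g with the
// multiply kernel above.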
void buildKKRMatrixLMaxIdenticalCuda(LSMSSystemParameters &lsms, LocalTypeInfo &local, AtomData &atom,
DeviceStorage &d, DeviceAtom &devAtom, int ispin,
int iie, Complex energy, Complex prel, Complex *devM)
{
hipblasHandle_t cublasHandle = DeviceStorage::getCublasHandle();
int nrmat_ns = lsms.n_spin_cant*atom.nrmat; // total size of the kkr matrix
int kkrsz_ns = lsms.n_spin_cant*atom.kkrsz; // size of t00 block
bool fullRelativity = false;
if(lsms.relativity == full) fullRelativity = true;
// Complex cmone = Complex(-1.0,0.0);
// Complex czero=0.0;
Complex *devBgij = d.getDevBGij();
// Matrix<Complex> bgijSmall(kkrsz_ns, kkrsz_ns);
hipDoubleComplex cuEnergy = make_cuDoubleComplex(energy.real(), energy.imag());
hipDoubleComplex cuPrel = make_cuDoubleComplex(prel.real(), prel.imag());
unitMatrixCuda<Complex>(devM, nrmat_ns, nrmat_ns);
zeroMatrixCuda(devBgij, nrmat_ns, nrmat_ns);
// calculate Bgij
// reuse ipvt for offsets
int *devOffsets = d.getDevIpvt();
std::vector<int> offsets(devAtom.numLIZ);
for(int ir = 0; ir < devAtom.numLIZ; ir++)
offsets[ir] = ir * kkrsz_ns;
hipMemcpy(devOffsets, &offsets[0], atom.numLIZ*sizeof(int), hipMemcpyHostToDevice);
size_t hfnOffset, sinmpOffset, cosmpOffset, plmOffset, dlmOffset;
size_t smSize = sharedMemoryBGijCuda(lsms, &hfnOffset, &sinmpOffset, &cosmpOffset,
&plmOffset, &dlmOffset);
#ifdef COMPARE_ORIGINAL
printf("smSize = %zu\n", smSize);
printf(" hfnOffset = %zu\n", hfnOffset);
printf(" sinmpOffset = %zu\n", sinmpOffset);
printf(" cosmpOffset = %zu\n", cosmpOffset);
printf(" plmOffset = %zu\n", plmOffset);
printf(" dlmOffset = %zu\n", dlmOffset);
char *devTestSM;
hipMalloc(&devTestSM, smSize);
{
// test
// Matrix<Real> testLIZPos(3,atom.numLIZ);
// Matrix<Complex> bgij(nrmat_ns, nrmat_ns);
Complex testIlp1[2*lsms.maxlmax + 1];
// hipMemcpy(&bgij[0], devBgij, nrmat_ns*nrmat_ns*sizeof(Complex), hipMemcpyDeviceToHost);
// hipMemcpy(&testLIZPos[0], devAtom.LIZPos, 3*atom.numLIZ*sizeof(Real), hipMemcpyDeviceToHost);
hipMemcpy(&testIlp1[0], DeviceConstants::ilp1, (2*lsms.maxlmax + 1)*sizeof(Complex), hipMemcpyDeviceToHost);
printf("in calculateTauMatrix: before buildGijCudaKernel:\n");
for(int l=0; l<2*lsms.maxlmax; l++)
{
printf("l=%d : ilp1 [%g + %gi] | DeviceConstats::ilp1 [%g + %gi]\n",l,IFactors::ilp1[l].real(),IFactors::ilp1[l].imag(), testIlp1[l].real(), testIlp1[l].imag());
}
}
#endif
int threads = 256;
// int threads = 1;
dim3 blocks = dim3(devAtom.numLIZ, devAtom.numLIZ,1);
hipLaunchKernelGGL(( buildGijCudaKernel), dim3(blocks),dim3(threads),smSize, 0, devAtom.LIZPos, devAtom.LIZlmax,
DeviceConstants::lofk, DeviceConstants::mofk, DeviceConstants::ilp1, DeviceConstants::illp, DeviceConstants::cgnt,
DeviceConstants::ndlj_illp, DeviceConstants::lmaxp1_cgnt, DeviceConstants::ndlj_cgnt,
hfnOffset, sinmpOffset, cosmpOffset, plmOffset, dlmOffset,
cuEnergy, cuPrel,
#if !defined(COMPARE_ORIGINAL)
devOffsets, nrmat_ns, (hipDoubleComplex *)devBgij);
#else
devOffsets, nrmat_ns, (hipDoubleComplex *)devBgij, devTestSM);
{
// test
// Matrix<Real> testLIZPos(3,atom.numLIZ);
// Matrix<Complex> bgij(nrmat_ns, nrmat_ns);
Complex testIlp1[2*lsms.maxlmax + 1];
// hipMemcpy(&bgij[0], devBgij, nrmat_ns*nrmat_ns*sizeof(Complex), hipMemcpyDeviceToHost);
// hipMemcpy(&testLIZPos[0], devAtom.LIZPos, 3*atom.numLIZ*sizeof(Real), hipMemcpyDeviceToHost);
hipMemcpy(&testIlp1[0], DeviceConstants::ilp1, (2*lsms.maxlmax + 1)*sizeof(Complex), hipMemcpyDeviceToHost);
printf("in calculateTauMatrix: before setBGijCuda:\n");
for(int l=0; l<2*lsms.maxlmax; l++)
{
printf("l=%d : ilp1 [%g + %gi] | DeviceConstats::ilp1 [%g + %gi]\n",l,IFactors::ilp1[l].real(),IFactors::ilp1[l].imag(), testIlp1[l].real(), testIlp1[l].imag());
}
}
#endif
hipLaunchKernelGGL(( setBGijCuda), dim3(blocks), dim3(threads), 0, 0, fullRelativity, lsms.n_spin_cant, devAtom.LIZlmax,
devOffsets, nrmat_ns, (hipDoubleComplex *)devBgij);
#ifdef COMPARE_ORIGINAL
bool exitCompare = false;
Matrix<Real> testLIZPos(3,atom.numLIZ);
Matrix<Complex> bgij(nrmat_ns, nrmat_ns);
Complex testIlp1[2*lsms.maxlmax + 1];
hipMemcpy(&bgij[0], devBgij, nrmat_ns*nrmat_ns*sizeof(Complex), hipMemcpyDeviceToHost);
hipMemcpy(&testLIZPos[0], devAtom.LIZPos, 3*atom.numLIZ*sizeof(Real), hipMemcpyDeviceToHost);
hipMemcpy(&testIlp1[0], DeviceConstants::ilp1, (2*lsms.maxlmax + 1)*sizeof(Complex), hipMemcpyDeviceToHost);
for(int l=0; l<2*lsms.maxlmax; l++)
{
printf("l=%d : ilp1 [%g + %gi] | DeviceConstats::ilp1 [%g + %gi]\n",l,IFactors::ilp1[l].real(),IFactors::ilp1[l].imag(), testIlp1[l].real(), testIlp1[l].imag());
}
Complex testHfn[2*lsms.maxlmax + 1];
Real testSinmp[2*lsms.maxlmax + 1];
Real testCosmp[2*lsms.maxlmax + 1];
// Real plm[((lsms.maxlmax+1) * (lsms.maxlmax+2)) / 2];
Real testPlm[lsms.angularMomentumIndices.ndlm];
Complex testDlm[lsms.angularMomentumIndices.ndlj];
hipMemcpy(testHfn, devTestSM + hfnOffset, (2*lsms.maxlmax + 1)*sizeof(Complex), hipMemcpyDeviceToHost);
hipMemcpy(testSinmp, devTestSM + sinmpOffset, (2*lsms.maxlmax + 1)*sizeof(Real), hipMemcpyDeviceToHost);
hipMemcpy(testCosmp, devTestSM + cosmpOffset, (2*lsms.maxlmax + 1)*sizeof(Real), hipMemcpyDeviceToHost);
hipMemcpy(testPlm, devTestSM + plmOffset, lsms.angularMomentumIndices.ndlm*sizeof(Real), hipMemcpyDeviceToHost);
hipMemcpy(testDlm, devTestSM + dlmOffset, lsms.angularMomentumIndices.ndlj*sizeof(Complex), hipMemcpyDeviceToHost);
for(int i = 0; i < atom.numLIZ; i++)
{
if(atom.LIZPos(0,i) != testLIZPos(0,i) ||
atom.LIZPos(1,i) != testLIZPos(1,i) ||
atom.LIZPos(2,i) != testLIZPos(2,i))
{
printf("atom.LIZPos(*,%d) [%lf,%lf,%lf] != devAtom.LIZPos(*,%d) [%lf,%lf,%lf]\n",
i,atom.LIZPos(0,i),atom.LIZPos(1,i),atom.LIZPos(2,i),
i,testLIZPos(0,i),testLIZPos(1,i),testLIZPos(2,i));
}
}
// loop over the LIZ blocks
Complex hfn[2*lsms.maxlmax + 1];
Real sinmp[2*lsms.maxlmax + 1];
Real cosmp[2*lsms.maxlmax + 1];
// Real plm[((lsms.maxlmax+1) * (lsms.maxlmax+2)) / 2];
Real plm[lsms.angularMomentumIndices.ndlm];
Complex dlm[lsms.angularMomentumIndices.ndlj];
Real rij[3];
Real pi4=4.0*2.0*std::asin(1.0);
for(int ir1 = 0; ir1 < atom.numLIZ; ir1++)
{
int iOffset = ir1 * kkrsz_ns; // this assumes that there are NO lStep reductions of lmax!!!
for(int ir2 = 0; ir2 < atom.numLIZ; ir2++)
{
int jOffset = ir2 * kkrsz_ns; // this assumes that there are NO lStep reductions of lmax!!
int lmax1 = atom.LIZlmax[ir1];
int lmax2 = atom.LIZlmax[ir2];
int kkri=(lmax1+1)*(lmax1+1);
int kkrj=(lmax2+1)*(lmax2+1);
rij[0]=atom.LIZPos(0,ir1)-atom.LIZPos(0,ir2);
rij[1]=atom.LIZPos(1,ir1)-atom.LIZPos(1,ir2);
rij[2]=atom.LIZPos(2,ir1)-atom.LIZPos(2,ir2);
if(ir1 != ir2)
{
int kkr1 = kkri;
int kkr2 = kkrj;
Matrix<Complex> gijTest(kkr1,kkr2);
Matrix<Complex> bgijTest(2*kkr1, 2*kkr2);
int lmax=lsms.maxlmax;
int kkrsz=(lmax+1)*(lmax+1);
makegij_(&atom.LIZlmax[ir1],&kkr1,&atom.LIZlmax[ir2],&kkr2,
&lsms.maxlmax,&kkrsz,&lsms.angularMomentumIndices.ndlj,&lsms.angularMomentumIndices.ndlm,
&prel,&rij[0],&sinmp[0],&cosmp[0],
&sphericalHarmonicsCoeficients.clm[0],&plm[0],
&gauntCoeficients.cgnt(0,0,0),&gauntCoeficients.lmax,
&lsms.angularMomentumIndices.lofk[0],&lsms.angularMomentumIndices.mofk[0],
&iFactors.ilp1[0],&iFactors.illp(0,0),
&hfn[0],&dlm[0],&gijTest(0,0),
&pi4,&lsms.global.iprint,lsms.global.istop,32);
if(ir1 == 0 && ir2 == 1)
{
for(int l=0; l<=atom.LIZlmax[ir1]+atom.LIZlmax[ir2]; l++)
{
if(sinmp[l] != testSinmp[l])
printf("sinmp[%d] (%g) != testSinmp[%d] (%g)\n", l, sinmp[l], l, testSinmp[l]);
if(cosmp[l] != testCosmp[l])
printf("cosmp[%d] (%g) != testCosmp[%d] (%g)\n", l, cosmp[l], l, testCosmp[l]);
if(hfn[l] != testHfn[l])
printf("hfn[%d] (%g + %gi) != testHfn[%d] (%g + %gi)\n", l, hfn[l].real(), hfn[l].imag(), l, testHfn[l].real(), testHfn[l].imag());
}
}
int idx=0;
for(int i=0; i<kkri; i++)
for(int j=0; j<kkrj; j++)
{
if(bgij(iOffset + i, jOffset + j) != gijTest(i,j))
// if(bgij[idx] != gijTest[idx])
{
printf("buildBGijCPU [idx=%d]: bgij(%d + %d, %d + %d) [%g + %gi] != gijTest(%d, %d) [%g + %gi]\n", idx,
iOffset, i, jOffset, j, bgij(iOffset + i, jOffset + j).real(), bgij(iOffset + i, jOffset + j).imag(),
i, j, gijTest(i,j).real(), gijTest(i,j).imag());
exitCompare = true;
}
if(bgij(iOffset + kkri + i, jOffset + kkrj + j) != gijTest(i,j))
// if(bgij[idx] != gijTest[idx])
{
printf("buildBGijCPU : bgij(%d + %d, %d + %d) [%g + %gi] != gijTest(%d, %d) [%g + %gi]\n",
iOffset, i+kkri, jOffset, j+kkrj, bgij(iOffset + kkri + i, jOffset + kkrj + j).real(), bgij(iOffset + kkri + i, jOffset + kkrj + j).imag(),
i, j, gijTest(i,j).real(), gijTest(i,j).imag());
exitCompare = true;
}
if(bgij(iOffset + kkri + i, jOffset + j) != 0.0) //gijTest(i+kkri,j))
// if(bgij[idx] != gijTest[idx])
{
printf("buildBGijCPU : bgij(%d + %d, %d + %d) [%g + %gi] != 0.0\n",
iOffset, i+kkri, jOffset, j, bgij(iOffset + kkri + i, jOffset + j).real(), bgij(iOffset + kkri + i, jOffset + j).imag());
exitCompare = true;
}
if(bgij(iOffset + i, jOffset + kkrj + j) != 0.0) //gijTest(i,j+kkrj))
// if(bgij[idx] != gijTest[idx])
{
printf("buildBGijCPU : bgij(%d + %d, %d + %d) [%g + %gi] != 0.0\n",
iOffset, i, jOffset, j+kkrj, bgij(iOffset + i, jOffset + kkrj + j).real(), bgij(iOffset + i, jOffset + kkrj + j).imag());
exitCompare = true;
}
idx++;
}
}
}
}
/*
Complex psq=prel*prel;
for(int ir1 = 0; ir1 < atom.numLIZ; ir1++)
{
int iOffset = ir1 * kkrsz_ns; // this assumes that there are NO lStep reductions of lmax!!!
for(int ir2 = 0; ir2 < atom.numLIZ; ir2++)
{
int jOffset = ir2 * kkrsz_ns; // this assumes that there are NO lStep reductions of lmax!!
int lmax1 = atom.LIZlmax[ir1];
int lmax2 = atom.LIZlmax[ir2];
int kkr1=(lmax1+1)*(lmax1+1);
int kkr2=(lmax2+1)*(lmax2+1);
int kkr1_ns = 2*kkr1;
int kkr2_ns = 2*kkr2;
int nrel_rel=0;
if(lsms.relativity==full) nrel_rel=1;
setgij_(&gijTest(0,0),&bgijTest(0,0),&kkr1,&kkr1_ns,&kkr2,&kkr2_ns,
&lsms.n_spin_cant,&nrel_rel,&psq,&energy);
idx=0;
for(int i=0; i<2*kkri; i++)
for(int j=0; j<2*kkrj; j++)
{
// if(bgij(iOffset + i, jOffset + j) != bgijTest(i,j))
if(bgij[idx] != bgijTest[idx])
{
printf("buildBGijCPU [idx=%d]: bgij(%d + %d, %d + %d) [%g + %gi] != bgijTest(%d, %d) [%g + %gi]\n", idx,
iOffset, i, jOffset, j, bgij(iOffset + i, jOffset + j).real(), bgij(iOffset + i, jOffset + j).imag(),
i, j, bgijTest(i,j).real(), bgijTest(i,j).imag());
exitCompare = true;
}
idx++;
}
if((ir1==1 && ir2==0) || (ir1==10 && ir2==0))
{
printf("ir1=%d, ir2=%d: bgij(0,0) = %g + %gi; bgijTest(0,0) = %g + %gi\n",
ir1, ir2, bgij(0,0).real(), bgij(0,0).imag(), bgijTest(0,0).real(), bgijTest(0,0).imag());
printf(" rij = %g %g %g; prel=%g + %gi\n", rij[0], rij[1], rij[2], prel.real(), prel.imag());
printf(" kkr1 = %d; kkr2 = %d; kkrsz = %d\n", kkr1, kkr2, kkrsz);
}
*/
#endif
smSize = kkrsz_ns*kkrsz_ns*sizeof(hipDoubleComplex);
threads = 256;
// threads = 1;
// printf("buildKKRMatrixMultiplyKernelCuda: smSize=%zu\n",smSize);
  // note that the shared memory requirements of the present implementation are too large for lmax>3
//hipLaunchKernelGGL(( buildKKRMatrixMultiplyKernelCuda), dim3(blocks), dim3(threads), smSize, 0, devAtom.LIZlmax, devAtom.LIZStoreIdx, devOffsets,
hipLaunchKernelGGL(( buildKKRMatrixMultiplyKernelCuda), dim3(blocks), dim3(threads), 0, 0, devAtom.LIZlmax, devAtom.LIZStoreIdx, devOffsets,
kkrsz_ns, ispin, lsms.n_spin_pola, lsms.n_spin_cant,
iie, d.getBlkSizeTmatStore(), d.getTmatStoreLDim(),
(hipDoubleComplex *)d.getDevTmatStore(), nrmat_ns,
(hipDoubleComplex *)devBgij, (hipDoubleComplex *)devM);
/*
// loop over the LIZ blocks
for(int ir1 = 0; ir1 < devAtom.numLIZ; ir1++)
{
int iOffset = ir1 * kkrsz_ns; // this assumes that there are NO lStep reductions of lmax!!!
for(int ir2 = 0; ir2 < devAtom.numLIZ; ir2++)
{
if(ir1 != ir2)
{
int jOffset = ir2 * kkrsz_ns; // this assumes that there are NO lStep reductions of lmax!!!
int lmax1 = devAtom.LIZlmax[ir1];
int lmax2 = devAtom.LIZlmax[ir2];
int kkr1=(lmax1+1)*(lmax1+1);
int kkr2=(lmax2+1)*(lmax2+1);
int kkr1_ns = kkr1 * lsms.n_spin_cant;
int kkr2_ns = kkr2 * lsms.n_spin_cant;
// buildBGijCuda(lsms, atom, ir1, ir2, rij, energy, prel, iOffset, jOffset, bgij);
// buildBGijCPU(lsms, atom, ir1, ir2, rij, energy, prel, 0, 0, bgijSmall);
BLAS::zgemm_("n", "n", &kkr1_ns, &kkr2_ns, &kkr1_ns, &cmone,
&local.tmatStore(iie*local.blkSizeTmatStore, devAtom.LIZStoreIdx[ir1]), &kkr1_ns,
// &tmat_n(0, 0), &kkr1_ns,
&bgij(iOffset, jOffset), &nrmat_ns, &czero,
// &bgijSmall(0, 0), &kkrsz_ns, &czero,
&m(iOffset, jOffset), &nrmat_ns);
// for(int i=0; i<kkr1_ns; i++)
// for(int j=0; j<kkr2_ns; j++)
// {
// m(iOffset + i, jOffset + j) = 0.0;
// for(int k=0; k<kkr1_ns ; k++)
// m(iOffset + i, jOffset + j) -= tmat_n(i, k) * // local.tmatStore(iie*local.blkSizeTmatStore + , atom.LIZStoreIdx[ir1]) *
// // bgij(iOffset + k, jOffset + j);
// bgijSmall(k, j);
// }
}
}
}
*/
#ifdef COMPARE_ORIGINAL
Matrix<Complex> mCPU(nrmat_ns,nrmat_ns);
Matrix<Complex> mGPU(nrmat_ns,nrmat_ns);
hipMemcpy(&mGPU(0,0), devM, nrmat_ns*nrmat_ns*sizeof(Complex), hipMemcpyDeviceToHost);
buildKKRMatrixCPU(lsms, local, atom, iie, energy, prel, mCPU);
for(int i=0; i<nrmat_ns; i++)
for(int j=0; j<nrmat_ns; j++)
{
if(mCPU(i,j) != mGPU(i,j))
// if(bgij[idx] != gijTest[idx])
{
printf("buildBGijCPU : mCPU(%d, %d) [%g + %gi] != mGPU(%d, %d) [%g + %gi]\n",
i, j, mCPU(i, j).real(), mCPU(i, j).imag(),
i, j, mGPU(i,j).real(), mGPU(i,j).imag());
exitCompare = true;
}
}
if(exitCompare)
exit(1);
#endif
}
void buildKKRMatrixLMaxDifferentCuda(LSMSSystemParameters &lsms, LocalTypeInfo &local, AtomData &atom,
DeviceStorage &d, DeviceAtom &devAtom, int ispin,
int iie, Complex energy, Complex prel, Complex *devM)
{
hipblasHandle_t cublasHandle = DeviceStorage::getCublasHandle();
int nrmat_ns = lsms.n_spin_cant*atom.nrmat; // total size of the kkr matrix
int kkrsz_ns = lsms.n_spin_cant*atom.kkrsz; // size of t00 block
bool fullRelativity = false;
if(lsms.relativity == full) fullRelativity = true;
// Complex cmone = Complex(-1.0,0.0);
// Complex czero=0.0;
Complex *devBgij = d.getDevBGij();
// Matrix<Complex> bgijSmall(kkrsz_ns, kkrsz_ns);
hipDoubleComplex cuEnergy = make_cuDoubleComplex(energy.real(), energy.imag());
hipDoubleComplex cuPrel = make_cuDoubleComplex(prel.real(), prel.imag());
unitMatrixCuda<Complex>(devM, nrmat_ns, nrmat_ns);
zeroMatrixCuda(devBgij, nrmat_ns, nrmat_ns);
// calculate Bgij
// reuse ipvt for offsets
int *devOffsets = d.getDevIpvt();
std::vector<int> offsets(devAtom.numLIZ);
offsets[0] = 0;
for(int ir = 1; ir < atom.numLIZ; ir++)
offsets[ir] = offsets[ir-1] + lsms.n_spin_cant * (atom.LIZlmax[ir-1]+1)*(atom.LIZlmax[ir-1]+1);
hipMemcpy(devOffsets, &offsets[0], atom.numLIZ*sizeof(int), hipMemcpyHostToDevice);
size_t hfnOffset, sinmpOffset, cosmpOffset, plmOffset, dlmOffset;
size_t smSize = sharedMemoryBGijCuda(lsms, &hfnOffset, &sinmpOffset, &cosmpOffset,
&plmOffset, &dlmOffset);
#ifdef COMPARE_ORIGINAL
char *devTestSM;
hipMalloc(&devTestSM, smSize);
#endif
int threads = 256;
dim3 blocks = dim3(devAtom.numLIZ, devAtom.numLIZ,1);
hipLaunchKernelGGL(( buildGijCudaKernel), dim3(blocks),dim3(threads),smSize, 0, devAtom.LIZPos, devAtom.LIZlmax,
DeviceConstants::lofk, DeviceConstants::mofk, DeviceConstants::ilp1, DeviceConstants::illp, DeviceConstants::cgnt,
DeviceConstants::ndlj_illp, DeviceConstants::lmaxp1_cgnt, DeviceConstants::ndlj_cgnt,
hfnOffset, sinmpOffset, cosmpOffset, plmOffset, dlmOffset,
cuEnergy, cuPrel,
#if !defined(COMPARE_ORIGINAL)
devOffsets, nrmat_ns, (hipDoubleComplex *)devBgij);
#else
devOffsets, nrmat_ns, (hipDoubleComplex *)devBgij, devTestSM);
#endif
hipLaunchKernelGGL(( setBGijCuda), dim3(blocks), dim3(threads), 0, 0, fullRelativity, lsms.n_spin_cant, devAtom.LIZlmax,
devOffsets, nrmat_ns, (hipDoubleComplex *)devBgij);
smSize = kkrsz_ns*kkrsz_ns*sizeof(hipDoubleComplex);
hipLaunchKernelGGL(( buildKKRMatrixMultiplyKernelCuda), dim3(blocks), dim3(threads), smSize, 0, devAtom.LIZlmax, devAtom.LIZStoreIdx, devOffsets,
kkrsz_ns, ispin, lsms.n_spin_pola, lsms.n_spin_cant,
iie, d.getBlkSizeTmatStore(), d.getTmatStoreLDim(),
(hipDoubleComplex *)d.getDevTmatStore(), nrmat_ns,
(hipDoubleComplex *)devBgij, (hipDoubleComplex *)devM);
/*
// loop over the LIZ blocks
for(int ir1 = 0; ir1 < devAtom.numLIZ; ir1++)
{
int iOffset = ir1 * kkrsz_ns; // this assumes that there are NO lStep reductions of lmax!!!
for(int ir2 = 0; ir2 < devAtom.numLIZ; ir2++)
{
if(ir1 != ir2)
{
int jOffset = ir2 * kkrsz_ns; // this assumes that there are NO lStep reductions of lmax!!!
int lmax1 = devAtom.LIZlmax[ir1];
int lmax2 = devAtom.LIZlmax[ir2];
int kkr1=(lmax1+1)*(lmax1+1);
int kkr2=(lmax2+1)*(lmax2+1);
int kkr1_ns = kkr1 * lsms.n_spin_cant;
int kkr2_ns = kkr2 * lsms.n_spin_cant;
// buildBGijCuda(lsms, atom, ir1, ir2, rij, energy, prel, iOffset, jOffset, bgij);
// buildBGijCPU(lsms, atom, ir1, ir2, rij, energy, prel, 0, 0, bgijSmall);
BLAS::zgemm_("n", "n", &kkr1_ns, &kkr2_ns, &kkr1_ns, &cmone,
&local.tmatStore(iie*local.blkSizeTmatStore, devAtom.LIZStoreIdx[ir1]), &kkr1_ns,
// &tmat_n(0, 0), &kkr1_ns,
&bgij(iOffset, jOffset), &nrmat_ns, &czero,
// &bgijSmall(0, 0), &kkrsz_ns, &czero,
&m(iOffset, jOffset), &nrmat_ns);
// for(int i=0; i<kkr1_ns; i++)
// for(int j=0; j<kkr2_ns; j++)
// {
// m(iOffset + i, jOffset + j) = 0.0;
// for(int k=0; k<kkr1_ns ; k++)
// m(iOffset + i, jOffset + j) -= tmat_n(i, k) * // local.tmatStore(iie*local.blkSizeTmatStore + , atom.LIZStoreIdx[ir1]) *
// // bgij(iOffset + k, jOffset + j);
// bgijSmall(k, j);
// }
}
}
}
*/
}
void buildKKRMatrixCuda(LSMSSystemParameters &lsms, LocalTypeInfo &local, AtomData &atom,
DeviceStorage &devStorage, DeviceAtom &devAtom, int ispin,
int iie, Complex energy, Complex prel, Complex *devM)
{
// decide between identical lmax and different lmax:
// printf("buildKKRMatrixCuda not finished yet!\n");
// exit(1);
bool lmaxIdentical = true;
if(atom.LIZlmax[0] != lsms.maxlmax)
{
lmaxIdentical = false;
printf("atom.LIZlmax[0] (=%d) != lsms.maxlmax (=%d)\n",atom.LIZlmax[0], lsms.maxlmax);
}
for(int ir = 0; ir < atom.numLIZ; ir++)
{
if(atom.LIZlmax[ir] != atom.LIZlmax[0])
lmaxIdentical = false;
}
if(lmaxIdentical)
{
// printf("lmax identical in buildKKRMatrix\n");
buildKKRMatrixLMaxIdenticalCuda(lsms, local, atom, devStorage, devAtom, ispin,
iie, energy, prel, devM);
} else {
// printf("lmax not identical in buildKKRMatrix\n");
buildKKRMatrixLMaxDifferentCuda(lsms, local, atom, devStorage, devAtom, ispin,
iie, energy, prel, devM);
}
}
| b75d4108331e92719b7b141bfbdb816d307f3825.cu | /* -*- mode: C++; c-file-style: "bsd"; c-basic-offset: 2; indent-tabs-mode: nil -*- */
#include "buildKKRMatrix.hpp"
#include <stdio.h>
#include "Complex.hpp"
#include "Matrix.hpp"
#include <vector>
#include "Accelerator/DeviceStorage.hpp"
#include <cuda_runtime.h>
#include <cuComplex.h>
#include "cuComplexOperators.hpp"
#include <cublas_v2.h>
#include <cusolverDn.h>
#include "linearSolvers.hpp"
// we might want to distinguish between systems where all lmax (and consequently kkrsz_ns) are the same
// and systems with potential different lmax on different atoms and l steps
// #define COMPARE_ORIGINAL 1
// Fortran layout for matrix
// #define IDX(i, j, lDim) (((j)*(lDim))+(i))
#define IDX3(i, j, k, lDim, mDim) (((k)*(lDim)*(mDim)) + ((j)*(lDim)) + (i))
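// Single-thread device helper: builds the scaled spherical Hankel factors
// hfn[l] for l = 0..lend by upward three-term recurrence; the normalization
// applied at the end is documented in the inline comment below.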
__device__
__inline__
void calculateHankelCuda(cuDoubleComplex prel, double r, int lend, cuDoubleComplex *ilp1, cuDoubleComplex *hfn)
{
if(threadIdx.x == 0)
{
const cuDoubleComplex sqrtm1 = make_cuDoubleComplex(0.0, 1.0);
cuDoubleComplex z = prel * make_cuDoubleComplex(r,0.0);
hfn[0] = make_cuDoubleComplex(0.0, -1.0); //-sqrtm1;
hfn[1] = -1.0 - sqrtm1/z;
for(int l=1; l<lend; l++)
{
hfn[l+1] = ((2.0*l+1.0) * hfn[l]/z) - hfn[l-1];
}
    //  hfn[l] = -i^(l+1) * h_l(k*R_ij) * sqrt(E)
z = exp(sqrtm1*z)/r;
for(int l=0; l<=lend; l++)
{
hfn[l] = ((-hfn[l]) * z) * ilp1[l];
}
}
// __syncthreads();
}
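// Device helper: computes cos(m*phi) and sin(m*phi) for m = 0..lend from the
// x/y components of rij via the angle-addition recurrence, with a tolerance
// guard for vectors (nearly) parallel to the z axis.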
__device__
__inline__
void calculateSinCosPowersCuda(Real *rij, int lend, Real *sinmp, Real *cosmp)
{
const Real ptol = 1.0e-6;
Real pmag = std::sqrt(rij[0]*rij[0]+rij[1]*rij[1]);
cosmp[0] = 1.0;
sinmp[0] = 0.0;
if(pmag>ptol)
{
cosmp[1] = rij[0]/pmag;
sinmp[1] = rij[1]/pmag;
} else {
cosmp[1] = 0.0;
sinmp[1] = 0.0;
}
for(int m=2; m<=lend; m++)
{
cosmp[m] = cosmp[m-1]*cosmp[1] - sinmp[m-1]*sinmp[1];
sinmp[m] = sinmp[m-1]*cosmp[1] + cosmp[m-1]*sinmp[1];
}
}
// __device__ __inline__ int plmIdxDev(int l, int m)
// { return l*(l+1)/2+m; }
#define PLM_IDX(l,m) (((l)*((l)+1))/2 + (m))
__device__
__inline__
void associatedLegendreFunctionNormalizedCuda(Real x, int lmax, Real *Plm)
{
const Real pi = std::acos(-1.0);
// y = \sqrt{1-x^2}
Real y = std::sqrt(1.0-x*x);
// initialize the first entry
// Plm[0]=std::sqrt(R(1)/(R(2)*pi));
Plm[0]=std::sqrt(1.0/(4.0*pi));
if(lmax<1) return;
for(int m=1; m<=lmax; m++)
{
// \bar{P}_{mm} = - \sqrt{\frac{2m+1}{2m}} y \bar{P}_{m-1, m-1}
Plm[PLM_IDX(m,m)] = - std::sqrt(Real(2*m+1)/Real(2*m)) * y * Plm[PLM_IDX(m-1,m-1)];
// \bar{P}_{mm-1} = \sqrt{2 m + 1} x \bar{P}_{m-1, m-1}
Plm[PLM_IDX(m,m-1)] = std::sqrt(Real(2*m+1)) * x * Plm[PLM_IDX(m-1,m-1)];
}
for(int m=0; m<lmax; m++)
{
for(int l=m+2; l<=lmax; l++)
{
      // \bar{P}_{lm} = a_{lm} (x \bar{P}_{l-1, m} - b_{lm} \bar{P}_{l-2, m})
      // a_{lm} = \sqrt{\frac{4 l^2 - 1}{l^2 - m^2}}
      // b_{lm} = \sqrt{\frac{(l-1)^2 - m^2}{4 (l-1)^2 - 1}}
Real a_lm = std::sqrt(Real(4*l*l-1)/Real(l*l - m*m));
Real b_lm = std::sqrt(Real((l-1)*(l-1) - m*m)/Real(4*(l-1)*(l-1)-1));
Plm[PLM_IDX(l,m)] = a_lm * (x * Plm[PLM_IDX(l-1,m)] - b_lm * Plm[PLM_IDX(l-2,m)]);
}
}
}
__device__
__inline__
cuDoubleComplex dlmFunction(cuDoubleComplex *hfn, double *cosmp, double *sinmp, double *plm, int l, int m)
{
int mAbs = abs(m);
cuDoubleComplex dlm = hfn[l]*plm[PLM_IDX(l,mAbs)];
if(m==0) return dlm;
if(m<0)
{
dlm = dlm * make_cuDoubleComplex(cosmp[mAbs],sinmp[mAbs]);
    if((mAbs & 0x01) != 0) // m is odd
dlm = -dlm;
} else {
dlm = dlm * make_cuDoubleComplex(cosmp[mAbs],-sinmp[mAbs]);
}
return dlm;
}
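// Computes the dynamic shared-memory layout used by buildGijCudaKernel:
// byte offsets for the hfn, sinmp, cosmp and plm scratch arrays and the
// total number of bytes to request at launch.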
size_t sharedMemoryBGijCuda(LSMSSystemParameters &lsms, size_t *hfnOffset, size_t *sinmpOffset, size_t *cosmpOffset,
size_t *plmOffset, size_t *dlmOffset)
{
size_t size = 0;
*hfnOffset = size;
size += sizeof(cuDoubleComplex) * (2*lsms.maxlmax + 1);
*sinmpOffset = size;
size += sizeof(double) * (2*lsms.maxlmax + 1);
*cosmpOffset = size;
size += sizeof(double) * (2*lsms.maxlmax + 1);
*plmOffset = size;
size += sizeof(double) * (lsms.angularMomentumIndices.ndlm);
// *dlmOffset = size;
// size += sizeof(cuDoubleComplex) * (lsms.angularMomentumIndices.ndlj);
return size;
}
__global__
void setBGijCuda(bool fullRelativity, int n_spin_cant, int *LIZlmax,
int *offsets, size_t nrmat_ns, cuDoubleComplex *devBgij)
{
if(n_spin_cant == 1) return;
int ir1 = blockIdx.x;
int ir2 = blockIdx.y;
int iOffset = offsets[ir1];
int jOffset = offsets[ir2];
int kkri=(LIZlmax[ir1]+1)*(LIZlmax[ir1]+1);
int kkrj=(LIZlmax[ir2]+1)*(LIZlmax[ir2]+1);
if(!fullRelativity) //(lsms.relativity != full)
{
for(int ij=threadIdx.x; ij < kkri*kkrj; ij += blockDim.x)
{
int i = ij % kkri;
int j = ij / kkri;
/*
for(int i=0; i<kkri; i++)
for(int j=0; j<kkrj; j++)
{
*/
devBgij[IDX(iOffset + kkri + i, jOffset + j, nrmat_ns)] = make_cuDoubleComplex(0.0, 0.0); // bgij(iOffset + i, jOffset + j);
        devBgij[IDX(iOffset + i, jOffset + kkrj + j, nrmat_ns)] = make_cuDoubleComplex(0.0, 0.0); // bgij(iOffset + i, jOffset + j);
devBgij[IDX(iOffset + kkri + i, jOffset + kkrj + j, nrmat_ns)] = devBgij[IDX(iOffset + i, jOffset + j, nrmat_ns)];
}
} else {
/*
call relmtrx(gij,bgij,kkr1,kkr2)
fac=psq/ce
do i=1,kkr1_ns
do j=1,kkr2_ns
bgij(i,j)=fac*bgij(i,j)
end do
end do
*/
printf("Fully relativistic calculation not yet implemented in 'MultipleScattering/buildKKRMatrix.cpp : setBGijCPU'\n");
// exit(1);
}
}
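// One thread block per (ir1, ir2) pair of LIZ sites: assembles the real-space
// structure-constant block g_ij(E) into devBgij from the Hankel factors,
// normalized associated Legendre functions, sin/cos powers and Gaunt
// coefficients held in shared memory.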
__global__
void buildGijCudaKernel(Real *LIZPos, int *LIZlmax, int *lofk, int *mofk, cuDoubleComplex *ilp1, cuDoubleComplex *illp, Real *cgnt,
int ndlj_illp, int lmaxp1_cgnt, int ndlj_cgnt,
size_t hfnOffset, size_t sinmpOffset, size_t cosmpOffset, size_t plmOffset, size_t dlmOffset,
#if !defined(COMPARE_ORIGINAL)
cuDoubleComplex energy, cuDoubleComplex prel, int *offsets, size_t nrmat_ns, cuDoubleComplex *devBgij)
#else
cuDoubleComplex energy, cuDoubleComplex prel, int *offsets, size_t nrmat_ns, cuDoubleComplex *devBgij, char *testSM)
#endif
// void buildBGijCPU(LSMSSystemParameters &lsms, AtomData &atom, int ir1, int ir2, Real *rij,
// Complex energy, Complex prel, int iOffset, int jOffset, Matrix<Complex> &bgij)
{
int ir1 = blockIdx.x;
int ir2 = blockIdx.y;
extern char __shared__ sharedMemory[];
if(ir1 != ir2)
{
int iOffset = offsets[ir1];
// int iOffset = ir1 * kkrsz_ns;
int jOffset = offsets[ir2];
// int jOffset = ir2 * kkrsz_ns;
Real rij[3];
rij[0] = LIZPos[3*ir1 + 0] - LIZPos[3*ir2 + 0];
rij[1] = LIZPos[3*ir1 + 1] - LIZPos[3*ir2 + 1];
rij[2] = LIZPos[3*ir1 + 2] - LIZPos[3*ir2 + 2];
// Complex hfn[2*lsms.maxlmax + 1];
cuDoubleComplex *hfn = (cuDoubleComplex *) (sharedMemory + hfnOffset);
// Real sinmp[2*lsms.maxlmax + 1];
Real *sinmp = (Real *) (sharedMemory + sinmpOffset);
// Real cosmp[2*lsms.maxlmax + 1];
Real *cosmp = (Real *) (sharedMemory + cosmpOffset);
// Real plm[lsms.angularMomentumIndices.ndlm];
Real *plm = (Real *) (sharedMemory + plmOffset);
// Complex dlm[lsms.angularMomentumIndices.ndlj];
// cuDoubleComplex *dlm = (cuDoubleComplex *) (sharedMemory + dlmOffset);
#if defined(COMPARE_ORIGINAL)
cuDoubleComplex *testHfn = (cuDoubleComplex *) (testSM + hfnOffset);
Real *testSinmp = (Real *) (testSM + sinmpOffset);
Real *testCosmp = (Real *) (testSM + cosmpOffset);
Real *testPlm = (Real *) (testSM + plmOffset);
cuDoubleComplex *testDlm = (cuDoubleComplex *) (testSM + dlmOffset);
#endif
Real r = std::sqrt(rij[0]*rij[0] + rij[1]*rij[1] + rij[2]*rij[2]);
int lmax1 = LIZlmax[ir1];
int lmax2 = LIZlmax[ir2];
int kkri=(lmax1+1)*(lmax1+1);
int kkrj=(lmax2+1)*(lmax2+1);
int lend = lmax1 + lmax2;
Real pi4=4.0*2.0*std::asin(1.0);
Real cosTheta = rij[2]/r;
if(threadIdx.x == 0)
{
calculateHankelCuda(prel, r, lend, ilp1, hfn);
associatedLegendreFunctionNormalizedCuda(cosTheta, lend, plm);
// for associatedLegendreFunctionNormalized all clm[i] == 1.0
// for(int j=0;j<ndlm_local;j++)
// plm[j]=clm[j]*plm[j];
// calculate cos(phi) and sin(phi) .................................
// needs to be serial
calculateSinCosPowersCuda(rij, lend, sinmp, cosmp);
}
__syncthreads();
/*
// can be parallel
int j;
int ll;
// for(int l = threadIdx.x; l<=lend; l += blockDim.x)
if(threadIdx.x == 0)
{
for(int l = 0; l<=lend; l++)
{
// int ll = l*(l+1);
// j = ll;
// ll = ll/2;
j = l*(l+1);
ll = j/2;
double m1m = 1.0;
dlm[j] = hfn[l]*plm[ll];
for(int m=1; m<=l; m++)
{
m1m = -m1m;
cuDoubleComplex fac = plm[ll+m] * make_cuDoubleComplex(cosmp[m],sinmp[m]);
dlm[j-m] = hfn[l]*m1m*fac;
dlm[j+m] = hfn[l]*cuConj(fac);
}
}
}
__syncthreads();
*/
#if defined(COMPARE_ORIGINAL)
if(ir1 == 0 && ir2 == 1 && threadIdx.x == 0)
{
for(int l = 0; l<=lend; l++)
{
testHfn[l] = hfn[l];
testSinmp[l] = sinmp[l];
testCosmp[l] = cosmp[l];
}
}
#endif
// ================================================================
// calculate g(R_ij)...............................................
// for(int i=0; i<kkri*kkrj; i++) gij[i]=0.0;
// for(int i=0; i<kkri; i++)
// for(int j=0; j<kkrj; j++)
// for(int ij=0; ij < kkri*kkrj; ij++)
for(int ij=threadIdx.x; ij < kkri*kkrj; ij += blockDim.x)
{
int lm2 = ij % kkri;
int lm1 = ij / kkri;
devBgij[IDX(iOffset + lm2, jOffset + lm1, nrmat_ns)] = make_cuDoubleComplex(0.0, 0.0);
// bgij(iOffset + lm2, jOffset + lm1) = 0.0;
// }
// loop over l1,m1............................................
// for(int lm1=0; lm1<kkrj; lm1++)
// {
int l1=lofk[lm1];
int m1=mofk[lm1];
// loop over l2,m2..............................................
// for(int lm2=0; lm2<kkri; lm2++)
// {
int l2=lofk[lm2];
int m2=mofk[lm2];
// ==========================================================
          // illp(lm2,lm1) = i^(l2-l1)
          //
// perform sum over l3 with gaunt # ......................
// ==========================================================
int m3=m2-m1;
int llow=max(abs(m3), abs(l1-l2));
if(cuCabs(prel)==0.0) llow=l1+l2;
for(int l3=l1+l2; l3>=llow; l3-=2)
{
int j=l3*(l3+1)+m3;
// gij[lm2+lm1*kkri] = gij[lm2+lm1*kkri]+cgnt(l3/2,lm1,lm2)*dlm[j];
devBgij[IDX(iOffset + lm2, jOffset + lm1, nrmat_ns)] = devBgij[IDX(iOffset + lm2, jOffset + lm1, nrmat_ns)]
+ cgnt[IDX3(l3/2,lm1,lm2,lmaxp1_cgnt,ndlj_cgnt)]
* dlmFunction(hfn, cosmp, sinmp, plm, l3, m3); //dlm[j];
}
// gij[lm2+lm1*kkri]=pi4*illp(lm2,lm1)*gij[lm2+lm1*kkri];
devBgij[IDX(iOffset + lm2, jOffset + lm1, nrmat_ns)] = devBgij[IDX(iOffset + lm2, jOffset + lm1, nrmat_ns)]
* pi4 * illp[IDX(lm2, lm1, ndlj_illp)];
}
}
}
__device__
void buildTmatNCuda(int ispin, int n_spin_pola, int n_spin_cant, int iie, int blkSizeTmatStore, int tmatStoreLDim,
int kkr1, int kkr2, int lizStoreIdx,
cuDoubleComplex *devTmatStore, int kkrsz_ns, cuDoubleComplex *tmat_n)
{
// Matrix<Complex> tmat_n(lsms.n_spin_cant*atom.kkrsz, lsms.n_spin_cant*atom.kkrsz);
if(threadIdx.x == 0)
{
int im=0;
if(n_spin_pola == n_spin_cant) // non polarized or spin canted
{
int kkrsz = kkrsz_ns/n_spin_cant;
for(int js=0; js<n_spin_cant; js++)
{
int jsm = kkrsz*kkrsz_ns*js;
for(int j=0; j<kkr1; j++)
{
for(int is=0; is<n_spin_cant; is++)
{
int jm=jsm+kkrsz_ns*j+kkrsz*is;
// int one=1;
// BLAS::zcopy_(&kkr1,&local.tmatStore(iie*local.blkSizeTmatStore+jm,atom.LIZStoreIdx[ir1]),&one,&tmat_n[im],&one);
for(int i=0; i<kkr1; i++)
{
tmat_n[im+i] = devTmatStore[IDX(iie*blkSizeTmatStore+jm+i, lizStoreIdx, tmatStoreLDim)];
}
im+=kkr1;
}
}
}
} else { // spin polarized colinear version for ispin
int kkrsz = kkrsz_ns/n_spin_cant;
// int ispin=0;
printf("warning: cant't test building kkrMatrix for collinear spin polarized yet!\n");
// exit(1);
int jsm = kkrsz*kkrsz*ispin; // copy spin up or down?
for(int j=0; j<kkr1; j++)
{
int jm=jsm+kkrsz_ns*j;
// int one=1;
// BLAS::zcopy_(&kkr1,&local.tmatStore(iie*local.blkSizeTmatStore+jm,atom.LIZStoreIdx[ir1]),&one,&tmat_n[im],&one);
for(int i=0; i<kkr1; i++)
{
tmat_n[im+i] = devTmatStore[IDX(iie*blkSizeTmatStore+jm+i, lizStoreIdx, tmatStoreLDim)];
}
im+=kkr1;
}
}
}
__syncthreads();
}
__global__
void buildKKRMatrixMultiplyKernelCuda(int *LIZlmax, int *LIZStoreIdx, int *offsets, int kkrsz_ns,
int ispin, int n_spin_pola, int n_spin_cant, int iie, int blkSizeTmatStore, int tmatStoreLDim,
cuDoubleComplex *devTmatStore, int nrmat_ns, cuDoubleComplex *devBgij, cuDoubleComplex *devM)
{
int ir1 = blockIdx.x;
int ir2 = blockIdx.y;
// extern cuDoubleComplex __shared__ *tmat_n;
cuDoubleComplex *tmat_n;
int iOffset = offsets[ir1];
int jOffset = offsets[ir2];
if(ir1 != ir2)
{
int lmax1 = LIZlmax[ir1];
int lmax2 = LIZlmax[ir2];
int kkr1=(lmax1+1)*(lmax1+1);
int kkr2=(lmax2+1)*(lmax2+1);
int kkr1_ns = kkr1 * n_spin_cant;
int kkr2_ns = kkr2 * n_spin_cant;
// BLAS::zgemm_("n", "n", &kkr1_ns, &kkr2_ns, &kkr1_ns, &cmone,
// &local.tmatStore(iie*local.blkSizeTmatStore, devAtom.LIZStoreIdx[ir1]), &kkr1_ns,
// // &tmat_n(0, 0), &kkr1_ns,
// &bgij(iOffset, jOffset), &nrmat_ns, &czero,
// // &bgijSmall(0, 0), &kkrsz_ns, &czero,
// &m(iOffset, jOffset), &nrmat_ns);
// for(int i=0; i<kkr1_ns; i++)
// for(int j=0; j<kkr2_ns; j++)
// buildTmatNCuda(ispin, n_spin_pola, n_spin_cant, iie, blkSizeTmatStore, tmatStoreLDim,
// kkr1, kkr2, LIZStoreIdx[ir1], devTmatStore, kkrsz_ns, tmat_n);
tmat_n = &devTmatStore[IDX(iie*blkSizeTmatStore, LIZStoreIdx[ir1], tmatStoreLDim)];
for(int ij=threadIdx.x; ij < kkr1_ns*kkr2_ns; ij += blockDim.x)
{
int i = ij % kkr1_ns;
int j = ij / kkr1_ns;
devM[IDX(iOffset + i, jOffset + j, nrmat_ns)] = make_cuDoubleComplex(0.0,0.0);
for(int k=0; k<kkr1_ns ; k++)
devM[IDX(iOffset + i, jOffset + j, nrmat_ns)] = devM[IDX(iOffset + i, jOffset + j, nrmat_ns)] -
tmat_n[IDX(i,k,kkr1_ns)] * // tmat_n(i, k) * // local.tmatStore(iie*local.blkSizeTmatStore + , atom.LIZStoreIdx[ir1]) *
devBgij[IDX(iOffset + k, jOffset + j, nrmat_ns)];
}
}
}
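// Host driver for the case where every LIZ atom carries the same lmax: M is preset to the
// unit matrix and Bgij to zero, the per-site block offsets are uniform multiples of
// kkrsz_ns, and three kernels run in sequence (buildGijCudaKernel, setBGijCuda, and the
// block-multiply kernel above). The COMPARE_ORIGINAL sections rebuild everything on the
// CPU with makegij_/buildKKRMatrixCPU and abort on the first mismatch.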
void buildKKRMatrixLMaxIdenticalCuda(LSMSSystemParameters &lsms, LocalTypeInfo &local, AtomData &atom,
DeviceStorage &d, DeviceAtom &devAtom, int ispin,
int iie, Complex energy, Complex prel, Complex *devM)
{
cublasHandle_t cublasHandle = DeviceStorage::getCublasHandle();
int nrmat_ns = lsms.n_spin_cant*atom.nrmat; // total size of the kkr matrix
int kkrsz_ns = lsms.n_spin_cant*atom.kkrsz; // size of t00 block
bool fullRelativity = false;
if(lsms.relativity == full) fullRelativity = true;
// Complex cmone = Complex(-1.0,0.0);
// Complex czero=0.0;
Complex *devBgij = d.getDevBGij();
// Matrix<Complex> bgijSmall(kkrsz_ns, kkrsz_ns);
cuDoubleComplex cuEnergy = make_cuDoubleComplex(energy.real(), energy.imag());
cuDoubleComplex cuPrel = make_cuDoubleComplex(prel.real(), prel.imag());
unitMatrixCuda<Complex>(devM, nrmat_ns, nrmat_ns);
zeroMatrixCuda(devBgij, nrmat_ns, nrmat_ns);
// calculate Bgij
// reuse ipvt for offsets
int *devOffsets = d.getDevIpvt();
std::vector<int> offsets(devAtom.numLIZ);
for(int ir = 0; ir < devAtom.numLIZ; ir++)
offsets[ir] = ir * kkrsz_ns;
cudaMemcpy(devOffsets, &offsets[0], atom.numLIZ*sizeof(int), cudaMemcpyHostToDevice);
size_t hfnOffset, sinmpOffset, cosmpOffset, plmOffset, dlmOffset;
size_t smSize = sharedMemoryBGijCuda(lsms, &hfnOffset, &sinmpOffset, &cosmpOffset,
&plmOffset, &dlmOffset);
#ifdef COMPARE_ORIGINAL
printf("smSize = %zu\n", smSize);
printf(" hfnOffset = %zu\n", hfnOffset);
printf(" sinmpOffset = %zu\n", sinmpOffset);
printf(" cosmpOffset = %zu\n", cosmpOffset);
printf(" plmOffset = %zu\n", plmOffset);
printf(" dlmOffset = %zu\n", dlmOffset);
char *devTestSM;
cudaMalloc(&devTestSM, smSize);
{
// test
// Matrix<Real> testLIZPos(3,atom.numLIZ);
// Matrix<Complex> bgij(nrmat_ns, nrmat_ns);
Complex testIlp1[2*lsms.maxlmax + 1];
// cudaMemcpy(&bgij[0], devBgij, nrmat_ns*nrmat_ns*sizeof(Complex), cudaMemcpyDeviceToHost);
// cudaMemcpy(&testLIZPos[0], devAtom.LIZPos, 3*atom.numLIZ*sizeof(Real), cudaMemcpyDeviceToHost);
cudaMemcpy(&testIlp1[0], DeviceConstants::ilp1, (2*lsms.maxlmax + 1)*sizeof(Complex), cudaMemcpyDeviceToHost);
printf("in calculateTauMatrix: before buildGijCudaKernel:\n");
for(int l=0; l<2*lsms.maxlmax; l++)
{
      printf("l=%d : ilp1 [%g + %gi] | DeviceConstants::ilp1 [%g + %gi]\n",l,IFactors::ilp1[l].real(),IFactors::ilp1[l].imag(), testIlp1[l].real(), testIlp1[l].imag());
}
}
#endif
int threads = 256;
// int threads = 1;
dim3 blocks = dim3(devAtom.numLIZ, devAtom.numLIZ,1);
buildGijCudaKernel<<<blocks,threads,smSize>>>(devAtom.LIZPos, devAtom.LIZlmax,
DeviceConstants::lofk, DeviceConstants::mofk, DeviceConstants::ilp1, DeviceConstants::illp, DeviceConstants::cgnt,
DeviceConstants::ndlj_illp, DeviceConstants::lmaxp1_cgnt, DeviceConstants::ndlj_cgnt,
hfnOffset, sinmpOffset, cosmpOffset, plmOffset, dlmOffset,
cuEnergy, cuPrel,
#if !defined(COMPARE_ORIGINAL)
devOffsets, nrmat_ns, (cuDoubleComplex *)devBgij);
#else
devOffsets, nrmat_ns, (cuDoubleComplex *)devBgij, devTestSM);
{
// test
// Matrix<Real> testLIZPos(3,atom.numLIZ);
// Matrix<Complex> bgij(nrmat_ns, nrmat_ns);
Complex testIlp1[2*lsms.maxlmax + 1];
// cudaMemcpy(&bgij[0], devBgij, nrmat_ns*nrmat_ns*sizeof(Complex), cudaMemcpyDeviceToHost);
// cudaMemcpy(&testLIZPos[0], devAtom.LIZPos, 3*atom.numLIZ*sizeof(Real), cudaMemcpyDeviceToHost);
cudaMemcpy(&testIlp1[0], DeviceConstants::ilp1, (2*lsms.maxlmax + 1)*sizeof(Complex), cudaMemcpyDeviceToHost);
printf("in calculateTauMatrix: before setBGijCuda:\n");
for(int l=0; l<2*lsms.maxlmax; l++)
{
      printf("l=%d : ilp1 [%g + %gi] | DeviceConstants::ilp1 [%g + %gi]\n",l,IFactors::ilp1[l].real(),IFactors::ilp1[l].imag(), testIlp1[l].real(), testIlp1[l].imag());
}
}
#endif
setBGijCuda<<<blocks, threads>>>(fullRelativity, lsms.n_spin_cant, devAtom.LIZlmax,
devOffsets, nrmat_ns, (cuDoubleComplex *)devBgij);
#ifdef COMPARE_ORIGINAL
bool exitCompare = false;
Matrix<Real> testLIZPos(3,atom.numLIZ);
Matrix<Complex> bgij(nrmat_ns, nrmat_ns);
Complex testIlp1[2*lsms.maxlmax + 1];
cudaMemcpy(&bgij[0], devBgij, nrmat_ns*nrmat_ns*sizeof(Complex), cudaMemcpyDeviceToHost);
cudaMemcpy(&testLIZPos[0], devAtom.LIZPos, 3*atom.numLIZ*sizeof(Real), cudaMemcpyDeviceToHost);
cudaMemcpy(&testIlp1[0], DeviceConstants::ilp1, (2*lsms.maxlmax + 1)*sizeof(Complex), cudaMemcpyDeviceToHost);
for(int l=0; l<2*lsms.maxlmax; l++)
{
    printf("l=%d : ilp1 [%g + %gi] | DeviceConstants::ilp1 [%g + %gi]\n",l,IFactors::ilp1[l].real(),IFactors::ilp1[l].imag(), testIlp1[l].real(), testIlp1[l].imag());
}
Complex testHfn[2*lsms.maxlmax + 1];
Real testSinmp[2*lsms.maxlmax + 1];
Real testCosmp[2*lsms.maxlmax + 1];
// Real plm[((lsms.maxlmax+1) * (lsms.maxlmax+2)) / 2];
Real testPlm[lsms.angularMomentumIndices.ndlm];
Complex testDlm[lsms.angularMomentumIndices.ndlj];
cudaMemcpy(testHfn, devTestSM + hfnOffset, (2*lsms.maxlmax + 1)*sizeof(Complex), cudaMemcpyDeviceToHost);
cudaMemcpy(testSinmp, devTestSM + sinmpOffset, (2*lsms.maxlmax + 1)*sizeof(Real), cudaMemcpyDeviceToHost);
cudaMemcpy(testCosmp, devTestSM + cosmpOffset, (2*lsms.maxlmax + 1)*sizeof(Real), cudaMemcpyDeviceToHost);
cudaMemcpy(testPlm, devTestSM + plmOffset, lsms.angularMomentumIndices.ndlm*sizeof(Real), cudaMemcpyDeviceToHost);
cudaMemcpy(testDlm, devTestSM + dlmOffset, lsms.angularMomentumIndices.ndlj*sizeof(Complex), cudaMemcpyDeviceToHost);
for(int i = 0; i < atom.numLIZ; i++)
{
if(atom.LIZPos(0,i) != testLIZPos(0,i) ||
atom.LIZPos(1,i) != testLIZPos(1,i) ||
atom.LIZPos(2,i) != testLIZPos(2,i))
{
printf("atom.LIZPos(*,%d) [%lf,%lf,%lf] != devAtom.LIZPos(*,%d) [%lf,%lf,%lf]\n",
i,atom.LIZPos(0,i),atom.LIZPos(1,i),atom.LIZPos(2,i),
i,testLIZPos(0,i),testLIZPos(1,i),testLIZPos(2,i));
}
}
// loop over the LIZ blocks
Complex hfn[2*lsms.maxlmax + 1];
Real sinmp[2*lsms.maxlmax + 1];
Real cosmp[2*lsms.maxlmax + 1];
// Real plm[((lsms.maxlmax+1) * (lsms.maxlmax+2)) / 2];
Real plm[lsms.angularMomentumIndices.ndlm];
Complex dlm[lsms.angularMomentumIndices.ndlj];
Real rij[3];
Real pi4=4.0*2.0*std::asin(1.0);
for(int ir1 = 0; ir1 < atom.numLIZ; ir1++)
{
int iOffset = ir1 * kkrsz_ns; // this assumes that there are NO lStep reductions of lmax!!!
for(int ir2 = 0; ir2 < atom.numLIZ; ir2++)
{
int jOffset = ir2 * kkrsz_ns; // this assumes that there are NO lStep reductions of lmax!!
int lmax1 = atom.LIZlmax[ir1];
int lmax2 = atom.LIZlmax[ir2];
int kkri=(lmax1+1)*(lmax1+1);
int kkrj=(lmax2+1)*(lmax2+1);
rij[0]=atom.LIZPos(0,ir1)-atom.LIZPos(0,ir2);
rij[1]=atom.LIZPos(1,ir1)-atom.LIZPos(1,ir2);
rij[2]=atom.LIZPos(2,ir1)-atom.LIZPos(2,ir2);
if(ir1 != ir2)
{
int kkr1 = kkri;
int kkr2 = kkrj;
Matrix<Complex> gijTest(kkr1,kkr2);
Matrix<Complex> bgijTest(2*kkr1, 2*kkr2);
int lmax=lsms.maxlmax;
int kkrsz=(lmax+1)*(lmax+1);
makegij_(&atom.LIZlmax[ir1],&kkr1,&atom.LIZlmax[ir2],&kkr2,
&lsms.maxlmax,&kkrsz,&lsms.angularMomentumIndices.ndlj,&lsms.angularMomentumIndices.ndlm,
&prel,&rij[0],&sinmp[0],&cosmp[0],
&sphericalHarmonicsCoeficients.clm[0],&plm[0],
&gauntCoeficients.cgnt(0,0,0),&gauntCoeficients.lmax,
&lsms.angularMomentumIndices.lofk[0],&lsms.angularMomentumIndices.mofk[0],
&iFactors.ilp1[0],&iFactors.illp(0,0),
&hfn[0],&dlm[0],&gijTest(0,0),
&pi4,&lsms.global.iprint,lsms.global.istop,32);
if(ir1 == 0 && ir2 == 1)
{
for(int l=0; l<=atom.LIZlmax[ir1]+atom.LIZlmax[ir2]; l++)
{
if(sinmp[l] != testSinmp[l])
printf("sinmp[%d] (%g) != testSinmp[%d] (%g)\n", l, sinmp[l], l, testSinmp[l]);
if(cosmp[l] != testCosmp[l])
printf("cosmp[%d] (%g) != testCosmp[%d] (%g)\n", l, cosmp[l], l, testCosmp[l]);
if(hfn[l] != testHfn[l])
printf("hfn[%d] (%g + %gi) != testHfn[%d] (%g + %gi)\n", l, hfn[l].real(), hfn[l].imag(), l, testHfn[l].real(), testHfn[l].imag());
}
}
int idx=0;
for(int i=0; i<kkri; i++)
for(int j=0; j<kkrj; j++)
{
if(bgij(iOffset + i, jOffset + j) != gijTest(i,j))
// if(bgij[idx] != gijTest[idx])
{
printf("buildBGijCPU [idx=%d]: bgij(%d + %d, %d + %d) [%g + %gi] != gijTest(%d, %d) [%g + %gi]\n", idx,
iOffset, i, jOffset, j, bgij(iOffset + i, jOffset + j).real(), bgij(iOffset + i, jOffset + j).imag(),
i, j, gijTest(i,j).real(), gijTest(i,j).imag());
exitCompare = true;
}
if(bgij(iOffset + kkri + i, jOffset + kkrj + j) != gijTest(i,j))
// if(bgij[idx] != gijTest[idx])
{
printf("buildBGijCPU : bgij(%d + %d, %d + %d) [%g + %gi] != gijTest(%d, %d) [%g + %gi]\n",
iOffset, i+kkri, jOffset, j+kkrj, bgij(iOffset + kkri + i, jOffset + kkrj + j).real(), bgij(iOffset + kkri + i, jOffset + kkrj + j).imag(),
i, j, gijTest(i,j).real(), gijTest(i,j).imag());
exitCompare = true;
}
if(bgij(iOffset + kkri + i, jOffset + j) != 0.0) //gijTest(i+kkri,j))
// if(bgij[idx] != gijTest[idx])
{
printf("buildBGijCPU : bgij(%d + %d, %d + %d) [%g + %gi] != 0.0\n",
iOffset, i+kkri, jOffset, j, bgij(iOffset + kkri + i, jOffset + j).real(), bgij(iOffset + kkri + i, jOffset + j).imag());
exitCompare = true;
}
if(bgij(iOffset + i, jOffset + kkrj + j) != 0.0) //gijTest(i,j+kkrj))
// if(bgij[idx] != gijTest[idx])
{
printf("buildBGijCPU : bgij(%d + %d, %d + %d) [%g + %gi] != 0.0\n",
iOffset, i, jOffset, j+kkrj, bgij(iOffset + i, jOffset + kkrj + j).real(), bgij(iOffset + i, jOffset + kkrj + j).imag());
exitCompare = true;
}
idx++;
}
}
}
}
/*
Complex psq=prel*prel;
for(int ir1 = 0; ir1 < atom.numLIZ; ir1++)
{
int iOffset = ir1 * kkrsz_ns; // this assumes that there are NO lStep reductions of lmax!!!
for(int ir2 = 0; ir2 < atom.numLIZ; ir2++)
{
int jOffset = ir2 * kkrsz_ns; // this assumes that there are NO lStep reductions of lmax!!
int lmax1 = atom.LIZlmax[ir1];
int lmax2 = atom.LIZlmax[ir2];
int kkr1=(lmax1+1)*(lmax1+1);
int kkr2=(lmax2+1)*(lmax2+1);
int kkr1_ns = 2*kkr1;
int kkr2_ns = 2*kkr2;
int nrel_rel=0;
if(lsms.relativity==full) nrel_rel=1;
setgij_(&gijTest(0,0),&bgijTest(0,0),&kkr1,&kkr1_ns,&kkr2,&kkr2_ns,
&lsms.n_spin_cant,&nrel_rel,&psq,&energy);
idx=0;
for(int i=0; i<2*kkri; i++)
for(int j=0; j<2*kkrj; j++)
{
// if(bgij(iOffset + i, jOffset + j) != bgijTest(i,j))
if(bgij[idx] != bgijTest[idx])
{
printf("buildBGijCPU [idx=%d]: bgij(%d + %d, %d + %d) [%g + %gi] != bgijTest(%d, %d) [%g + %gi]\n", idx,
iOffset, i, jOffset, j, bgij(iOffset + i, jOffset + j).real(), bgij(iOffset + i, jOffset + j).imag(),
i, j, bgijTest(i,j).real(), bgijTest(i,j).imag());
exitCompare = true;
}
idx++;
}
if((ir1==1 && ir2==0) || (ir1==10 && ir2==0))
{
printf("ir1=%d, ir2=%d: bgij(0,0) = %g + %gi; bgijTest(0,0) = %g + %gi\n",
ir1, ir2, bgij(0,0).real(), bgij(0,0).imag(), bgijTest(0,0).real(), bgijTest(0,0).imag());
printf(" rij = %g %g %g; prel=%g + %gi\n", rij[0], rij[1], rij[2], prel.real(), prel.imag());
printf(" kkr1 = %d; kkr2 = %d; kkrsz = %d\n", kkr1, kkr2, kkrsz);
}
*/
#endif
smSize = kkrsz_ns*kkrsz_ns*sizeof(cuDoubleComplex);
threads = 256;
// threads = 1;
// printf("buildKKRMatrixMultiplyKernelCuda: smSize=%zu\n",smSize);
  // note that the shared memory requirements of the present implementation are too large for lmax>3
// buildKKRMatrixMultiplyKernelCuda<<<blocks, threads, smSize>>>(devAtom.LIZlmax, devAtom.LIZStoreIdx, devOffsets,
buildKKRMatrixMultiplyKernelCuda<<<blocks, threads>>>(devAtom.LIZlmax, devAtom.LIZStoreIdx, devOffsets,
kkrsz_ns, ispin, lsms.n_spin_pola, lsms.n_spin_cant,
iie, d.getBlkSizeTmatStore(), d.getTmatStoreLDim(),
(cuDoubleComplex *)d.getDevTmatStore(), nrmat_ns,
(cuDoubleComplex *)devBgij, (cuDoubleComplex *)devM);
/*
// loop over the LIZ blocks
for(int ir1 = 0; ir1 < devAtom.numLIZ; ir1++)
{
int iOffset = ir1 * kkrsz_ns; // this assumes that there are NO lStep reductions of lmax!!!
for(int ir2 = 0; ir2 < devAtom.numLIZ; ir2++)
{
if(ir1 != ir2)
{
int jOffset = ir2 * kkrsz_ns; // this assumes that there are NO lStep reductions of lmax!!!
int lmax1 = devAtom.LIZlmax[ir1];
int lmax2 = devAtom.LIZlmax[ir2];
int kkr1=(lmax1+1)*(lmax1+1);
int kkr2=(lmax2+1)*(lmax2+1);
int kkr1_ns = kkr1 * lsms.n_spin_cant;
int kkr2_ns = kkr2 * lsms.n_spin_cant;
// buildBGijCuda(lsms, atom, ir1, ir2, rij, energy, prel, iOffset, jOffset, bgij);
// buildBGijCPU(lsms, atom, ir1, ir2, rij, energy, prel, 0, 0, bgijSmall);
BLAS::zgemm_("n", "n", &kkr1_ns, &kkr2_ns, &kkr1_ns, &cmone,
&local.tmatStore(iie*local.blkSizeTmatStore, devAtom.LIZStoreIdx[ir1]), &kkr1_ns,
// &tmat_n(0, 0), &kkr1_ns,
&bgij(iOffset, jOffset), &nrmat_ns, &czero,
// &bgijSmall(0, 0), &kkrsz_ns, &czero,
&m(iOffset, jOffset), &nrmat_ns);
// for(int i=0; i<kkr1_ns; i++)
// for(int j=0; j<kkr2_ns; j++)
// {
// m(iOffset + i, jOffset + j) = 0.0;
// for(int k=0; k<kkr1_ns ; k++)
// m(iOffset + i, jOffset + j) -= tmat_n(i, k) * // local.tmatStore(iie*local.blkSizeTmatStore + , atom.LIZStoreIdx[ir1]) *
// // bgij(iOffset + k, jOffset + j);
// bgijSmall(k, j);
// }
}
}
}
*/
#ifdef COMPARE_ORIGINAL
Matrix<Complex> mCPU(nrmat_ns,nrmat_ns);
Matrix<Complex> mGPU(nrmat_ns,nrmat_ns);
cudaMemcpy(&mGPU(0,0), devM, nrmat_ns*nrmat_ns*sizeof(Complex), cudaMemcpyDeviceToHost);
buildKKRMatrixCPU(lsms, local, atom, iie, energy, prel, mCPU);
for(int i=0; i<nrmat_ns; i++)
for(int j=0; j<nrmat_ns; j++)
{
if(mCPU(i,j) != mGPU(i,j))
// if(bgij[idx] != gijTest[idx])
{
printf("buildBGijCPU : mCPU(%d, %d) [%g + %gi] != mGPU(%d, %d) [%g + %gi]\n",
i, j, mCPU(i, j).real(), mCPU(i, j).imag(),
i, j, mGPU(i,j).real(), mGPU(i,j).imag());
exitCompare = true;
}
}
if(exitCompare)
exit(1);
#endif
}
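// Same pipeline as buildKKRMatrixLMaxIdenticalCuda, but the per-site offsets are the
// running sum of the individual block sizes n_spin_cant*(LIZlmax[ir]+1)^2 rather than a
// uniform kkrsz_ns stride, so sites with a reduced lmax are packed tightly.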
void buildKKRMatrixLMaxDifferentCuda(LSMSSystemParameters &lsms, LocalTypeInfo &local, AtomData &atom,
DeviceStorage &d, DeviceAtom &devAtom, int ispin,
int iie, Complex energy, Complex prel, Complex *devM)
{
cublasHandle_t cublasHandle = DeviceStorage::getCublasHandle();
int nrmat_ns = lsms.n_spin_cant*atom.nrmat; // total size of the kkr matrix
int kkrsz_ns = lsms.n_spin_cant*atom.kkrsz; // size of t00 block
bool fullRelativity = false;
if(lsms.relativity == full) fullRelativity = true;
// Complex cmone = Complex(-1.0,0.0);
// Complex czero=0.0;
Complex *devBgij = d.getDevBGij();
// Matrix<Complex> bgijSmall(kkrsz_ns, kkrsz_ns);
cuDoubleComplex cuEnergy = make_cuDoubleComplex(energy.real(), energy.imag());
cuDoubleComplex cuPrel = make_cuDoubleComplex(prel.real(), prel.imag());
unitMatrixCuda<Complex>(devM, nrmat_ns, nrmat_ns);
zeroMatrixCuda(devBgij, nrmat_ns, nrmat_ns);
// calculate Bgij
// reuse ipvt for offsets
int *devOffsets = d.getDevIpvt();
std::vector<int> offsets(devAtom.numLIZ);
offsets[0] = 0;
for(int ir = 1; ir < atom.numLIZ; ir++)
offsets[ir] = offsets[ir-1] + lsms.n_spin_cant * (atom.LIZlmax[ir-1]+1)*(atom.LIZlmax[ir-1]+1);
cudaMemcpy(devOffsets, &offsets[0], atom.numLIZ*sizeof(int), cudaMemcpyHostToDevice);
size_t hfnOffset, sinmpOffset, cosmpOffset, plmOffset, dlmOffset;
size_t smSize = sharedMemoryBGijCuda(lsms, &hfnOffset, &sinmpOffset, &cosmpOffset,
&plmOffset, &dlmOffset);
#ifdef COMPARE_ORIGINAL
char *devTestSM;
cudaMalloc(&devTestSM, smSize);
#endif
int threads = 256;
dim3 blocks = dim3(devAtom.numLIZ, devAtom.numLIZ,1);
buildGijCudaKernel<<<blocks,threads,smSize>>>(devAtom.LIZPos, devAtom.LIZlmax,
DeviceConstants::lofk, DeviceConstants::mofk, DeviceConstants::ilp1, DeviceConstants::illp, DeviceConstants::cgnt,
DeviceConstants::ndlj_illp, DeviceConstants::lmaxp1_cgnt, DeviceConstants::ndlj_cgnt,
hfnOffset, sinmpOffset, cosmpOffset, plmOffset, dlmOffset,
cuEnergy, cuPrel,
#if !defined(COMPARE_ORIGINAL)
devOffsets, nrmat_ns, (cuDoubleComplex *)devBgij);
#else
devOffsets, nrmat_ns, (cuDoubleComplex *)devBgij, devTestSM);
#endif
setBGijCuda<<<blocks, threads>>>(fullRelativity, lsms.n_spin_cant, devAtom.LIZlmax,
devOffsets, nrmat_ns, (cuDoubleComplex *)devBgij);
smSize = kkrsz_ns*kkrsz_ns*sizeof(cuDoubleComplex);
buildKKRMatrixMultiplyKernelCuda<<<blocks, threads, smSize>>>(devAtom.LIZlmax, devAtom.LIZStoreIdx, devOffsets,
kkrsz_ns, ispin, lsms.n_spin_pola, lsms.n_spin_cant,
iie, d.getBlkSizeTmatStore(), d.getTmatStoreLDim(),
(cuDoubleComplex *)d.getDevTmatStore(), nrmat_ns,
(cuDoubleComplex *)devBgij, (cuDoubleComplex *)devM);
/*
// loop over the LIZ blocks
for(int ir1 = 0; ir1 < devAtom.numLIZ; ir1++)
{
int iOffset = ir1 * kkrsz_ns; // this assumes that there are NO lStep reductions of lmax!!!
for(int ir2 = 0; ir2 < devAtom.numLIZ; ir2++)
{
if(ir1 != ir2)
{
int jOffset = ir2 * kkrsz_ns; // this assumes that there are NO lStep reductions of lmax!!!
int lmax1 = devAtom.LIZlmax[ir1];
int lmax2 = devAtom.LIZlmax[ir2];
int kkr1=(lmax1+1)*(lmax1+1);
int kkr2=(lmax2+1)*(lmax2+1);
int kkr1_ns = kkr1 * lsms.n_spin_cant;
int kkr2_ns = kkr2 * lsms.n_spin_cant;
// buildBGijCuda(lsms, atom, ir1, ir2, rij, energy, prel, iOffset, jOffset, bgij);
// buildBGijCPU(lsms, atom, ir1, ir2, rij, energy, prel, 0, 0, bgijSmall);
BLAS::zgemm_("n", "n", &kkr1_ns, &kkr2_ns, &kkr1_ns, &cmone,
&local.tmatStore(iie*local.blkSizeTmatStore, devAtom.LIZStoreIdx[ir1]), &kkr1_ns,
// &tmat_n(0, 0), &kkr1_ns,
&bgij(iOffset, jOffset), &nrmat_ns, &czero,
// &bgijSmall(0, 0), &kkrsz_ns, &czero,
&m(iOffset, jOffset), &nrmat_ns);
// for(int i=0; i<kkr1_ns; i++)
// for(int j=0; j<kkr2_ns; j++)
// {
// m(iOffset + i, jOffset + j) = 0.0;
// for(int k=0; k<kkr1_ns ; k++)
// m(iOffset + i, jOffset + j) -= tmat_n(i, k) * // local.tmatStore(iie*local.blkSizeTmatStore + , atom.LIZStoreIdx[ir1]) *
// // bgij(iOffset + k, jOffset + j);
// bgijSmall(k, j);
// }
}
}
}
*/
}
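// Entry point: checks whether all LIZ atoms share lsms.maxlmax and dispatches to the
// identical-lmax or different-lmax variant accordingly.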
void buildKKRMatrixCuda(LSMSSystemParameters &lsms, LocalTypeInfo &local, AtomData &atom,
DeviceStorage &devStorage, DeviceAtom &devAtom, int ispin,
int iie, Complex energy, Complex prel, Complex *devM)
{
// decide between identical lmax and different lmax:
// printf("buildKKRMatrixCuda not finished yet!\n");
// exit(1);
bool lmaxIdentical = true;
if(atom.LIZlmax[0] != lsms.maxlmax)
{
lmaxIdentical = false;
printf("atom.LIZlmax[0] (=%d) != lsms.maxlmax (=%d)\n",atom.LIZlmax[0], lsms.maxlmax);
}
for(int ir = 0; ir < atom.numLIZ; ir++)
{
if(atom.LIZlmax[ir] != atom.LIZlmax[0])
lmaxIdentical = false;
}
if(lmaxIdentical)
{
// printf("lmax identical in buildKKRMatrix\n");
buildKKRMatrixLMaxIdenticalCuda(lsms, local, atom, devStorage, devAtom, ispin,
iie, energy, prel, devM);
} else {
// printf("lmax not identical in buildKKRMatrix\n");
buildKKRMatrixLMaxDifferentCuda(lsms, local, atom, devStorage, devAtom, ispin,
iie, energy, prel, devM);
}
}
|
fea970b67da4bc93a573df80fe826862092e2904.hip | // !!! This is a file automatically generated by hipify!!!
#include "DT.cuh"
DTChunk::DTChunk(int argmaxDTLength, int argMaxDocLength, int argNumChunks, int argNumGPUs) {
maxDTLength = argmaxDTLength;
maxDocLength = argMaxDocLength;
numChunks = argNumChunks;
numGPUs = argNumGPUs;
NZDTCount = new int[maxDocLength];
DTIndex = new int[maxDTLength];
DTValue = new int[maxDTLength];
//DTCount = new int[maxDocLength];
//DTOffset = new int[maxDocLength];
DTLengthVec = new int[numChunks];
docLengthVec = new int[numChunks];
}
void DTChunk::loadDocDTLength(string argFilePrefix) {
ifstream DTLength((argFilePrefix + string("/DTLength.txt")).c_str(), ios::binary);//store max Doc and DT length
ifstream docLength((argFilePrefix + string("/docLength.txt")).c_str(), ios::binary);//store max Doc and DT length
for (int chunkId = 0; chunkId < numChunks; chunkId++) {
DTLength >> DTLengthVec[chunkId];
docLength >> docLengthVec[chunkId];
}
DTLength.close();
docLength.close();
}
void DTChunk::CPUMemSet() {
memset(NZDTCount, 0, maxDocLength * sizeof(int));
memset(DTIndex, 0, maxDTLength * sizeof(int));
memset(DTValue, 0, maxDTLength * sizeof(int));
//memset(DTCount, 0, maxDocLength * sizeof(int));
//memset(DTOffset, 0, maxDocLength * sizeof(int));
}
void DTChunk::InitDTGPU()
{
for (int GPUId = 0; GPUId < numGPUs; GPUId++) {
DTGPUChunk GPUChunkDT(maxDTLength, maxDocLength, GPUId);
GPUChunkDT.GPUMemAllocate(GPUId);
GPUChunkDT.GPUMemSet(GPUId);
DTGPUChunkVec.push_back(GPUChunkDT);
}
}
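// Illustrative sketch (not part of the original sources) of how the DTChunk methods in
// this file are presumably meant to be driven for one pass over the chunks. The loop
// structure, the GPU assignment (chunk % numGPUs) and the placeholder "filePrefix" are
// assumptions, not code taken from the project:
//
//   DTChunk dt(maxDTLength, maxDocLength, numChunks, numGPUs);
//   dt.loadDocDTLength(filePrefix);    // per-chunk DT/doc lengths
//   dt.CPUMemSet();                    // zero the host buffers
//   dt.InitDTGPU();                    // one DTGPUChunk per GPU
//   dt.loadDTCountOffset(filePrefix);  // per-chunk count/offset tables
//   for (int c = 0; c < numChunks; c++) {
//     int gpu = c % numGPUs;
//     dt.CPU2GPUDTCountOffset(gpu);    // push DTCount/DTOffset
//     dt.CPU2GPU(gpu);                 // push NZDTCount/DTIndex/DTValue
//     /* ... kernels that update the device DT arrays ... */
//     dt.GPU2CPU(gpu);                 // pull results back
//     dt.CPU2Disk(filePrefix, c);      // persist this chunk
//   }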
//void DTChunk::GPUMemAllocate(int argGPUId) {
//
// GPUId = argGPUId;
// hipSetDevice(GPUId);
// hipMalloc((void**)&deviceNZDTCount, (maxDocLength) * sizeof(int));
// hipMalloc((void**)&deviceDTIndex, (maxDTLength) * sizeof(int));
// hipMalloc((void**)&deviceDTValue, (maxDTLength) * sizeof(int));
// hipMalloc((void**)&deviceDTCount, (maxDocLength) * sizeof(int));
// hipMalloc((void**)&deviceDTOffset, (maxDocLength) * sizeof(int));
//
// DTMemory = (3 * maxDocLength + 2 * maxDTLength) * sizeof(int) / 1000000000.0;
// printf("DT memory usage:%f GB\n", DTMemory);
//
//}
void DTChunk::loadDTCountOffset(string argFilePrefix) {
/*chunkId = argChunkId;*/
for (int chunkId = 0; chunkId < numChunks; chunkId++) {
string chunkFolderName = argFilePrefix + "/chunk" + to_string(chunkId);
ifstream DTCountOffset((chunkFolderName + string("/DTCountOffset.txt")).c_str(), ios::binary);//store Word offset of TL
int* DTCount = new int[docLengthVec[chunkId]];
int* DTOffset = new int[docLengthVec[chunkId]];
memset(DTCount, 0, docLengthVec[chunkId] * sizeof(int));
memset(DTOffset, 0, docLengthVec[chunkId] * sizeof(int));
for (int i = 0; i < docLengthVec[chunkId]; i++)
{
DTCountOffset >> DTCount[i] >> DTOffset[i];
}
DTCountOffset.close();
DTCountVec.push_back(DTCount);
DTOffsetVec.push_back(DTOffset);
}
}
void DTChunk::CPU2GPU(int argGPUId) {
hipSetDevice(argGPUId);
GPUId = argGPUId;
//docLength = argDocLength;
hipMemcpy(DTGPUChunkVec[argGPUId].deviceNZDTCount, NZDTCount, (docLengthVec[chunkId]) * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(DTGPUChunkVec[argGPUId].deviceDTIndex, DTIndex, (DTLengthVec[chunkId]) * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(DTGPUChunkVec[argGPUId].deviceDTValue, DTValue, (DTLengthVec[chunkId]) * sizeof(int), hipMemcpyHostToDevice);
}
//void DTChunk::GPUMemSet(int argChunkId)
//{
// chunkId = argChunkId;
// hipMemset(deviceNZDTCount, 0, (maxDocLength) * sizeof(int));
// hipMemset(deviceDTIndex, 0, (maxDTLength) * sizeof(int));
// hipMemset(deviceDTValue, 0, (maxDTLength) * sizeof(int));
//
//}
void DTChunk::CPU2GPUDTCountOffset(int argGPUId) {
GPUId = argGPUId;
hipSetDevice(argGPUId);
//docLength = argDocLength;
hipMemcpy(DTGPUChunkVec[argGPUId].deviceDTCount, DTCountVec[argGPUId], (docLengthVec[argGPUId]) * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(DTGPUChunkVec[argGPUId].deviceDTOffset, DTOffsetVec[argGPUId], (docLengthVec[argGPUId]) * sizeof(int), hipMemcpyHostToDevice);
}
void DTChunk::GPU2CPU(int argGPUId) {
GPUId = argGPUId;
hipSetDevice(argGPUId);
//docLength = argDocLength;
hipMemcpy(NZDTCount, DTGPUChunkVec[argGPUId].deviceNZDTCount, (docLengthVec[argGPUId]) * sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(DTIndex, DTGPUChunkVec[argGPUId].deviceDTIndex, (DTLengthVec[argGPUId]) * sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(DTValue, DTGPUChunkVec[argGPUId].deviceDTValue, (DTLengthVec[argGPUId]) * sizeof(int), hipMemcpyDeviceToHost);
}
void DTChunk::CPU2Disk(string argFilePrefix,int argChunkId) {
chunkId = argChunkId;
//docLength = argDocLength;
string chunkFolderName = argFilePrefix + "/chunk" + to_string(chunkId);
ofstream OutputNZDTCount((chunkFolderName + string("/NZDTCount.txt")).c_str(), ios::binary);
for (int i = 0; i < docLengthVec[chunkId]; i++) {
OutputNZDTCount << NZDTCount[i] << "\n";
}
OutputNZDTCount.close();
ofstream OutputDTIndexValue((chunkFolderName + string("/DTIndexValue.txt")).c_str(), ios::binary);
for (int i = 0; i < DTLengthVec[chunkId]; i++) {
OutputDTIndexValue << DTIndex[i] <<" "<<DTValue[i]<< "\n";
}
OutputDTIndexValue.close();
} | fea970b67da4bc93a573df80fe826862092e2904.cu | #include "DT.cuh"
DTChunk::DTChunk(int argmaxDTLength, int argMaxDocLength, int argNumChunks, int argNumGPUs) {
maxDTLength = argmaxDTLength;
maxDocLength = argMaxDocLength;
numChunks = argNumChunks;
numGPUs = argNumGPUs;
NZDTCount = new int[maxDocLength];
DTIndex = new int[maxDTLength];
DTValue = new int[maxDTLength];
//DTCount = new int[maxDocLength];
//DTOffset = new int[maxDocLength];
DTLengthVec = new int[numChunks];
docLengthVec = new int[numChunks];
}
void DTChunk::loadDocDTLength(string argFilePrefix) {
ifstream DTLength((argFilePrefix + string("/DTLength.txt")).c_str(), ios::binary);//store max Doc and DT length
ifstream docLength((argFilePrefix + string("/docLength.txt")).c_str(), ios::binary);//store max Doc and DT length
for (int chunkId = 0; chunkId < numChunks; chunkId++) {
DTLength >> DTLengthVec[chunkId];
docLength >> docLengthVec[chunkId];
}
DTLength.close();
docLength.close();
}
void DTChunk::CPUMemSet() {
memset(NZDTCount, 0, maxDocLength * sizeof(int));
memset(DTIndex, 0, maxDTLength * sizeof(int));
memset(DTValue, 0, maxDTLength * sizeof(int));
//memset(DTCount, 0, maxDocLength * sizeof(int));
//memset(DTOffset, 0, maxDocLength * sizeof(int));
}
void DTChunk::InitDTGPU()
{
for (int GPUId = 0; GPUId < numGPUs; GPUId++) {
DTGPUChunk GPUChunkDT(maxDTLength, maxDocLength, GPUId);
GPUChunkDT.GPUMemAllocate(GPUId);
GPUChunkDT.GPUMemSet(GPUId);
DTGPUChunkVec.push_back(GPUChunkDT);
}
}
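// Illustrative sketch (not part of the original sources) of how the DTChunk methods in
// this file are presumably meant to be driven for one pass over the chunks. The loop
// structure, the GPU assignment (chunk % numGPUs) and the placeholder "filePrefix" are
// assumptions, not code taken from the project:
//
//   DTChunk dt(maxDTLength, maxDocLength, numChunks, numGPUs);
//   dt.loadDocDTLength(filePrefix);    // per-chunk DT/doc lengths
//   dt.CPUMemSet();                    // zero the host buffers
//   dt.InitDTGPU();                    // one DTGPUChunk per GPU
//   dt.loadDTCountOffset(filePrefix);  // per-chunk count/offset tables
//   for (int c = 0; c < numChunks; c++) {
//     int gpu = c % numGPUs;
//     dt.CPU2GPUDTCountOffset(gpu);    // push DTCount/DTOffset
//     dt.CPU2GPU(gpu);                 // push NZDTCount/DTIndex/DTValue
//     /* ... kernels that update the device DT arrays ... */
//     dt.GPU2CPU(gpu);                 // pull results back
//     dt.CPU2Disk(filePrefix, c);      // persist this chunk
//   }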
//void DTChunk::GPUMemAllocate(int argGPUId) {
//
// GPUId = argGPUId;
// cudaSetDevice(GPUId);
// cudaMalloc((void**)&deviceNZDTCount, (maxDocLength) * sizeof(int));
// cudaMalloc((void**)&deviceDTIndex, (maxDTLength) * sizeof(int));
// cudaMalloc((void**)&deviceDTValue, (maxDTLength) * sizeof(int));
// cudaMalloc((void**)&deviceDTCount, (maxDocLength) * sizeof(int));
// cudaMalloc((void**)&deviceDTOffset, (maxDocLength) * sizeof(int));
//
// DTMemory = (3 * maxDocLength + 2 * maxDTLength) * sizeof(int) / 1000000000.0;
// printf("DT memory usage:%f GB\n", DTMemory);
//
//}
void DTChunk::loadDTCountOffset(string argFilePrefix) {
/*chunkId = argChunkId;*/
for (int chunkId = 0; chunkId < numChunks; chunkId++) {
string chunkFolderName = argFilePrefix + "/chunk" + to_string(chunkId);
ifstream DTCountOffset((chunkFolderName + string("/DTCountOffset.txt")).c_str(), ios::binary);//store Word offset of TL
int* DTCount = new int[docLengthVec[chunkId]];
int* DTOffset = new int[docLengthVec[chunkId]];
memset(DTCount, 0, docLengthVec[chunkId] * sizeof(int));
memset(DTOffset, 0, docLengthVec[chunkId] * sizeof(int));
for (int i = 0; i < docLengthVec[chunkId]; i++)
{
DTCountOffset >> DTCount[i] >> DTOffset[i];
}
DTCountOffset.close();
DTCountVec.push_back(DTCount);
DTOffsetVec.push_back(DTOffset);
}
}
void DTChunk::CPU2GPU(int argGPUId) {
cudaSetDevice(argGPUId);
GPUId = argGPUId;
//docLength = argDocLength;
cudaMemcpy(DTGPUChunkVec[argGPUId].deviceNZDTCount, NZDTCount, (docLengthVec[chunkId]) * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(DTGPUChunkVec[argGPUId].deviceDTIndex, DTIndex, (DTLengthVec[chunkId]) * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(DTGPUChunkVec[argGPUId].deviceDTValue, DTValue, (DTLengthVec[chunkId]) * sizeof(int), cudaMemcpyHostToDevice);
}
//void DTChunk::GPUMemSet(int argChunkId)
//{
// chunkId = argChunkId;
// cudaMemset(deviceNZDTCount, 0, (maxDocLength) * sizeof(int));
// cudaMemset(deviceDTIndex, 0, (maxDTLength) * sizeof(int));
// cudaMemset(deviceDTValue, 0, (maxDTLength) * sizeof(int));
//
//}
void DTChunk::CPU2GPUDTCountOffset(int argGPUId) {
GPUId = argGPUId;
cudaSetDevice(argGPUId);
//docLength = argDocLength;
cudaMemcpy(DTGPUChunkVec[argGPUId].deviceDTCount, DTCountVec[argGPUId], (docLengthVec[argGPUId]) * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(DTGPUChunkVec[argGPUId].deviceDTOffset, DTOffsetVec[argGPUId], (docLengthVec[argGPUId]) * sizeof(int), cudaMemcpyHostToDevice);
}
void DTChunk::GPU2CPU(int argGPUId) {
GPUId = argGPUId;
cudaSetDevice(argGPUId);
//docLength = argDocLength;
cudaMemcpy(NZDTCount, DTGPUChunkVec[argGPUId].deviceNZDTCount, (docLengthVec[argGPUId]) * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(DTIndex, DTGPUChunkVec[argGPUId].deviceDTIndex, (DTLengthVec[argGPUId]) * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(DTValue, DTGPUChunkVec[argGPUId].deviceDTValue, (DTLengthVec[argGPUId]) * sizeof(int), cudaMemcpyDeviceToHost);
}
void DTChunk::CPU2Disk(string argFilePrefix,int argChunkId) {
chunkId = argChunkId;
//docLength = argDocLength;
string chunkFolderName = argFilePrefix + "/chunk" + to_string(chunkId);
ofstream OutputNZDTCount((chunkFolderName + string("/NZDTCount.txt")).c_str(), ios::binary);
for (int i = 0; i < docLengthVec[chunkId]; i++) {
OutputNZDTCount << NZDTCount[i] << "\n";
}
OutputNZDTCount.close();
ofstream OutputDTIndexValue((chunkFolderName + string("/DTIndexValue.txt")).c_str(), ios::binary);
for (int i = 0; i < DTLengthVec[chunkId]; i++) {
OutputDTIndexValue << DTIndex[i] <<" "<<DTValue[i]<< "\n";
}
OutputDTIndexValue.close();
} |
d5775eadb2503050b3f6657e2e6df18f776f2f01.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
extern "C"
{
__global__ void testKernel(int* addr, unsigned short param1, char param2)
{
addr[0] = param1 + param2;
}
}
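// Standalone driver-API test harness: it loads the module file named on the command line,
// looks up a kernel by name, packs the raw parameter buffer by hand (the device pointer at
// offset 0 and one 4-byte word at offset 16 that testKernel reads as its short/char
// arguments), launches through the legacy hipParamSet*/hipLaunch path used below, and dumps
// the first <length> ints of the output buffer. A typical invocation (binary and module
// names here are only illustrative) might be: ./harness test.cubin testKernel 8 32 1 0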
char* muGetErrorString(hipError_t result);
void muEC(int position) //checks and outputs error position and error string
{
hipError_t errcode = hipGetLastError();
if(errcode==hipSuccess)
{
printf("No error at position %i\n", position);
return;
}
printf("Error position: %i\nCode:%s\n", position, hipGetErrorString(errcode));
}
void muRC(int position, hipError_t result)
{
if(result==0)
printf("Success at %i\n", position);
else
printf("Error at %i:%s\n", position, muGetErrorString(result));
}
char* muGetErrorString(hipError_t result)
{
switch(result)
{
case 0: return "Success";
case 1: return "Invalid value";
case 2: return "Out of memory";
case 3: return "Not Initialized";
case 4: return "Deinitialized";
case 100: return "No device";
case 101: return "Invalid device";
case 200: return "Invalid image";
case 201: return "Invalid context";
case 202: return "Context already current";
case 205: return "Map failed";
case 206: return "Unmap failed";
case 207: return "Array is mapped";
case 208: return "Already mapped";
case 209: return "No binary for GPU";
case 210: return "Already acquired";
case 211: return "Not mapped";
case 300: return "Invalid source";
case 301: return "File not found";
case 400: return "Invalid handle";
case 500: return "Not found";
case 600: return "Not ready";
case 700: return "Launch failed";
case 701: return "Launch out of resources";
case 702: return "Launch timeout";
case 703: return "Launch incompatible texturing";
case 999: return "Unknown";
};
return "Unknown";
}
int main( int argc, char** argv)
{
if(argc<3)
{
puts("arguments: cubinname kernelname length tcount interval choice");
puts(" length: number of 4-byte elements to allocate in memory");
puts(" tcount: number of threads");
puts(" interval: number of output items per group");
puts(" choice: 0, all; 1, odd group only; 2, even group only; 3: none");
return 0;
}
int length = 8;
if(argc>=4)
{
length = atoi(argv[3]);
}
int tcount = 1;
if(argc>=5)
{
tcount = atoi(argv[4]);
}
int* cpu_output=new int[length];
int size = sizeof(int)*length;
int interval = 1;
if(argc>=6)
{
interval = atoi(argv[5]);
}
bool odd = true;
bool even = true;
if(argc>=7)
{
int choice = atoi(argv[6]);
if(choice==1)
even = false;
else if(choice==2)
odd = false;
else if(choice==3)
{
even = false;
odd = false;
}
}
hipDeviceptr_t gpu_output;
hipDevice_t device;
hipCtx_t context;
muRC(100, hipInit(0));
muRC(95, hipDeviceGet(&device, 0));
muRC(92, hipCtxCreate(&context, HIP_CTX_SCHED_SPIN, device));
muRC(90, cuMemAlloc(&gpu_output, size));
hipEvent_t eStart, eStop;
muRC(89, hipEventCreate(&eStart, hipEventDefault));
muRC(88, hipEventCreate(&eStop, hipEventDefault));
hipModule_t module;
hipFunction_t kernel;
hipError_t result = hipModuleLoad(&module, argv[1]);
muRC(0 , result);
result = hipModuleGetFunction(&kernel, module, argv[2]);
muRC(1, result);
int param = 0x1010;
muRC(2, hipParamSetSize(kernel, 20));
muRC(3, hipParamSetv(kernel, 0, &gpu_output, 8));
muRC(3, hipParamSetv(kernel, 16, ¶m, 4));
muRC(4, hipFuncSetBlockShape(kernel, tcount,1,1));
muRC(41, hipEventRecord(eStart,0) );
muRC(5, hipLaunch(kernel));
muRC(51, hipEventRecord(eStop,0) );
muRC(6, cuMemcpyDtoH(cpu_output, gpu_output, size));
muRC(7, hipCtxSynchronize());
float time;
muRC(75, hipEventElapsedTime(&time, eStart, eStop));
printf("length=%i\n", length);
printf("tcount=%i\n", tcount);
printf("time=%f\n", time);
for(int i=0; i<length/interval; i++)
{
if(i%2==0)
{
if(!even) continue;
}
else
{
if(!odd) continue;
}
for(int j=0; j<interval; j++)
printf("i=%i, j=%i, output=%i\n", i, j, cpu_output[i*interval+j]);
if(interval!=1)
puts("");
}
muRC(8, hipModuleUnload(module));
muRC(9, hipFree(gpu_output));
muRC(10, hipCtxDestroy(context));
delete[] cpu_output;
return 0;
}
| d5775eadb2503050b3f6657e2e6df18f776f2f01.cu | #include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
extern "C"
{
__global__ void testKernel(int* addr, unsigned short param1, char param2)
{
addr[0] = param1 + param2;
}
}
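// Standalone driver-API test harness: it loads the cubin named on the command line, looks
// up a kernel by name, packs the raw parameter buffer by hand (the device pointer at
// offset 0 and one 4-byte word at offset 16 that testKernel reads as its short/char
// arguments), launches through the legacy cuParamSet*/cuLaunch path used below, and dumps
// the first <length> ints of the output buffer. A typical invocation (binary and module
// names here are only illustrative) might be: ./harness test.cubin testKernel 8 32 1 0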
char* muGetErrorString(CUresult result);
void muEC(int position) //checks and outputs error position and error string
{
cudaError_t errcode = cudaGetLastError();
if(errcode==cudaSuccess)
{
printf("No error at position %i\n", position);
return;
}
printf("Error position: %i\nCode:%s\n", position, cudaGetErrorString(errcode));
}
void muRC(int position, CUresult result)
{
if(result==0)
printf("Success at %i\n", position);
else
printf("Error at %i:%s\n", position, muGetErrorString(result));
}
char* muGetErrorString(CUresult result)
{
switch(result)
{
case 0: return "Success";
case 1: return "Invalid value";
case 2: return "Out of memory";
case 3: return "Not Initialized";
case 4: return "Deinitialized";
case 100: return "No device";
case 101: return "Invalid device";
case 200: return "Invalid image";
case 201: return "Invalid context";
case 202: return "Context already current";
case 205: return "Map failed";
case 206: return "Unmap failed";
case 207: return "Array is mapped";
case 208: return "Already mapped";
case 209: return "No binary for GPU";
case 210: return "Already acquired";
case 211: return "Not mapped";
case 300: return "Invalid source";
case 301: return "File not found";
case 400: return "Invalid handle";
case 500: return "Not found";
case 600: return "Not ready";
case 700: return "Launch failed";
case 701: return "Launch out of resources";
case 702: return "Launch timeout";
case 703: return "Launch incompatible texturing";
case 999: return "Unknown";
};
return "Unknown";
}
int main( int argc, char** argv)
{
if(argc<3)
{
puts("arguments: cubinname kernelname length tcount interval choice");
puts(" length: number of 4-byte elements to allocate in memory");
puts(" tcount: number of threads");
puts(" interval: number of output items per group");
puts(" choice: 0, all; 1, odd group only; 2, even group only; 3: none");
return 0;
}
int length = 8;
if(argc>=4)
{
length = atoi(argv[3]);
}
int tcount = 1;
if(argc>=5)
{
tcount = atoi(argv[4]);
}
int* cpu_output=new int[length];
int size = sizeof(int)*length;
int interval = 1;
if(argc>=6)
{
interval = atoi(argv[5]);
}
bool odd = true;
bool even = true;
if(argc>=7)
{
int choice = atoi(argv[6]);
if(choice==1)
even = false;
else if(choice==2)
odd = false;
else if(choice==3)
{
even = false;
odd = false;
}
}
CUdeviceptr gpu_output;
CUdevice device;
CUcontext context;
muRC(100, cuInit(0));
muRC(95, cuDeviceGet(&device, 0));
muRC(92, cuCtxCreate(&context, CU_CTX_SCHED_SPIN, device));
muRC(90, cuMemAlloc(&gpu_output, size));
CUevent eStart, eStop;
muRC(89, cuEventCreate(&eStart, CU_EVENT_DEFAULT));
muRC(88, cuEventCreate(&eStop, CU_EVENT_DEFAULT));
CUmodule module;
CUfunction kernel;
CUresult result = cuModuleLoad(&module, argv[1]);
muRC(0 , result);
result = cuModuleGetFunction(&kernel, module, argv[2]);
muRC(1, result);
int param = 0x1010;
muRC(2, cuParamSetSize(kernel, 20));
muRC(3, cuParamSetv(kernel, 0, &gpu_output, 8));
muRC(3, cuParamSetv(kernel, 16, ¶m, 4));
muRC(4, cuFuncSetBlockShape(kernel, tcount,1,1));
muRC(41, cuEventRecord(eStart,0) );
muRC(5, cuLaunch(kernel));
muRC(51, cuEventRecord(eStop,0) );
muRC(6, cuMemcpyDtoH(cpu_output, gpu_output, size));
muRC(7, cuCtxSynchronize());
float time;
muRC(75, cuEventElapsedTime(&time, eStart, eStop));
printf("length=%i\n", length);
printf("tcount=%i\n", tcount);
printf("time=%f\n", time);
for(int i=0; i<length/interval; i++)
{
if(i%2==0)
{
if(!even) continue;
}
else
{
if(!odd) continue;
}
for(int j=0; j<interval; j++)
printf("i=%i, j=%i, output=%i\n", i, j, cpu_output[i*interval+j]);
if(interval!=1)
puts("");
}
muRC(8, cuModuleUnload(module));
muRC(9, cuMemFree(gpu_output));
muRC(10, cuCtxDestroy(context));
delete[] cpu_output;
return 0;
}
|
2bfe296ef6d7218f5f335a3710dac587f51b1e15.hip | // !!! This is a file automatically generated by hipify!!!
/*#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>*/
#include <stdio.h>
#include "kernel.h"
#include "matrix.h"
#include "timer.h"
#include<string.h>
#define THRESHOLD 0.000001
#define YMAX 32
#define threads 32
#define BLOCK_DIM 1024
#define CAPACITY 25498020
#define COARSE_FACTOR 100
#define WARP_SIZE 32
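// spmspm: sparse x sparse product for one layer. Each thread owns one output column c and,
// through COARSE_FACTOR-strided rows, several output rows r; the value is a merge-style dot
// product of CSR row r of A with CSC column c of B (both index lists are sorted, so the two
// cursors ia/ib advance like a sorted-list intersection). Entries surviving the threshold,
// bias and YMAX clamp are appended to the COO result, using a warp-aggregated atomicAdd on
// nnz_out so that only one atomic is issued per active warp.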
__global__ void spmspm(COOMatrix *result, CSRMatrix A, CSCMatrix B, float bias, unsigned int* nnz_out) {
//unsigned int r = blockIdx.y*blockDim.y + threadIdx.y;
unsigned int c = blockIdx.x*blockDim.x + threadIdx.x;
//unsigned int segmentx = COARSE_FACTOR*blockDim.x*blockIdx.x;
unsigned int segmenty = COARSE_FACTOR*blockDim.y*blockIdx.y;
unsigned int row = segmenty + threadIdx.y;
//unsigned int col =segmentx + threadIdx.x;
for(unsigned int cr= 0 ; cr<COARSE_FACTOR;++cr){
unsigned int r = row + cr*threads;
//for(unsigned int cc =0; cc <COARSE_FACTOR;++cc){
//unsigned int c = col + cc*threads;
if (r < A.numRows && c < B.numCols) {
unsigned int rowPtrA = A.rowPtrs[r];
unsigned int nnzA = A.rowPtrs[r + 1] - rowPtrA;
unsigned int colPtrB = B.colPtrs[c];
unsigned int nnzB = B.colPtrs[c + 1] - colPtrB;
			if (nnzA > 0 && nnzB > 0) { // only compute when both A's row and B's column have nonzeros; otherwise skip
float sum = 0.0f;
unsigned int ia = 0, ib = 0;
				while (ia < nnzA && ib < nnzB) { // walk the nonzero lists of A's row and B's column until either is exhausted
unsigned int colIdx = A.colIdxs[rowPtrA + ia]; //single item col index from A
unsigned int rowIdx = B.rowIdxs[colPtrB + ib]; //single item row index from B
if (rowIdx < B.nnz && colIdx < A.nnz) {
if (colIdx < rowIdx) {
ia++;
}
else if (colIdx > rowIdx) {
ib++;
}
else {
sum += A.values[rowPtrA + ia] * B.values[ib + colPtrB];// do the multiplication of the row that matches the column
ia++;
ib++;
}
}
}
if (sum > THRESHOLD || sum < -THRESHOLD) { //if not smaller than abs(threshold)
sum += bias; //add to it the bias
//Remove negative and zero values
if (sum > 0) {//if end result is positive otherwise I also do not want to add it to result
if (sum > YMAX) { //make sure it is on an upper limit
sum = YMAX;
}
//Assign a leader thread
unsigned int activeThreads = __activemask();
unsigned int leader = __ffs(activeThreads)-1;
//Find how many threads needs to add to the queue
unsigned int numActive = __popc(activeThreads);
//Have the leader perform the atomic operation
unsigned int nnzIndxTemp;
if(threadIdx.x%WARP_SIZE == leader){
nnzIndxTemp = atomicAdd(nnz_out, numActive);
}
//Broadcast the result
nnzIndxTemp = __shfl_sync(activeThreads,nnzIndxTemp,leader);
//Find the position of each thread
unsigned int previousThreads = (1 << (threadIdx.x%WARP_SIZE)) - 1;
unsigned int activePreviousThreads = activeThreads & previousThreads;
unsigned int offset = __popc(activePreviousThreads);
// Store the result
result->rowIdxs[offset + nnzIndxTemp] = r;
result->colIdxs[offset + nnzIndxTemp] = c;
result->values[offset + nnzIndxTemp] = sum;
}
}
}
}
}
}
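// The COO output of spmspm is rebuilt into CSR for the next layer in four stages:
// histogram_private_kernel counts entries per row, scan_kernel/add_kernel turn the counts
// into row pointers (exclusive scan), Binning_kernel scatters every COO entry into its
// row's slot range, and sorting_kernel orders the column indices inside each row.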
__global__ void histogram_private_kernel(unsigned int* rowIdxs, unsigned int* rowPtrs, unsigned int nnz, unsigned int numRows) {
unsigned int t = blockDim.x*blockIdx.x + threadIdx.x;
if (t < nnz) {
unsigned int rIdx = rowIdxs[t];
atomicAdd(&rowPtrs[rIdx], 1);
}
}
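// Work-efficient (Brent-Kung style) exclusive scan over one 2*BLOCK_DIM segment per block:
// the reduction sweep builds partial sums in shared memory, the block total is saved to
// partialSums before the last element is zeroed, and the down-sweep distributes the
// prefixes. scan_gpu_d scans the block totals recursively and add_kernel folds them back in.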
__global__ void scan_kernel(unsigned int* input, unsigned int* output, unsigned int* partialSums, unsigned int N) {
// TODO
unsigned int segment = 2 * blockDim.x * blockIdx.x;
unsigned int i = segment + threadIdx.x;
__shared__ unsigned int input_s[2 * BLOCK_DIM];
int tid = threadIdx.x;
if (i < N)
{
input_s[tid] = input[i];
}
else
{
input_s[tid] = 0;
}
if (i + BLOCK_DIM < N)
{
input_s[tid + BLOCK_DIM] = input[i + BLOCK_DIM];
}
else
{
input_s[tid + BLOCK_DIM] = 0;
}
__syncthreads();
//reduction step
for (unsigned int stride = 1; stride <= BLOCK_DIM; stride *= 2)
{
int index = (threadIdx.x + 1) * 2 * stride - 1;
if (index < 2 * BLOCK_DIM)
input_s[index] += input_s[index - stride];
__syncthreads();
}
//save partial sum
if (threadIdx.x == 0)
{
partialSums[blockIdx.x] = input_s[2 * BLOCK_DIM - 1];
input_s[2 * BLOCK_DIM - 1] = 0.0f;
}
__syncthreads();
//post reduction step
for (unsigned int stride = BLOCK_DIM; stride > 0; stride /= 2)
{
int index = (threadIdx.x + 1) * 2 * stride - 1;
if (index < 2 * BLOCK_DIM)
{
//add then swap
unsigned int temp = input_s[index];
input_s[index] += input_s[index - stride];
input_s[index - stride] = temp;
}
__syncthreads();
}
if (i < N)
{
output[i] = input_s[tid];
}
if (i + BLOCK_DIM < N)
{
output[i + BLOCK_DIM] = input_s[tid + BLOCK_DIM];
}
}
__global__ void add_kernel(unsigned int* output, unsigned int* partialSums, unsigned int N) {
// TODO
unsigned int i = 2 * blockIdx.x*blockDim.x + threadIdx.x;
if (blockIdx.x != 0) {
if (i < N) {
output[i] += partialSums[blockIdx.x];
}
if (i + BLOCK_DIM < N) {
output[i + BLOCK_DIM] += partialSums[blockIdx.x];
}
}
}
//output_d rowptrs n = numrows +1
void scan_gpu_d(unsigned int* input_d, unsigned int* output_d, unsigned int N) {
// Configurations
const unsigned int numThreadsPerBlock = BLOCK_DIM;
const unsigned int numElementsPerBlock = 2 * numThreadsPerBlock;
const unsigned int numBlocks = (N + numElementsPerBlock - 1) / numElementsPerBlock;
// Allocate partial sums
unsigned int *partialSums_d;
hipMalloc((void**)&partialSums_d, numBlocks * sizeof(unsigned int));
hipDeviceSynchronize();
scan_kernel << < numBlocks, numThreadsPerBlock >> > (input_d, output_d, partialSums_d, N);
hipDeviceSynchronize();
// Scan partial sums then add
if (numBlocks > 1) {
// Scan partial sums
scan_gpu_d(partialSums_d, partialSums_d, numBlocks);
// Add scanned sums
add_kernel << < numBlocks, numThreadsPerBlock >> > (output_d, partialSums_d, N);
}
// Free memory
hipFree(partialSums_d);
hipDeviceSynchronize();
}
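// Binning_kernel: scatters each COO entry into its CSR row segment. rowPtrs gives the start
// of the row and an atomicAdd on the per-row counter rowPtrsBin hands out the next free slot
// within that segment; the order inside a row is arbitrary at this point.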
__global__ void Binning_kernel(unsigned int* inrowIdxs, unsigned int* incolIdxs, float* invalues, unsigned int* rowPtrs, unsigned int* colIdxs, float* values, unsigned int nnz, unsigned int numRows, unsigned int* rowPtrsBin) {
int i = threadIdx.x + blockIdx.x*blockDim.x;
if (i < nnz) {
unsigned int row = inrowIdxs[i];
unsigned int col = incolIdxs[i];
float val = invalues[i];
unsigned int init = rowPtrs[row];
unsigned int nnzIdx = atomicAdd(&rowPtrsBin[row], 1);
colIdxs[nnzIdx+init] = col;
values[nnzIdx+init]=val;
}
}
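// sorting_kernel: one thread per row bubble-sorts that row's (colIdx, value) pairs so the
// column indices are ascending, which the merge loop in spmspm relies on for the next layer.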
__global__ void sorting_kernel(unsigned int* colIdxs, float* values, unsigned int* rowPtrs, unsigned int numRows) {
int i = threadIdx.x + blockIdx.x*blockDim.x;
if (i < numRows) {
unsigned int nnzA = rowPtrs[i + 1] - rowPtrs[i];
if (nnzA > 0) {
for (unsigned int j = 0; j < nnzA - 1;++j) {
for (unsigned int k = 0; k < nnzA - j - 1; ++k) {
unsigned int l_0 = k + rowPtrs[i];
unsigned int l_1 = l_0 + 1;
if (colIdxs[l_0] > colIdxs[l_1]) {
//swap col
unsigned int tmp = colIdxs[l_0];
colIdxs[l_0] = colIdxs[l_1];
colIdxs[l_1] = tmp;
//swap float
float valtmp = values[l_0];
values[l_0] = values[l_1];
values[l_1] = valtmp;
}
}
}
}
}
}
//converts from CSRMatrix to Vector and a vector of indices where the row is not all zeros
void findNonzeroRows(Vector* v, CSRMatrix* A) {
unsigned int nnz = 0;
for (unsigned int r = 0; r < A->numRows; ++r) {
unsigned int rowPtrA = A->rowPtrs[r];
unsigned int nnzA = A->rowPtrs[r + 1] - rowPtrA;
if (nnzA > 0) {
if (nnz >= v->capacity) {
expandVectorCapacity(v, 2 * v->capacity);
}
v->data[nnz] = r;
++nnz;
}
}
v->nnz = nnz;
}
COOMatrix* createEmptyCOO(unsigned int numRows, unsigned int numCols, unsigned int capacity) {
COOMatrix *coo = (COOMatrix *)malloc(sizeof(COOMatrix));
coo->rowIdxs = (unsigned int *)malloc(capacity * sizeof(unsigned int));
coo->colIdxs = (unsigned int *)malloc(capacity * sizeof(unsigned int));
coo->values = (float *)malloc(capacity * sizeof(float));
coo->numRows = numRows;
coo->numCols = numCols;
coo->nnz = 0;
coo->capacity = CAPACITY;
for (unsigned int i = 0; i < coo->capacity;++i) {
coo->rowIdxs[i] = 0;
coo->colIdxs[i] = 0;
coo->values[i] = 0.0f;
}
return coo;
}
void sparseNN(Vector* result, COOMatrix* featureVectors, COOMatrix** layerWeights, float bias, unsigned int numLayers) {
Timer timer;
// Convert featureVectors to CSR
startTime(&timer);
CSRMatrix* Y0 = createCSRfromCOO(featureVectors);
stopTimeAndPrint(&timer, "Convert feature vectors to CSR");
// Convert layer weights to CSC
startTime(&timer);
CSCMatrix* W[numLayers];
for (unsigned int layer = 0; layer < numLayers; ++layer) {
W[layer] = createCSCfromCOO(layerWeights[layer]);
}
stopTimeAndPrint(&timer, "Convert weights to CSC");
// Double buffers
startTime(&timer);
COOMatrix *tmp = createEmptyCOO(Y0->numRows, Y0->numCols, CAPACITY);
CSRMatrix *inBuffer = Y0;
COOMatrix *outBuffer = tmp;
stopTimeAndPrint(&timer, "Allocate temporary buffer");
inBuffer->capacity = CAPACITY;
// Allocate GPU memory
startTime(&timer);
outBuffer->capacity = CAPACITY;
//allocating inbuffer address and value
CSRMatrix inBuffer_d;
inBuffer_d.numRows = inBuffer->numRows;
inBuffer_d.numCols = inBuffer->numCols;
inBuffer_d.nnz = inBuffer->nnz;
inBuffer_d.capacity = inBuffer->capacity;
hipMalloc((void**)&inBuffer_d.rowPtrs, (inBuffer->numRows + 1) * sizeof(unsigned int));
hipMalloc((void**)&inBuffer_d.colIdxs, CAPACITY * sizeof(unsigned int));
hipMalloc((void**)&inBuffer_d.values, CAPACITY * sizeof(float));
hipMemcpy(inBuffer_d.rowPtrs, inBuffer->rowPtrs, (inBuffer->numRows + 1) * sizeof(unsigned int), hipMemcpyHostToDevice);
hipMemcpy(inBuffer_d.colIdxs, inBuffer->colIdxs, (inBuffer->nnz) * sizeof(unsigned int), hipMemcpyHostToDevice);
hipMemcpy(inBuffer_d.values, inBuffer->values, inBuffer->nnz * sizeof(float), hipMemcpyHostToDevice);
printf("inbuffer allocated\n");
/////////////////////////
//outBuffer_d allocation
COOMatrix *outBuffer_d;
unsigned int* out_rowIdxs_d;
unsigned int* out_colIdxs_d;
float* out_values_d;
unsigned int* out_nnz_d;
	unsigned int* out_nnz_h = (unsigned int*)malloc(sizeof(unsigned int));
*out_nnz_h = outBuffer->nnz;
hipMalloc((void**)&outBuffer_d, sizeof(COOMatrix));
hipMalloc((void**)&out_rowIdxs_d, outBuffer->capacity * sizeof(unsigned int));
hipMalloc((void**)&out_colIdxs_d, outBuffer->capacity * sizeof(unsigned int));
hipMalloc((void**)&out_values_d, outBuffer->capacity * sizeof(float));
hipMalloc((void**)&out_nnz_d, sizeof(unsigned int));
//copying outbuffer
hipMemcpy(outBuffer_d, outBuffer, sizeof(COOMatrix), hipMemcpyHostToDevice);
hipMemcpy(out_rowIdxs_d, outBuffer->rowIdxs, outBuffer->capacity * sizeof(unsigned int), hipMemcpyHostToDevice);
hipMemcpy(out_colIdxs_d, outBuffer->colIdxs, outBuffer->capacity * sizeof(unsigned int), hipMemcpyHostToDevice);
hipMemcpy(out_values_d, outBuffer->values, outBuffer->capacity * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(out_nnz_d, out_nnz_h, sizeof(unsigned int), hipMemcpyHostToDevice);
hipMemcpy(&(outBuffer_d->rowIdxs), &out_rowIdxs_d, sizeof(unsigned int*), hipMemcpyHostToDevice);
hipMemcpy(&(outBuffer_d->colIdxs), &out_colIdxs_d, sizeof(unsigned int*), hipMemcpyHostToDevice);
hipMemcpy(&(outBuffer_d->values), &out_values_d, sizeof(float*), hipMemcpyHostToDevice);
hipMemcpy(&(outBuffer_d->numRows), &(outBuffer->numRows), sizeof(unsigned int), hipMemcpyHostToDevice);
hipDeviceSynchronize();
printf("outbuffer allocated\n");
//////////////////////////////////
// allocating W_d
CSCMatrix W_d[numLayers];
for (unsigned int layer = 0; layer < numLayers; ++layer) {
W_d[layer].numRows = W[layer]->numRows;
W_d[layer].numCols = W[layer]->numCols;
W_d[layer].nnz = W[layer]->nnz;
W_d[layer].capacity = W[layer]->capacity;
hipMalloc((void**)&W_d[layer].colPtrs, (W[layer]->numCols + 1) * sizeof(unsigned int));
hipMalloc((void**)&W_d[layer].rowIdxs, W[layer]->capacity * sizeof(unsigned int));
hipMalloc((void**)&W_d[layer].values, W[layer]->capacity * sizeof(float));
}
for (unsigned int layer = 0; layer < numLayers; ++layer) {
hipMemcpy(W_d[layer].colPtrs, W[layer]->colPtrs, (W[layer]->numCols + 1) * sizeof(unsigned int), hipMemcpyHostToDevice);
hipMemcpy(W_d[layer].rowIdxs, W[layer]->rowIdxs, W[layer]->capacity * sizeof(unsigned int), hipMemcpyHostToDevice);
hipMemcpy(W_d[layer].values, W[layer]->values, W[layer]->capacity * sizeof(float), hipMemcpyHostToDevice);
}
hipDeviceSynchronize();
stopTime(&timer);
printElapsedTime(timer, "Allocation and copy time on GPU Memory");
unsigned int *rowPtrstmp_d;
unsigned int *rowPtrstmp;
rowPtrstmp = (unsigned int *)malloc((inBuffer_d.numRows + 1) * sizeof(unsigned int));
hipMalloc((void**)&rowPtrstmp_d, (inBuffer_d.numRows + 1) * sizeof(unsigned int));
for(unsigned int i=0; i<inBuffer_d.numRows+1;i++){
rowPtrstmp[i]=0;
}
hipMemcpy(rowPtrstmp_d, rowPtrstmp, (inBuffer_d.numRows + 1) * sizeof(unsigned int), hipMemcpyHostToDevice);
//kernel loop
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Loop over layers
for (unsigned int layer = 0; layer < numLayers; ++layer) {
// SpMSpM
printf("Computing layer %u (SpMSpM)", layer);
startTime(&timer);
dim3 numThreadsPerBlock3(threads, threads);
dim3 numBlocks3((W_d[layer].numCols + numThreadsPerBlock3.x - 1) / (numThreadsPerBlock3.x), (inBuffer_d.numRows + numThreadsPerBlock3.y*COARSE_FACTOR - 1) /( numThreadsPerBlock3.y*COARSE_FACTOR));
spmspm << <numBlocks3, numThreadsPerBlock3 >> > (outBuffer_d, inBuffer_d, W_d[layer], bias, out_nnz_d);
hipDeviceSynchronize();
stopTimeAndPrint(&timer, "");
hipMemcpy(out_nnz_h, out_nnz_d, sizeof(unsigned int), hipMemcpyDeviceToHost);
printf("nnz %d\n", *out_nnz_h);
inBuffer_d.nnz = *out_nnz_h;
inBuffer_d.numCols = W_d[layer].numCols;
hipDeviceSynchronize();
printf("kernel time for layer %u", layer);
stopTimeAndPrint(&timer, "");
startTime(&timer);
//calling histogram to fill rowPtrs of inBuffer
unsigned int numThreadsPerBlock = 1024;
unsigned int numBlocks = (*out_nnz_h + numThreadsPerBlock - 1) / numThreadsPerBlock;
//initializing rowstmp and rowstmp_d
if (layer != 0)
hipMemcpy(rowPtrstmp_d, rowPtrstmp, (inBuffer_d.numRows + 1) * sizeof(unsigned int), hipMemcpyHostToDevice);
histogram_private_kernel << < numBlocks, numThreadsPerBlock >> > (out_rowIdxs_d, rowPtrstmp_d, *out_nnz_h, inBuffer_d.numRows);
hipDeviceSynchronize();
printf("Histogram time for layer %u", layer);
stopTimeAndPrint(&timer, "");
startTime(&timer);
//calling the scan kernel to scan kernel ptrs
const unsigned int numElementsPerBlock = 2 * numThreadsPerBlock;
numBlocks = ((inBuffer_d.numRows + 1) + numElementsPerBlock - 1) / numElementsPerBlock;
// Allocate partial sums
unsigned int *partialSums_d;
hipMalloc((void**)&partialSums_d, numBlocks * sizeof(unsigned int));
hipDeviceSynchronize();
// Call kernel
scan_kernel << < numBlocks, numThreadsPerBlock >> > (rowPtrstmp_d, inBuffer_d.rowPtrs, partialSums_d, inBuffer_d.numRows + 1);
hipDeviceSynchronize();
// Scan partial sums then add
if (numBlocks > 1) {
// Scan partial sums
scan_gpu_d(partialSums_d, partialSums_d, numBlocks);
// Add scanned sums
add_kernel << < numBlocks, numThreadsPerBlock >> > (inBuffer_d.rowPtrs, partialSums_d, inBuffer_d.numRows + 1);
}
hipDeviceSynchronize();
//used to check if scan and histogram same as nnz of kernel
hipMemcpy(rowPtrstmp, inBuffer_d.rowPtrs, sizeof(unsigned int) * (inBuffer_d.numRows + 1), hipMemcpyDeviceToHost);
printf("test %u\n", rowPtrstmp[inBuffer_d.numRows]);
// Free memory
hipFree(partialSums_d);
printf("Scan time for layer %u", layer);
stopTimeAndPrint(&timer, "");
startTime(&timer);
//Binning
for (unsigned int i = 0; i < inBuffer_d.numRows + 1;i++) {
rowPtrstmp[i] = 0;
}
hipMemcpy(rowPtrstmp_d, rowPtrstmp, (inBuffer_d.numRows + 1) * sizeof(unsigned int), hipMemcpyHostToDevice);
hipDeviceSynchronize();
numBlocks = (*out_nnz_h + numThreadsPerBlock - 1) / numThreadsPerBlock;
Binning_kernel << < numBlocks, numThreadsPerBlock >> > (out_rowIdxs_d, out_colIdxs_d, out_values_d, inBuffer_d.rowPtrs, inBuffer_d.colIdxs, inBuffer_d.values, *out_nnz_h, inBuffer_d.numRows, rowPtrstmp_d);
hipDeviceSynchronize();
//Sorting
numBlocks = ((inBuffer_d.numRows + 1) + numThreadsPerBlock - 1) / numThreadsPerBlock;
sorting_kernel << < numBlocks, numThreadsPerBlock >> > (inBuffer_d.colIdxs, inBuffer_d.values, inBuffer_d.rowPtrs, inBuffer_d.numRows);
hipDeviceSynchronize();
printf("Converting time for layer %u", layer);
stopTimeAndPrint(&timer, "");
//reinitializing nnz and rowstmp
*out_nnz_h = 0;
hipMemcpy(out_nnz_d, out_nnz_h, sizeof(unsigned int), hipMemcpyHostToDevice);
for (unsigned int i = 0; i < inBuffer_d.numRows + 1;i++) {
rowPtrstmp[i] = 0;
}
hipDeviceSynchronize();
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Copy data from GPU
startTime(&timer);
//TODO
inBuffer->numRows = inBuffer_d.numRows;
inBuffer->numCols = inBuffer_d.numCols;
inBuffer->nnz = inBuffer_d.nnz;
hipMemcpy(inBuffer->rowPtrs, inBuffer_d.rowPtrs, (inBuffer_d.numRows + 1) * sizeof(unsigned int), hipMemcpyDeviceToHost);
hipMemcpy(inBuffer->colIdxs, inBuffer_d.colIdxs, inBuffer_d.nnz * sizeof(unsigned int), hipMemcpyDeviceToHost);
hipMemcpy(inBuffer->values, inBuffer_d.values, inBuffer_d.nnz * sizeof(float), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
stopTime(&timer);
printElapsedTime(timer, "Copy from GPU time");
//CPU
// Find
//nonzero rows
startTime(&timer);
findNonzeroRows(result, inBuffer);
stopTimeAndPrint(&timer, "Find nonzero rows");
// Free GPU memory
startTime(&timer);
hipFree(inBuffer_d.rowPtrs);
hipFree(inBuffer_d.colIdxs);
hipFree(inBuffer_d.values);
hipFree(outBuffer_d);
// hipFree(tmpOutBuffer.rowIdxs);
// hipFree(tmpOutBuffer.colIdxs);
// hipFree(tmpOutBuffer.values);
//hipFree(inBuffer_d);
free(inBuffer);
free(outBuffer);
for (unsigned int layer = 0; layer < numLayers; ++layer) {
hipFree(W_d[layer].colPtrs);
hipFree(W_d[layer].rowIdxs);
hipFree(W_d[layer].values);
}
hipDeviceSynchronize();
stopTime(&timer);
printElapsedTime(timer, "Deallocation time");
// Free buffers
startTime(&timer);
//freeCSR(Y0);
for (unsigned int layer = 0; layer < numLayers; ++layer) {
freeCSC(W[layer]);
}
stopTimeAndPrint(&timer, "Deallocate memory");
}
| 2bfe296ef6d7218f5f335a3710dac587f51b1e15.cu | /*#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>*/
#include <stdio.h>
#include "kernel.h"
#include "matrix.h"
#include "timer.h"
#include<string.h>
#define THRESHOLD 0.000001
#define YMAX 32
#define threads 32
#define BLOCK_DIM 1024
#define CAPACITY 25498020
#define COARSE_FACTOR 100
#define WARP_SIZE 32
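// spmspm: sparse x sparse product for one layer. Each thread owns one output column c and,
// through COARSE_FACTOR-strided rows, several output rows r; the value is a merge-style dot
// product of CSR row r of A with CSC column c of B (both index lists are sorted, so the two
// cursors ia/ib advance like a sorted-list intersection). Entries surviving the threshold,
// bias and YMAX clamp are appended to the COO result, using a warp-aggregated atomicAdd on
// nnz_out so that only one atomic is issued per active warp.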
__global__ void spmspm(COOMatrix *result, CSRMatrix A, CSCMatrix B, float bias, unsigned int* nnz_out) {
//unsigned int r = blockIdx.y*blockDim.y + threadIdx.y;
unsigned int c = blockIdx.x*blockDim.x + threadIdx.x;
//unsigned int segmentx = COARSE_FACTOR*blockDim.x*blockIdx.x;
unsigned int segmenty = COARSE_FACTOR*blockDim.y*blockIdx.y;
unsigned int row = segmenty + threadIdx.y;
//unsigned int col =segmentx + threadIdx.x;
for(unsigned int cr= 0 ; cr<COARSE_FACTOR;++cr){
unsigned int r = row + cr*threads;
//for(unsigned int cc =0; cc <COARSE_FACTOR;++cc){
//unsigned int c = col + cc*threads;
if (r < A.numRows && c < B.numCols) {
unsigned int rowPtrA = A.rowPtrs[r];
unsigned int nnzA = A.rowPtrs[r + 1] - rowPtrA;
unsigned int colPtrB = B.colPtrs[c];
unsigned int nnzB = B.colPtrs[c + 1] - colPtrB;
            if (nnzA > 0 && nnzB > 0) { // compute only if both the row of A and the column of B contain nonzeros; otherwise skip
float sum = 0.0f;
unsigned int ia = 0, ib = 0;
                while (ia < nnzA && ib < nnzB) { // walk the nonzeros of the A row and the B column, stopping when either side is exhausted
unsigned int colIdx = A.colIdxs[rowPtrA + ia]; //single item col index from A
unsigned int rowIdx = B.rowIdxs[colPtrB + ib]; //single item row index from B
if (rowIdx < B.nnz && colIdx < A.nnz) {
if (colIdx < rowIdx) {
ia++;
}
else if (colIdx > rowIdx) {
ib++;
}
else {
sum += A.values[rowPtrA + ia] * B.values[ib + colPtrB];// do the multiplication of the row that matches the column
ia++;
ib++;
}
}
}
if (sum > THRESHOLD || sum < -THRESHOLD) { //if not smaller than abs(threshold)
sum += bias; //add to it the bias
//Remove negative and zero values
                    if (sum > 0) { // keep only strictly positive results; non-positive values are not written to the output
                        if (sum > YMAX) { // clamp to the upper limit YMAX
sum = YMAX;
}
//Assign a leader thread
unsigned int activeThreads = __activemask();
unsigned int leader = __ffs(activeThreads)-1;
                        //Find how many threads need to add to the queue
unsigned int numActive = __popc(activeThreads);
//Have the leader perform the atomic operation
unsigned int nnzIndxTemp;
if(threadIdx.x%WARP_SIZE == leader){
nnzIndxTemp = atomicAdd(nnz_out, numActive);
}
//Broadcast the result
nnzIndxTemp = __shfl_sync(activeThreads,nnzIndxTemp,leader);
//Find the position of each thread
unsigned int previousThreads = (1 << (threadIdx.x%WARP_SIZE)) - 1;
unsigned int activePreviousThreads = activeThreads & previousThreads;
unsigned int offset = __popc(activePreviousThreads);
// Store the result
result->rowIdxs[offset + nnzIndxTemp] = r;
result->colIdxs[offset + nnzIndxTemp] = c;
result->values[offset + nnzIndxTemp] = sum;
}
}
}
}
}
}
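// Illustrative sketch, not part of the original kernel above: the warp-aggregated
// atomicAdd pattern that spmspm uses to reserve output slots, pulled out into a
// standalone helper. The helper name is an assumption; the kernel inlines this logic
// and, like it, assumes the lane id is threadIdx.x % WARP_SIZE.
__device__ unsigned int warpAggregatedReserve(unsigned int* counter) {
    unsigned int active = __activemask();              // lanes that reached this call
    unsigned int leader = __ffs(active) - 1;           // lowest active lane acts as leader
    unsigned int numActive = __popc(active);           // slots needed by this warp
    unsigned int base;
    if ((threadIdx.x % WARP_SIZE) == leader) {
        base = atomicAdd(counter, numActive);          // one atomic per warp instead of one per lane
    }
    base = __shfl_sync(active, base, leader);          // broadcast the reserved base offset
    unsigned int previous = active & ((1u << (threadIdx.x % WARP_SIZE)) - 1);
    return base + __popc(previous);                    // each lane receives a distinct slot
}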
__global__ void histogram_private_kernel(unsigned int* rowIdxs, unsigned int* rowPtrs, unsigned int nnz, unsigned int numRows) {
unsigned int t = blockDim.x*blockIdx.x + threadIdx.x;
if (t < nnz) {
unsigned int rIdx = rowIdxs[t];
atomicAdd(&rowPtrs[rIdx], 1);
}
}
__global__ void scan_kernel(unsigned int* input, unsigned int* output, unsigned int* partialSums, unsigned int N) {
// TODO
unsigned int segment = 2 * blockDim.x * blockIdx.x;
unsigned int i = segment + threadIdx.x;
__shared__ unsigned int input_s[2 * BLOCK_DIM];
int tid = threadIdx.x;
if (i < N)
{
input_s[tid] = input[i];
}
else
{
input_s[tid] = 0;
}
if (i + BLOCK_DIM < N)
{
input_s[tid + BLOCK_DIM] = input[i + BLOCK_DIM];
}
else
{
input_s[tid + BLOCK_DIM] = 0;
}
__syncthreads();
//reduction step
for (unsigned int stride = 1; stride <= BLOCK_DIM; stride *= 2)
{
int index = (threadIdx.x + 1) * 2 * stride - 1;
if (index < 2 * BLOCK_DIM)
input_s[index] += input_s[index - stride];
__syncthreads();
}
//save partial sum
if (threadIdx.x == 0)
{
partialSums[blockIdx.x] = input_s[2 * BLOCK_DIM - 1];
input_s[2 * BLOCK_DIM - 1] = 0.0f;
}
__syncthreads();
//post reduction step
for (unsigned int stride = BLOCK_DIM; stride > 0; stride /= 2)
{
int index = (threadIdx.x + 1) * 2 * stride - 1;
if (index < 2 * BLOCK_DIM)
{
//add then swap
unsigned int temp = input_s[index];
input_s[index] += input_s[index - stride];
input_s[index - stride] = temp;
}
__syncthreads();
}
if (i < N)
{
output[i] = input_s[tid];
}
if (i + BLOCK_DIM < N)
{
output[i + BLOCK_DIM] = input_s[tid + BLOCK_DIM];
}
}
__global__ void add_kernel(unsigned int* output, unsigned int* partialSums, unsigned int N) {
// TODO
unsigned int i = 2 * blockIdx.x*blockDim.x + threadIdx.x;
if (blockIdx.x != 0) {
if (i < N) {
output[i] += partialSums[blockIdx.x];
}
if (i + BLOCK_DIM < N) {
output[i + BLOCK_DIM] += partialSums[blockIdx.x];
}
}
}
//output_d rowptrs n = numrows +1
void scan_gpu_d(unsigned int* input_d, unsigned int* output_d, unsigned int N) {
// Configurations
const unsigned int numThreadsPerBlock = BLOCK_DIM;
const unsigned int numElementsPerBlock = 2 * numThreadsPerBlock;
const unsigned int numBlocks = (N + numElementsPerBlock - 1) / numElementsPerBlock;
// Allocate partial sums
unsigned int *partialSums_d;
cudaMalloc((void**)&partialSums_d, numBlocks * sizeof(unsigned int));
cudaDeviceSynchronize();
scan_kernel << < numBlocks, numThreadsPerBlock >> > (input_d, output_d, partialSums_d, N);
cudaDeviceSynchronize();
// Scan partial sums then add
if (numBlocks > 1) {
// Scan partial sums
scan_gpu_d(partialSums_d, partialSums_d, numBlocks);
// Add scanned sums
add_kernel << < numBlocks, numThreadsPerBlock >> > (output_d, partialSums_d, N);
}
// Free memory
cudaFree(partialSums_d);
cudaDeviceSynchronize();
}
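// Illustrative host-side sketch, not part of the original file: using the recursive scan
// above to turn a per-row nonzero histogram into CSR row pointers. The function and
// variable names are assumptions; sparseNN below issues the same call inline.
// histogram_d is expected to hold numRows + 1 counters, with the last one zeroed.
void buildRowPtrsFromHistogram(unsigned int* histogram_d, unsigned int* rowPtrs_d, unsigned int numRows) {
    // Exclusive scan over numRows + 1 counters: rowPtrs_d[r] becomes the number of
    // nonzeros in rows 0..r-1, and rowPtrs_d[numRows] ends up holding the total nnz.
    scan_gpu_d(histogram_d, rowPtrs_d, numRows + 1);
}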
__global__ void Binning_kernel(unsigned int* inrowIdxs, unsigned int* incolIdxs, float* invalues, unsigned int* rowPtrs, unsigned int* colIdxs, float* values, unsigned int nnz, unsigned int numRows, unsigned int* rowPtrsBin) {
int i = threadIdx.x + blockIdx.x*blockDim.x;
if (i < nnz) {
unsigned int row = inrowIdxs[i];
unsigned int col = incolIdxs[i];
float val = invalues[i];
unsigned int init = rowPtrs[row];
unsigned int nnzIdx = atomicAdd(&rowPtrsBin[row], 1);
colIdxs[nnzIdx+init] = col;
values[nnzIdx+init]=val;
}
}
__global__ void sorting_kernel(unsigned int* colIdxs, float* values, unsigned int* rowPtrs, unsigned int numRows) {
int i = threadIdx.x + blockIdx.x*blockDim.x;
if (i < numRows) {
unsigned int nnzA = rowPtrs[i + 1] - rowPtrs[i];
if (nnzA > 0) {
for (unsigned int j = 0; j < nnzA - 1;++j) {
for (unsigned int k = 0; k < nnzA - j - 1; ++k) {
unsigned int l_0 = k + rowPtrs[i];
unsigned int l_1 = l_0 + 1;
if (colIdxs[l_0] > colIdxs[l_1]) {
//swap col
unsigned int tmp = colIdxs[l_0];
colIdxs[l_0] = colIdxs[l_1];
colIdxs[l_1] = tmp;
//swap float
float valtmp = values[l_0];
values[l_0] = values[l_1];
values[l_1] = valtmp;
}
}
}
}
}
}
//converts from CSRMatrix to Vector and a vector of indices where the row is not all zeros
void findNonzeroRows(Vector* v, CSRMatrix* A) {
unsigned int nnz = 0;
for (unsigned int r = 0; r < A->numRows; ++r) {
unsigned int rowPtrA = A->rowPtrs[r];
unsigned int nnzA = A->rowPtrs[r + 1] - rowPtrA;
if (nnzA > 0) {
if (nnz >= v->capacity) {
expandVectorCapacity(v, 2 * v->capacity);
}
v->data[nnz] = r;
++nnz;
}
}
v->nnz = nnz;
}
COOMatrix* createEmptyCOO(unsigned int numRows, unsigned int numCols, unsigned int capacity) {
COOMatrix *coo = (COOMatrix *)malloc(sizeof(COOMatrix));
coo->rowIdxs = (unsigned int *)malloc(capacity * sizeof(unsigned int));
coo->colIdxs = (unsigned int *)malloc(capacity * sizeof(unsigned int));
coo->values = (float *)malloc(capacity * sizeof(float));
coo->numRows = numRows;
coo->numCols = numCols;
coo->nnz = 0;
coo->capacity = CAPACITY;
for (unsigned int i = 0; i < coo->capacity;++i) {
coo->rowIdxs[i] = 0;
coo->colIdxs[i] = 0;
coo->values[i] = 0.0f;
}
return coo;
}
void sparseNN(Vector* result, COOMatrix* featureVectors, COOMatrix** layerWeights, float bias, unsigned int numLayers) {
Timer timer;
// Convert featureVectors to CSR
startTime(&timer);
CSRMatrix* Y0 = createCSRfromCOO(featureVectors);
stopTimeAndPrint(&timer, "Convert feature vectors to CSR");
// Convert layer weights to CSC
startTime(&timer);
CSCMatrix* W[numLayers];
for (unsigned int layer = 0; layer < numLayers; ++layer) {
W[layer] = createCSCfromCOO(layerWeights[layer]);
}
stopTimeAndPrint(&timer, "Convert weights to CSC");
// Double buffers
startTime(&timer);
COOMatrix *tmp = createEmptyCOO(Y0->numRows, Y0->numCols, CAPACITY);
CSRMatrix *inBuffer = Y0;
COOMatrix *outBuffer = tmp;
stopTimeAndPrint(&timer, "Allocate temporary buffer");
inBuffer->capacity = CAPACITY;
// Allocate GPU memory
startTime(&timer);
outBuffer->capacity = CAPACITY;
//allocating inbuffer address and value
CSRMatrix inBuffer_d;
inBuffer_d.numRows = inBuffer->numRows;
inBuffer_d.numCols = inBuffer->numCols;
inBuffer_d.nnz = inBuffer->nnz;
inBuffer_d.capacity = inBuffer->capacity;
cudaMalloc((void**)&inBuffer_d.rowPtrs, (inBuffer->numRows + 1) * sizeof(unsigned int));
cudaMalloc((void**)&inBuffer_d.colIdxs, CAPACITY * sizeof(unsigned int));
cudaMalloc((void**)&inBuffer_d.values, CAPACITY * sizeof(float));
cudaMemcpy(inBuffer_d.rowPtrs, inBuffer->rowPtrs, (inBuffer->numRows + 1) * sizeof(unsigned int), cudaMemcpyHostToDevice);
cudaMemcpy(inBuffer_d.colIdxs, inBuffer->colIdxs, (inBuffer->nnz) * sizeof(unsigned int), cudaMemcpyHostToDevice);
cudaMemcpy(inBuffer_d.values, inBuffer->values, inBuffer->nnz * sizeof(float), cudaMemcpyHostToDevice);
printf("inbuffer allocated\n");
/////////////////////////
//outBuffer_d allocation
COOMatrix *outBuffer_d;
unsigned int* out_rowIdxs_d;
unsigned int* out_colIdxs_d;
float* out_values_d;
unsigned int* out_nnz_d;
    unsigned int* out_nnz_h = (unsigned int*)malloc(sizeof(unsigned int));
*out_nnz_h = outBuffer->nnz;
cudaMalloc((void**)&outBuffer_d, sizeof(COOMatrix));
cudaMalloc((void**)&out_rowIdxs_d, outBuffer->capacity * sizeof(unsigned int));
cudaMalloc((void**)&out_colIdxs_d, outBuffer->capacity * sizeof(unsigned int));
cudaMalloc((void**)&out_values_d, outBuffer->capacity * sizeof(float));
cudaMalloc((void**)&out_nnz_d, sizeof(unsigned int));
//copying outbuffer
cudaMemcpy(outBuffer_d, outBuffer, sizeof(COOMatrix), cudaMemcpyHostToDevice);
cudaMemcpy(out_rowIdxs_d, outBuffer->rowIdxs, outBuffer->capacity * sizeof(unsigned int), cudaMemcpyHostToDevice);
cudaMemcpy(out_colIdxs_d, outBuffer->colIdxs, outBuffer->capacity * sizeof(unsigned int), cudaMemcpyHostToDevice);
cudaMemcpy(out_values_d, outBuffer->values, outBuffer->capacity * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(out_nnz_d, out_nnz_h, sizeof(unsigned int), cudaMemcpyHostToDevice);
cudaMemcpy(&(outBuffer_d->rowIdxs), &out_rowIdxs_d, sizeof(unsigned int*), cudaMemcpyHostToDevice);
cudaMemcpy(&(outBuffer_d->colIdxs), &out_colIdxs_d, sizeof(unsigned int*), cudaMemcpyHostToDevice);
cudaMemcpy(&(outBuffer_d->values), &out_values_d, sizeof(float*), cudaMemcpyHostToDevice);
cudaMemcpy(&(outBuffer_d->numRows), &(outBuffer->numRows), sizeof(unsigned int), cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
printf("outbuffer allocated\n");
//////////////////////////////////
// allocating W_d
CSCMatrix W_d[numLayers];
for (unsigned int layer = 0; layer < numLayers; ++layer) {
W_d[layer].numRows = W[layer]->numRows;
W_d[layer].numCols = W[layer]->numCols;
W_d[layer].nnz = W[layer]->nnz;
W_d[layer].capacity = W[layer]->capacity;
cudaMalloc((void**)&W_d[layer].colPtrs, (W[layer]->numCols + 1) * sizeof(unsigned int));
cudaMalloc((void**)&W_d[layer].rowIdxs, W[layer]->capacity * sizeof(unsigned int));
cudaMalloc((void**)&W_d[layer].values, W[layer]->capacity * sizeof(float));
}
for (unsigned int layer = 0; layer < numLayers; ++layer) {
cudaMemcpy(W_d[layer].colPtrs, W[layer]->colPtrs, (W[layer]->numCols + 1) * sizeof(unsigned int), cudaMemcpyHostToDevice);
cudaMemcpy(W_d[layer].rowIdxs, W[layer]->rowIdxs, W[layer]->capacity * sizeof(unsigned int), cudaMemcpyHostToDevice);
cudaMemcpy(W_d[layer].values, W[layer]->values, W[layer]->capacity * sizeof(float), cudaMemcpyHostToDevice);
}
cudaDeviceSynchronize();
stopTime(&timer);
printElapsedTime(timer, "Allocation and copy time on GPU Memory");
unsigned int *rowPtrstmp_d;
unsigned int *rowPtrstmp;
rowPtrstmp = (unsigned int *)malloc((inBuffer_d.numRows + 1) * sizeof(unsigned int));
cudaMalloc((void**)&rowPtrstmp_d, (inBuffer_d.numRows + 1) * sizeof(unsigned int));
for(unsigned int i=0; i<inBuffer_d.numRows+1;i++){
rowPtrstmp[i]=0;
}
cudaMemcpy(rowPtrstmp_d, rowPtrstmp, (inBuffer_d.numRows + 1) * sizeof(unsigned int), cudaMemcpyHostToDevice);
//kernel loop
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Loop over layers
for (unsigned int layer = 0; layer < numLayers; ++layer) {
// SpMSpM
printf("Computing layer %u (SpMSpM)", layer);
startTime(&timer);
dim3 numThreadsPerBlock3(threads, threads);
dim3 numBlocks3((W_d[layer].numCols + numThreadsPerBlock3.x - 1) / (numThreadsPerBlock3.x), (inBuffer_d.numRows + numThreadsPerBlock3.y*COARSE_FACTOR - 1) /( numThreadsPerBlock3.y*COARSE_FACTOR));
spmspm << <numBlocks3, numThreadsPerBlock3 >> > (outBuffer_d, inBuffer_d, W_d[layer], bias, out_nnz_d);
cudaDeviceSynchronize();
stopTimeAndPrint(&timer, "");
cudaMemcpy(out_nnz_h, out_nnz_d, sizeof(unsigned int), cudaMemcpyDeviceToHost);
printf("nnz %d\n", *out_nnz_h);
inBuffer_d.nnz = *out_nnz_h;
inBuffer_d.numCols = W_d[layer].numCols;
cudaDeviceSynchronize();
printf("kernel time for layer %u", layer);
stopTimeAndPrint(&timer, "");
startTime(&timer);
//calling histogram to fill rowPtrs of inBuffer
unsigned int numThreadsPerBlock = 1024;
unsigned int numBlocks = (*out_nnz_h + numThreadsPerBlock - 1) / numThreadsPerBlock;
//initializing rowstmp and rowstmp_d
if (layer != 0)
cudaMemcpy(rowPtrstmp_d, rowPtrstmp, (inBuffer_d.numRows + 1) * sizeof(unsigned int), cudaMemcpyHostToDevice);
histogram_private_kernel << < numBlocks, numThreadsPerBlock >> > (out_rowIdxs_d, rowPtrstmp_d, *out_nnz_h, inBuffer_d.numRows);
cudaDeviceSynchronize();
printf("Histogram time for layer %u", layer);
stopTimeAndPrint(&timer, "");
startTime(&timer);
//calling the scan kernel to scan kernel ptrs
const unsigned int numElementsPerBlock = 2 * numThreadsPerBlock;
numBlocks = ((inBuffer_d.numRows + 1) + numElementsPerBlock - 1) / numElementsPerBlock;
// Allocate partial sums
unsigned int *partialSums_d;
cudaMalloc((void**)&partialSums_d, numBlocks * sizeof(unsigned int));
cudaDeviceSynchronize();
// Call kernel
scan_kernel << < numBlocks, numThreadsPerBlock >> > (rowPtrstmp_d, inBuffer_d.rowPtrs, partialSums_d, inBuffer_d.numRows + 1);
cudaDeviceSynchronize();
// Scan partial sums then add
if (numBlocks > 1) {
// Scan partial sums
scan_gpu_d(partialSums_d, partialSums_d, numBlocks);
// Add scanned sums
add_kernel << < numBlocks, numThreadsPerBlock >> > (inBuffer_d.rowPtrs, partialSums_d, inBuffer_d.numRows + 1);
}
cudaDeviceSynchronize();
//used to check if scan and histogram same as nnz of kernel
cudaMemcpy(rowPtrstmp, inBuffer_d.rowPtrs, sizeof(unsigned int) * (inBuffer_d.numRows + 1), cudaMemcpyDeviceToHost);
printf("test %u\n", rowPtrstmp[inBuffer_d.numRows]);
// Free memory
cudaFree(partialSums_d);
printf("Scan time for layer %u", layer);
stopTimeAndPrint(&timer, "");
startTime(&timer);
//Binning
for (unsigned int i = 0; i < inBuffer_d.numRows + 1;i++) {
rowPtrstmp[i] = 0;
}
cudaMemcpy(rowPtrstmp_d, rowPtrstmp, (inBuffer_d.numRows + 1) * sizeof(unsigned int), cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
numBlocks = (*out_nnz_h + numThreadsPerBlock - 1) / numThreadsPerBlock;
Binning_kernel << < numBlocks, numThreadsPerBlock >> > (out_rowIdxs_d, out_colIdxs_d, out_values_d, inBuffer_d.rowPtrs, inBuffer_d.colIdxs, inBuffer_d.values, *out_nnz_h, inBuffer_d.numRows, rowPtrstmp_d);
cudaDeviceSynchronize();
//Sorting
numBlocks = ((inBuffer_d.numRows + 1) + numThreadsPerBlock - 1) / numThreadsPerBlock;
sorting_kernel << < numBlocks, numThreadsPerBlock >> > (inBuffer_d.colIdxs, inBuffer_d.values, inBuffer_d.rowPtrs, inBuffer_d.numRows);
cudaDeviceSynchronize();
printf("Converting time for layer %u", layer);
stopTimeAndPrint(&timer, "");
//reinitializing nnz and rowstmp
*out_nnz_h = 0;
cudaMemcpy(out_nnz_d, out_nnz_h, sizeof(unsigned int), cudaMemcpyHostToDevice);
for (unsigned int i = 0; i < inBuffer_d.numRows + 1;i++) {
rowPtrstmp[i] = 0;
}
cudaDeviceSynchronize();
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Copy data from GPU
startTime(&timer);
//TODO
inBuffer->numRows = inBuffer_d.numRows;
inBuffer->numCols = inBuffer_d.numCols;
inBuffer->nnz = inBuffer_d.nnz;
cudaMemcpy(inBuffer->rowPtrs, inBuffer_d.rowPtrs, (inBuffer_d.numRows + 1) * sizeof(unsigned int), cudaMemcpyDeviceToHost);
cudaMemcpy(inBuffer->colIdxs, inBuffer_d.colIdxs, inBuffer_d.nnz * sizeof(unsigned int), cudaMemcpyDeviceToHost);
cudaMemcpy(inBuffer->values, inBuffer_d.values, inBuffer_d.nnz * sizeof(float), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
stopTime(&timer);
printElapsedTime(timer, "Copy from GPU time");
//CPU
// Find
//nonzero rows
startTime(&timer);
findNonzeroRows(result, inBuffer);
stopTimeAndPrint(&timer, "Find nonzero rows");
// Free GPU memory
startTime(&timer);
cudaFree(inBuffer_d.rowPtrs);
cudaFree(inBuffer_d.colIdxs);
cudaFree(inBuffer_d.values);
cudaFree(outBuffer_d);
// cudaFree(tmpOutBuffer.rowIdxs);
// cudaFree(tmpOutBuffer.colIdxs);
// cudaFree(tmpOutBuffer.values);
//cudaFree(inBuffer_d);
free(inBuffer);
free(outBuffer);
for (unsigned int layer = 0; layer < numLayers; ++layer) {
cudaFree(W_d[layer].colPtrs);
cudaFree(W_d[layer].rowIdxs);
cudaFree(W_d[layer].values);
}
cudaDeviceSynchronize();
stopTime(&timer);
printElapsedTime(timer, "Deallocation time");
// Free buffers
startTime(&timer);
//freeCSR(Y0);
for (unsigned int layer = 0; layer < numLayers; ++layer) {
freeCSC(W[layer]);
}
stopTimeAndPrint(&timer, "Deallocate memory");
}
|
34d30ba1c95389b12b0792828ab46965c6fa709a.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <cstdio>
#include <cstdlib>
#include <math.h>
#include <string.h>
//#include <windows.h>
// #include "__pikoDefines.h"
#include "reyesPipe.h"
#include "__pikoCompiledPipe.cuh"
#ifdef __PIKOC_HOST__
//#include <GL/glut.h>
#include <piko/builtinTypes.h>
#include "host_math.h"
#include "pikoTypes.h"
#include "FPSMeter.h"
#include "globalDef.h"
//#include "performance.h"
#include "tracedata.cuh"
// pikoc does not work well with assimp, so it will not be included when pikoc runs
#ifndef __PIKOC__
#include "sceneParser.h"
#endif // __PIKOC__
using namespace std;
#define PATCH_BUFFER_SIZE 6000
//__device__ int init_num_prims;
//__device__ piko_patch init_data_d[MAX_NUM_PRIMS];
//__device__ int init_data_index;
/*
int *tracePos;
__device__ int *tracePos_device;
TraceElement *traceItem;
__device__ TraceElement *traceItem_device;
__device__ int traceDoneCounter;
// to measure performance
int *resCount;
__device__ int localDoneCounter;
__device__ int *resCnt_device;
int *queueSize;
__device__ int *queueSize_device;
*/
// ----------------------------------------
// function prototypes
// ----------------------------------------
void init(int argc, char* argv[]);
void initScreen(int W, int H);
void initScene();
void initPipe();
void display();
void destroyApp();
void doPerfTest(int n_runs = 10);
void runPipe();
void pipelineTest();
void resetDepthBuffer();
void printDepthBuffer();
// camera helper functions here
void buildProjectionMatrix();
void buildLookAt();
void glhPerspectivef2(float *matrix, float fovyInDegrees, float aspectRatio,
float znear, float zfar);
void glhFrustumf2(float *matrix, float left, float right, float bottom, float top,
float znear, float zfar);
void loadPatchBuffer(int start, int end);
// ----------------------------------------
// global variables
// ----------------------------------------
// camera angles
float theta, phi, camdist;
#ifndef __PIKOC__
// main scene
scene sMain;
#endif // __PIKOC__
piko_patch* patchBuffer = NULL;
ReyesPipe piko_pipe;
// state
ConstantState pipelineConstantState;
MutableState pipelineMutableState;
int numPatches;
//__device__ Pixel pixels_d[SCREEN_WIDTH * SCREEN_HEIGHT];
//__device__ int numPixs;
int main(int argc, char* argv[])
{
//glutInit(&argc, argv);
initScreen(SCREEN_WIDTH, SCREEN_HEIGHT);
//initScreen(1024, 768);
initScene();
initPipe();
display();
//glutDisplayFunc(display);
//doPerfTest(5);
//atexit(destroyApp);
//glutMainLoop();
}
cvec4f matmultfloat4(float * mvpMat, cvec4f v)
{
cvec4f outRes;
(outRes).x = mvpMat[0] * v.x + mvpMat[4] * v.y + mvpMat[8 ] * v.z + mvpMat[12] * v.w;
(outRes).y = mvpMat[1] * v.x + mvpMat[5] * v.y + mvpMat[9 ] * v.z + mvpMat[13] * v.w;
    (outRes).z = mvpMat[2] * v.x + mvpMat[6] * v.y + mvpMat[10] * v.z + mvpMat[14] * v.w; // column-major 4x4: the z row uses indices 2, 6, 10, 14
(outRes).w = mvpMat[3] * v.x + mvpMat[7] * v.y + mvpMat[11] * v.z + mvpMat[15] * v.w;
return outRes;
}
void initScreen(int W, int H){
#ifndef __PIKOC__
sMain.cam().W() = W;
sMain.cam().H() = H;
#endif // __PIKOC__
pipelineConstantState.screenSizeX = W;
pipelineConstantState.screenSizeY = H;
//glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA | GLUT_DEPTH);
//glutInitWindowSize(W,H);
//glutCreateWindow("Reyes Pipeline");
//glClearColor(0.0f, 0.0f, 0.2f, 1.0f);
}
void display()
{
// update state
size_t printBufferSize;
CUDA_CHECKED_CALL(hipDeviceGetLimit(&printBufferSize, hipLimitPrintfFifoSize));
printf("Original Buffer Size: %u\n", printBufferSize);
printBufferSize *= 10;
CUDA_CHECKED_CALL(hipDeviceSetLimit(hipLimitPrintfFifoSize, printBufferSize));
buildProjectionMatrix();
resetDepthBuffer();
printf("running display\n");
piko_pipe.prepare();
printf("After prepare\n");
piko_pipe.run_single();
printf("After single\n");
unsigned* data = piko_pipe.pikoScreen.getData();
int x = pipelineConstantState.screenSizeX, y = pipelineConstantState.screenSizeY;
FILE *fp = fopen("result", "w");
for(int i=0; i<y; i++)
{
for(int j=0; j<x*4; j++)
{
fprintf(fp, "%d", ((char*)data)[i*x*4 + j]);
if(j != x*4 - 1)
{
fprintf(fp, "\t");
}
}
if(i != y - 1)
{
fprintf(fp, "\n");
}
}
fclose(fp);
return;
//glDrawPixels(pipelineConstantState.screenSizeX, pipelineConstantState.screenSizeY, GL_RGBA, GL_UNSIGNED_BYTE, data);
//glutSwapBuffers();
// for(int i=0; i< pipelineConstantState.screenSizeX * pipelineConstantState.screenSizeY; i++)
// {
// if(data[i] != 0)
// printf("%d: %x\n", i, data[i]);
// }
}
void doPerfTest(int n_runs)
{
printf("Running perf test...\n");
buildProjectionMatrix();
resetDepthBuffer();
piko_pipe.prepare();
piko_pipe.run_single();
Stopwatch mywatch;
mywatch.Reset();
for(int run = 0; run < n_runs; run++)
{
printf("Start %d ---------- ", run);
buildProjectionMatrix();
resetDepthBuffer();
piko_pipe.prepare();
printf("end\n");
}
float prepTime = mywatch.GetTime();
mywatch.Reset();
for(int run = 0; run < n_runs; run++)
{
printf("Start %d ---------- ", run);
fflush(NULL);
buildProjectionMatrix();
resetDepthBuffer();
piko_pipe.prepare();
piko_pipe.run_single();
printf("end\n");
}
float fullrunTime = mywatch.GetTime();
float total_time_to_ms = 1000.0f / (float) n_runs;
printf("Prep time = %0.2f ms\n", total_time_to_ms * (prepTime));
printf("Full run time = %0.2f ms\n", total_time_to_ms * (fullrunTime));
printf("Raster time = %0.2f ms\n", total_time_to_ms * (fullrunTime - prepTime));
}
void initScene(){
// the scene will only be compiled when going through gcc and not pikoc
// parse scene file
sceneParser scp;
int nT, nV, nP;
buildProjectionMatrix();
scp.parseFile("./", "bezteapot.scene", &sMain);
printf("Flattening scene assets: "); fflush(stdout);
sMain.flatten(nT,nV, nP);
printf("T: %d, V: %d P:%d\n", nT, nV, nP);
numPatches = nP;
// create the final matrix
// FIXME: perhaps this is flipped?
//HOST::matmult4x4(pipelineConstantState.projMatrix, pipelineConstantState.viewMatrix,pipelineConstantState.viewProjMatrix );
//HOST::matmult4x4(pipelineConstantState.viewMatrix, pipelineConstantState.projMatrix, pipelineConstantState.viewProjMatrix);
sMain.cam().display();
}
void initPipe()
{
// build the state from the scene
// camera& cam = sMain.cam();
// pipelineConstantState.camera_eye = cam.eye();
// pipelineConstantState.camera_target = cam.target();
// pipelineConstantState.camera_up = cam.up();
// if(sMain.lights().size() > 0) {
// pipelineConstantState.lightPos = sMain.lights()[0].pos();
// pipelineConstantState.lightColor = sMain.lights()[0].dif();
// }
// else {
// // some default light that might suck
// pipelineConstantState.lightPos = gencvec3f(1.0,1.0,1.0);
// pipelineConstantState.lightColor = gencvec3f(1.0,1.0,1.0);
// }
int numLoadPatches = numPatches;
loadPatchBuffer(0,numLoadPatches);
resetDepthBuffer();
piko_pipe.allocate(pipelineConstantState, pipelineMutableState, patchBuffer, numLoadPatches);
}
// void runPipe()
// {
// int count = 1;
// ReyesPipe p;
// p.run(pipelineConstantState,patchBuffer, 1);
// }
void getPerspectiveMat(float *mat, float fovy,
float aspect, float n, float f)
{
float l, r, b, t;
float pi = 4.0 * atan(1);
float jiao = fovy * pi / 360;
t = n * tanf(jiao);
b = -t;
l = b * aspect;
r = t * aspect;
memset(mat, 0, sizeof(float)*16);
mat[0] = (2.0f * n) / (r-l);
mat[2] = (r+l)/(r-l);
mat[5] = (2.0f * n) / (t-b);
mat[6] = (t+b)/(t-b);
mat[10] = -(f+n)/(f-n);
mat[11] = -(2.0f*f*n)/(f-n);
mat[14] = -1.0;
}
void getLookUpMat(float *res, float ex, float ey, float ez,
float tx, float ty, float tz, float ux, float uy, float uz)
{
float fx = tx - ex;
float fy = ty - ey;
float fz = tz - ez;
float flength = sqrt(fx*fx + fy*fy + fz*fz);
fx /= flength;
fy /= flength;
fz /= flength;
float sx, sy, sz;
sx = fy*uz - fz*uy;
sy = fz*ux - fx*uz;
sz = fx*uy - fy*ux;
float slength = sqrt(sx*sx + sy*sy + sz*sz);
sx /= slength;
sy /= slength;
sz /= slength;
ux = sy*fz - sz*fy;
uy = sz*fx - sx*fz;
uz = sx*fy - sy*fx;
float mat[16];
memset(mat, 0, sizeof(float)*16);
mat[0] = 1;
mat[5] = 1;
mat[10] = 1;
mat[15] = 1;
mat[0] = sx;
mat[1] = sy;
mat[2] = sz;
mat[4] = ux;
mat[5] = uy;
mat[6] = uz;
mat[8] = -fx;
mat[9] = -fy;
mat[10] = -fz;
float trans[16];
memset(trans, 0, sizeof(float)*16);
trans[0] = 1;
trans[5] = 1;
trans[10] = 1;
trans[15] = 1;
trans[3] = -ex;
trans[7] = -ey;
trans[11] = -ez;
memset(res, 0, sizeof(float)*16);
for(int i=0; i<4; i++)
{
for(int j=0; j<4; j++)
{
for(int k=0; k<4; k++)
{
res[i*4 + j] += mat[i*4 + k] * trans[k*4 + j];
}
}
}
}
void buildProjectionMatrix()
{
camera& cam = sMain.cam();
float viewmat[16], mat[16], resProj[16], resView[16];
//glMatrixMode(GL_PROJECTION);
//glLoadIdentity();
memset(viewmat, 0, sizeof(float)*16);
viewmat[0] = 1;
viewmat[5] = 1;
viewmat[10] = 1;
viewmat[15] = 1;
getPerspectiveMat(mat, cam.fovyDeg(), cam.aspect(), cam.zNear(), cam.zFar());
//gluPerspective(cam.fovyDeg(), cam.aspect(), cam.zNear(), cam.zFar());
//glGetFloatv(GL_PROJECTION_MATRIX, viewmat);
memset(resProj, 0, sizeof(resProj));
for(int i=0; i<4; i++)
{
for(int j=0; j<4; j++)
{
for(int k=0; k<4; k++)
{
                // res and viewmat are column-major; mat is row-major
resProj[j*4+i] += viewmat[k*4+i]*mat[k*4+j];
}
}
}
//glLoadMatrixf(resProj);
//glMatrixMode(GL_MODELVIEW);
//glLoadIdentity();
memset(viewmat, 0, sizeof(float)*16);
viewmat[0] = 1;
viewmat[5] = 1;
viewmat[10] = 1;
viewmat[15] = 1;
float lu[16];
getLookUpMat(lu, cam.eye().x, cam.eye().y, cam.eye().z,
cam.target().x, cam.target().y, cam.target().z,
cam.up().x, cam.up().y, cam.up().z);
memset(resView, 0, sizeof(resView));
for(int i=0; i<4; i++)
{
for(int j=0; j<4; j++)
{
for(int k=0; k<4; k++)
{
                // res and viewmat are column-major; lu is row-major
resView[j*4+i] += viewmat[k*4+i]*lu[k*4+j];
}
}
}
//glLoadMatrixf(resView);
/*
gluLookAt(
cam.eye().x, cam.eye().y, cam.eye().z,
cam.target().x, cam.target().y, cam.target().z,
cam.up().x, cam.up().y, cam.up().z);
*/
/*
glGetFloatv(GL_MODELVIEW_MATRIX, viewmat);
for(int i=0; i<4; i++)
{
for(int j=0; j<4; j++)
{
printf("%.2f\t", viewmat[i*4+j]);
}
printf("\n");
}
printf("\n");
printf("\n");
for(int i=0; i<4; i++)
{
for(int j=0; j<4; j++)
{
printf("%.2f\t", lu[i*4+j]);
}
printf("\n");
}
printf("\n");
printf("\n");
*/
//glMatrixMode(GL_PROJECTION);
//glPushMatrix();
// glGetFloatv(GL_MODELVIEW_MATRIX, pipelineConstantState.viewMatrix);
memcpy(pipelineConstantState.viewMatrix, resView, sizeof(float)*16);
//glMultMatrixf(pipelineConstantState.viewMatrix);
float newProj[16];
memset(newProj, 0, sizeof(newProj));
for(int i=0; i<4; i++)
{
for(int j=0; j<4; j++)
{
for(int k=0; k<4; k++)
{
newProj[j*4 + i] += resProj[k*4+i] * resView[j*4 + k];
}
}
}
memcpy(pipelineConstantState.viewProjMatrix, newProj, sizeof(float)*16);
//glGetFloatv(GL_PROJECTION_MATRIX, pipelineConstantState.viewProjMatrix);
//glPopMatrix();
// printf("final projection matrix:\n");
// for(int i=0; i<16; i++) {
// if (i%4 ==0) printf("\n");
// printf("%f ", pipelineConstantState.viewProjMatrix[i]);
// } printf("\n");
// printf("final modelview matrix:\n");
// for(int i=0; i<16; i++) {
// if (i%4 ==0) printf("\n");
// printf("%f ", pipelineConstantState.viewMatrix[i]);
// } printf("\n");
}
void loadPatchBuffer(int start, int end) {
// lazy create
if(patchBuffer == NULL) {
patchBuffer = new piko_patch[PATCH_BUFFER_SIZE];
}
int size = end - start;
if (size <=0) return;
int counter = 0;
printf("\nfetching patches from %d to %d\n", start, end);
for(int i=start; i<end; i++) {
for(int j=0; j<16; j++)
{
patchBuffer[counter].CP[j] = sMain._flatPatches[i*16+j];
//printf("flat patch: ");
//disp4(patchBuffer[counter].CP[j]);
//disp4(sMain._flatPatches[i*16+j]);
//printf("\n");
}
patchBuffer[counter].numSplits = 0; // all patches begin with zero splits
//patchBuffer[counter].id = counter;
//patchBuffer[counter].bbmin.x = 99999.0f;
//patchBuffer[counter].bbmin.y = 99999.0f;
//patchBuffer[counter].bbmax.x = -99999.0f;
//patchBuffer[counter].bbmax.y = -99999.0f;
counter++;
}
}
void pipelineTest()
{
// test out parts of the pipeline here
cvec4f point = patchBuffer[0].CP[0];
printf("\n\n point:\n");
disp4(point);
cvec4f clipPoint = matmultfloat4(pipelineConstantState.viewProjMatrix, point);
if(clipPoint.w == 0.0f) clipPoint.w = 1.0f;
clipPoint.x /= clipPoint.w;
clipPoint.y /= clipPoint.w;
clipPoint.z /= clipPoint.w;
clipPoint.x = (clipPoint.x+1.0) * 0.5 * pipelineConstantState.screenSizeX;
clipPoint.y = (clipPoint.y+1.0) * 0.5 * pipelineConstantState.screenSizeY;
printf("\n");
disp4(clipPoint);
printf("\n");
}
void resetDepthBuffer() {
int nPixels = pipelineConstantState.screenSizeX * pipelineConstantState.screenSizeY;
for(int i = 0; i < nPixels; i++) {
pipelineMutableState.zBuffer[i] = 1.0f;
}
}
void printDepthBuffer() {
int nPixels = pipelineConstantState.screenSizeX * pipelineConstantState.screenSizeY;
for(int i = 0; i < nPixels; i++) {
printf("%f\n", pipelineMutableState.zBuffer[i]);
}
}
void destroyApp()
{
piko_pipe.destroy();
}
#endif // __PIKOC_HOST__
| 34d30ba1c95389b12b0792828ab46965c6fa709a.cu |
#include <iostream>
#include <cstdio>
#include <cstdlib>
#include <math.h>
#include <string.h>
//#include <windows.h>
// #include "__pikoDefines.h"
#include "reyesPipe.h"
#include "__pikoCompiledPipe.cuh"
#ifdef __PIKOC_HOST__
//#include <GL/glut.h>
#include <piko/builtinTypes.h>
#include "host_math.h"
#include "pikoTypes.h"
#include "FPSMeter.h"
#include "globalDef.h"
//#include "performance.h"
#include "tracedata.cuh"
// pikoc does not work well with assimp, so it will not be included when pikoc runs
#ifndef __PIKOC__
#include "sceneParser.h"
#endif // __PIKOC__
using namespace std;
#define PATCH_BUFFER_SIZE 6000
//__device__ int init_num_prims;
//__device__ piko_patch init_data_d[MAX_NUM_PRIMS];
//__device__ int init_data_index;
/*
int *tracePos;
__device__ int *tracePos_device;
TraceElement *traceItem;
__device__ TraceElement *traceItem_device;
__device__ int traceDoneCounter;
// to measure performance
int *resCount;
__device__ int localDoneCounter;
__device__ int *resCnt_device;
int *queueSize;
__device__ int *queueSize_device;
*/
// ----------------------------------------
// function prototypes
// ----------------------------------------
void init(int argc, char* argv[]);
void initScreen(int W, int H);
void initScene();
void initPipe();
void display();
void destroyApp();
void doPerfTest(int n_runs = 10);
void runPipe();
void pipelineTest();
void resetDepthBuffer();
void printDepthBuffer();
// camera helper functions here
void buildProjectionMatrix();
void buildLookAt();
void glhPerspectivef2(float *matrix, float fovyInDegrees, float aspectRatio,
float znear, float zfar);
void glhFrustumf2(float *matrix, float left, float right, float bottom, float top,
float znear, float zfar);
void loadPatchBuffer(int start, int end);
// ----------------------------------------
// global variables
// ----------------------------------------
// camera angles
float theta, phi, camdist;
#ifndef __PIKOC__
// main scene
scene sMain;
#endif // __PIKOC__
piko_patch* patchBuffer = NULL;
ReyesPipe piko_pipe;
// state
ConstantState pipelineConstantState;
MutableState pipelineMutableState;
int numPatches;
//__device__ Pixel pixels_d[SCREEN_WIDTH * SCREEN_HEIGHT];
//__device__ int numPixs;
int main(int argc, char* argv[])
{
//glutInit(&argc, argv);
initScreen(SCREEN_WIDTH, SCREEN_HEIGHT);
//initScreen(1024, 768);
initScene();
initPipe();
display();
//glutDisplayFunc(display);
//doPerfTest(5);
//atexit(destroyApp);
//glutMainLoop();
}
cvec4f matmultfloat4(float * mvpMat, cvec4f v)
{
cvec4f outRes;
(outRes).x = mvpMat[0] * v.x + mvpMat[4] * v.y + mvpMat[8 ] * v.z + mvpMat[12] * v.w;
(outRes).y = mvpMat[1] * v.x + mvpMat[5] * v.y + mvpMat[9 ] * v.z + mvpMat[13] * v.w;
    (outRes).z = mvpMat[2] * v.x + mvpMat[6] * v.y + mvpMat[10] * v.z + mvpMat[14] * v.w; // column-major 4x4: the z row uses indices 2, 6, 10, 14
(outRes).w = mvpMat[3] * v.x + mvpMat[7] * v.y + mvpMat[11] * v.z + mvpMat[15] * v.w;
return outRes;
}
void initScreen(int W, int H){
#ifndef __PIKOC__
sMain.cam().W() = W;
sMain.cam().H() = H;
#endif // __PIKOC__
pipelineConstantState.screenSizeX = W;
pipelineConstantState.screenSizeY = H;
//glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA | GLUT_DEPTH);
//glutInitWindowSize(W,H);
//glutCreateWindow("Reyes Pipeline");
//glClearColor(0.0f, 0.0f, 0.2f, 1.0f);
}
void display()
{
// update state
size_t printBufferSize;
CUDA_CHECKED_CALL(cudaDeviceGetLimit(&printBufferSize, cudaLimitPrintfFifoSize));
printf("Original Buffer Size: %u\n", printBufferSize);
printBufferSize *= 10;
CUDA_CHECKED_CALL(cudaDeviceSetLimit(cudaLimitPrintfFifoSize, printBufferSize));
buildProjectionMatrix();
resetDepthBuffer();
printf("running display\n");
piko_pipe.prepare();
printf("After prepare\n");
piko_pipe.run_single();
printf("After single\n");
unsigned* data = piko_pipe.pikoScreen.getData();
int x = pipelineConstantState.screenSizeX, y = pipelineConstantState.screenSizeY;
FILE *fp = fopen("result", "w");
for(int i=0; i<y; i++)
{
for(int j=0; j<x*4; j++)
{
fprintf(fp, "%d", ((char*)data)[i*x*4 + j]);
if(j != x*4 - 1)
{
fprintf(fp, "\t");
}
}
if(i != y - 1)
{
fprintf(fp, "\n");
}
}
fclose(fp);
return;
//glDrawPixels(pipelineConstantState.screenSizeX, pipelineConstantState.screenSizeY, GL_RGBA, GL_UNSIGNED_BYTE, data);
//glutSwapBuffers();
// for(int i=0; i< pipelineConstantState.screenSizeX * pipelineConstantState.screenSizeY; i++)
// {
// if(data[i] != 0)
// printf("%d: %x\n", i, data[i]);
// }
}
void doPerfTest(int n_runs)
{
printf("Running perf test...\n");
buildProjectionMatrix();
resetDepthBuffer();
piko_pipe.prepare();
piko_pipe.run_single();
Stopwatch mywatch;
mywatch.Reset();
for(int run = 0; run < n_runs; run++)
{
printf("Start %d ---------- ", run);
buildProjectionMatrix();
resetDepthBuffer();
piko_pipe.prepare();
printf("end\n");
}
float prepTime = mywatch.GetTime();
mywatch.Reset();
for(int run = 0; run < n_runs; run++)
{
printf("Start %d ---------- ", run);
fflush(NULL);
buildProjectionMatrix();
resetDepthBuffer();
piko_pipe.prepare();
piko_pipe.run_single();
printf("end\n");
}
float fullrunTime = mywatch.GetTime();
float total_time_to_ms = 1000.0f / (float) n_runs;
printf("Prep time = %0.2f ms\n", total_time_to_ms * (prepTime));
printf("Full run time = %0.2f ms\n", total_time_to_ms * (fullrunTime));
printf("Raster time = %0.2f ms\n", total_time_to_ms * (fullrunTime - prepTime));
}
void initScene(){
// the scene will only be compiled when going through gcc and not pikoc
// parse scene file
sceneParser scp;
int nT, nV, nP;
buildProjectionMatrix();
scp.parseFile("./", "bezteapot.scene", &sMain);
printf("Flattening scene assets: "); fflush(stdout);
sMain.flatten(nT,nV, nP);
printf("T: %d, V: %d P:%d\n", nT, nV, nP);
numPatches = nP;
// create the final matrix
// FIXME: perhaps this is flipped?
//HOST::matmult4x4(pipelineConstantState.projMatrix, pipelineConstantState.viewMatrix,pipelineConstantState.viewProjMatrix );
//HOST::matmult4x4(pipelineConstantState.viewMatrix, pipelineConstantState.projMatrix, pipelineConstantState.viewProjMatrix);
sMain.cam().display();
}
void initPipe()
{
// build the state from the scene
// camera& cam = sMain.cam();
// pipelineConstantState.camera_eye = cam.eye();
// pipelineConstantState.camera_target = cam.target();
// pipelineConstantState.camera_up = cam.up();
// if(sMain.lights().size() > 0) {
// pipelineConstantState.lightPos = sMain.lights()[0].pos();
// pipelineConstantState.lightColor = sMain.lights()[0].dif();
// }
// else {
// // some default light that might suck
// pipelineConstantState.lightPos = gencvec3f(1.0,1.0,1.0);
// pipelineConstantState.lightColor = gencvec3f(1.0,1.0,1.0);
// }
int numLoadPatches = numPatches;
loadPatchBuffer(0,numLoadPatches);
resetDepthBuffer();
piko_pipe.allocate(pipelineConstantState, pipelineMutableState, patchBuffer, numLoadPatches);
}
// void runPipe()
// {
// int count = 1;
// ReyesPipe p;
// p.run(pipelineConstantState,patchBuffer, 1);
// }
void getPerspectiveMat(float *mat, float fovy,
float aspect, float n, float f)
{
float l, r, b, t;
float pi = 4.0 * atan(1);
float jiao = fovy * pi / 360;
t = n * tanf(jiao);
b = -t;
l = b * aspect;
r = t * aspect;
memset(mat, 0, sizeof(float)*16);
mat[0] = (2.0f * n) / (r-l);
mat[2] = (r+l)/(r-l);
mat[5] = (2.0f * n) / (t-b);
mat[6] = (t+b)/(t-b);
mat[10] = -(f+n)/(f-n);
mat[11] = -(2.0f*f*n)/(f-n);
mat[14] = -1.0;
}
void getLookUpMat(float *res, float ex, float ey, float ez,
float tx, float ty, float tz, float ux, float uy, float uz)
{
float fx = tx - ex;
float fy = ty - ey;
float fz = tz - ez;
float flength = sqrt(fx*fx + fy*fy + fz*fz);
fx /= flength;
fy /= flength;
fz /= flength;
float sx, sy, sz;
sx = fy*uz - fz*uy;
sy = fz*ux - fx*uz;
sz = fx*uy - fy*ux;
float slength = sqrt(sx*sx + sy*sy + sz*sz);
sx /= slength;
sy /= slength;
sz /= slength;
ux = sy*fz - sz*fy;
uy = sz*fx - sx*fz;
uz = sx*fy - sy*fx;
float mat[16];
memset(mat, 0, sizeof(float)*16);
mat[0] = 1;
mat[5] = 1;
mat[10] = 1;
mat[15] = 1;
mat[0] = sx;
mat[1] = sy;
mat[2] = sz;
mat[4] = ux;
mat[5] = uy;
mat[6] = uz;
mat[8] = -fx;
mat[9] = -fy;
mat[10] = -fz;
float trans[16];
memset(trans, 0, sizeof(float)*16);
trans[0] = 1;
trans[5] = 1;
trans[10] = 1;
trans[15] = 1;
trans[3] = -ex;
trans[7] = -ey;
trans[11] = -ez;
memset(res, 0, sizeof(float)*16);
for(int i=0; i<4; i++)
{
for(int j=0; j<4; j++)
{
for(int k=0; k<4; k++)
{
res[i*4 + j] += mat[i*4 + k] * trans[k*4 + j];
}
}
}
}
void buildProjectionMatrix()
{
camera& cam = sMain.cam();
float viewmat[16], mat[16], resProj[16], resView[16];
//glMatrixMode(GL_PROJECTION);
//glLoadIdentity();
memset(viewmat, 0, sizeof(float)*16);
viewmat[0] = 1;
viewmat[5] = 1;
viewmat[10] = 1;
viewmat[15] = 1;
getPerspectiveMat(mat, cam.fovyDeg(), cam.aspect(), cam.zNear(), cam.zFar());
//gluPerspective(cam.fovyDeg(), cam.aspect(), cam.zNear(), cam.zFar());
//glGetFloatv(GL_PROJECTION_MATRIX, viewmat);
memset(resProj, 0, sizeof(resProj));
for(int i=0; i<4; i++)
{
for(int j=0; j<4; j++)
{
for(int k=0; k<4; k++)
{
                // res and viewmat are column-major; mat is row-major
resProj[j*4+i] += viewmat[k*4+i]*mat[k*4+j];
}
}
}
//glLoadMatrixf(resProj);
//glMatrixMode(GL_MODELVIEW);
//glLoadIdentity();
memset(viewmat, 0, sizeof(float)*16);
viewmat[0] = 1;
viewmat[5] = 1;
viewmat[10] = 1;
viewmat[15] = 1;
float lu[16];
getLookUpMat(lu, cam.eye().x, cam.eye().y, cam.eye().z,
cam.target().x, cam.target().y, cam.target().z,
cam.up().x, cam.up().y, cam.up().z);
memset(resView, 0, sizeof(resView));
for(int i=0; i<4; i++)
{
for(int j=0; j<4; j++)
{
for(int k=0; k<4; k++)
{
                // res and viewmat are column-major; lu is row-major
resView[j*4+i] += viewmat[k*4+i]*lu[k*4+j];
}
}
}
//glLoadMatrixf(resView);
/*
gluLookAt(
cam.eye().x, cam.eye().y, cam.eye().z,
cam.target().x, cam.target().y, cam.target().z,
cam.up().x, cam.up().y, cam.up().z);
*/
/*
glGetFloatv(GL_MODELVIEW_MATRIX, viewmat);
for(int i=0; i<4; i++)
{
for(int j=0; j<4; j++)
{
printf("%.2f\t", viewmat[i*4+j]);
}
printf("\n");
}
printf("\n");
printf("\n");
for(int i=0; i<4; i++)
{
for(int j=0; j<4; j++)
{
printf("%.2f\t", lu[i*4+j]);
}
printf("\n");
}
printf("\n");
printf("\n");
*/
//glMatrixMode(GL_PROJECTION);
//glPushMatrix();
// glGetFloatv(GL_MODELVIEW_MATRIX, pipelineConstantState.viewMatrix);
memcpy(pipelineConstantState.viewMatrix, resView, sizeof(float)*16);
//glMultMatrixf(pipelineConstantState.viewMatrix);
float newProj[16];
memset(newProj, 0, sizeof(newProj));
for(int i=0; i<4; i++)
{
for(int j=0; j<4; j++)
{
for(int k=0; k<4; k++)
{
newProj[j*4 + i] += resProj[k*4+i] * resView[j*4 + k];
}
}
}
memcpy(pipelineConstantState.viewProjMatrix, newProj, sizeof(float)*16);
//glGetFloatv(GL_PROJECTION_MATRIX, pipelineConstantState.viewProjMatrix);
//glPopMatrix();
// printf("final projection matrix:\n");
// for(int i=0; i<16; i++) {
// if (i%4 ==0) printf("\n");
// printf("%f ", pipelineConstantState.viewProjMatrix[i]);
// } printf("\n");
// printf("final modelview matrix:\n");
// for(int i=0; i<16; i++) {
// if (i%4 ==0) printf("\n");
// printf("%f ", pipelineConstantState.viewMatrix[i]);
// } printf("\n");
}
void loadPatchBuffer(int start, int end) {
// lazy create
if(patchBuffer == NULL) {
patchBuffer = new piko_patch[PATCH_BUFFER_SIZE];
}
int size = end - start;
if (size <=0) return;
int counter = 0;
printf("\nfetching patches from %d to %d\n", start, end);
for(int i=start; i<end; i++) {
for(int j=0; j<16; j++)
{
patchBuffer[counter].CP[j] = sMain._flatPatches[i*16+j];
//printf("flat patch: ");
//disp4(patchBuffer[counter].CP[j]);
//disp4(sMain._flatPatches[i*16+j]);
//printf("\n");
}
patchBuffer[counter].numSplits = 0; // all patches begin with zero splits
//patchBuffer[counter].id = counter;
//patchBuffer[counter].bbmin.x = 99999.0f;
//patchBuffer[counter].bbmin.y = 99999.0f;
//patchBuffer[counter].bbmax.x = -99999.0f;
//patchBuffer[counter].bbmax.y = -99999.0f;
counter++;
}
}
void pipelineTest()
{
// test out parts of the pipeline here
cvec4f point = patchBuffer[0].CP[0];
printf("\n\n point:\n");
disp4(point);
cvec4f clipPoint = matmultfloat4(pipelineConstantState.viewProjMatrix, point);
if(clipPoint.w == 0.0f) clipPoint.w = 1.0f;
clipPoint.x /= clipPoint.w;
clipPoint.y /= clipPoint.w;
clipPoint.z /= clipPoint.w;
clipPoint.x = (clipPoint.x+1.0) * 0.5 * pipelineConstantState.screenSizeX;
clipPoint.y = (clipPoint.y+1.0) * 0.5 * pipelineConstantState.screenSizeY;
printf("\n");
disp4(clipPoint);
printf("\n");
}
void resetDepthBuffer() {
int nPixels = pipelineConstantState.screenSizeX * pipelineConstantState.screenSizeY;
for(int i = 0; i < nPixels; i++) {
pipelineMutableState.zBuffer[i] = 1.0f;
}
}
void printDepthBuffer() {
int nPixels = pipelineConstantState.screenSizeX * pipelineConstantState.screenSizeY;
for(int i = 0; i < nPixels; i++) {
printf("%f\n", pipelineMutableState.zBuffer[i]);
}
}
void destroyApp()
{
piko_pipe.destroy();
}
#endif // __PIKOC_HOST__
|
3b91896b8243cda99f21e861775c2d9cb4431452.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CUDAKernels.hpp"
#include "RungeKuttaSolver.hpp"
#include "Kronauer.hpp"
#include "parameters.hpp"
#define norm(u,v) ((u.x-v.x)*(u.x-v.x) + (u.y-v.y)*(u.y-v.y))
__global__ void FindEntrainmentTimesKernel( const int2 dim,
const double* pXMeshPts,
const double* pYMeshPts,
const double t_final,
const double2* pRefOrbit,
double* pResult)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index<dim.x*dim.y)
{
Kronauer* p_problem = new Kronauer();
RungeKuttaSolver* p_solver = new RungeKuttaSolver( timestep, p_problem);
// Initialise system
int orbit_ind;
double time = 0.0;
double2 u;
double2 ref_pt = pRefOrbit[0];
u.x = pXMeshPts[index % dim.x];
u.y = pYMeshPts[index / dim.x];
do
{
time += timestep;
p_solver->RungeKuttaStep( time, u);
p_solver->RungeKuttaStep( time, ref_pt);
//orbit_ind = (int) ( (time+24.0-tShift)/timestep) % (int) (24.0/timestep);
//ref_pt = pRefOrbit[orbit_ind];
} while ( (time<t_final) & (norm( u, ref_pt) > entrain_thresh*entrain_thresh));
pResult[index] = time;
delete( p_solver);
delete( p_problem);
}
}
__global__ void FindEntrainmentTimesPhaseKernel( const int2 dim,
const double* pXMeshPts,
const double* pYMeshPts,
const double t_final,
double* pResult)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index<dim.x*dim.y)
{
Kronauer* p_problem = new Kronauer();
RungeKuttaSolver* p_solver = new RungeKuttaSolver( timestep, p_problem);
// Initialise system
double time = 0.0;
double2 u;
double2 u_old;
u.x = pXMeshPts[index % dim.x];
u.y = pYMeshPts[index / dim.x];
do
{
time += timestep;
u_old = u;
p_solver->RungeKuttaStep( time, u);
} while ( (time<t_final) & (( abs( fmod( time-tShift-24.0, 24.0) - entrained_phase) > 0.01) || (u.x > 0.0) || (u_old.x < 0.0)));
pResult[index] = time;
delete( p_solver);
delete( p_problem);
}
}
__global__ void FindInsideFlagKernel( const int2 dim,
const double* pXMeshPts,
const double* pYMeshPts,
const double t_final,
const double* pRefOrbitAmp,
double* pResult)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index<dim.x*dim.y)
{
Kronauer* p_problem = new Kronauer();
RungeKuttaSolver* p_solver = new RungeKuttaSolver( timestep, p_problem);
// Initialise system
int orbit_ind;
double time = 0.0;
double2 u;
bool inside_flag = false;
u.x = pXMeshPts[index % dim.x];
u.y = pYMeshPts[index / dim.x];
do
{
time += timestep;
p_solver->RungeKuttaStep( time, u);
orbit_ind = (int) ( ( atan2( u.y, u.x)+pi)/theta_step);
inside_flag = ( sqrtf( u.x*u.x+u.y*u.y) < pRefOrbitAmp[orbit_ind]);
} while ( (time<t_final) && (!inside_flag));
pResult[index] = (double) inside_flag;
delete( p_solver);
delete( p_problem);
}
}
| 3b91896b8243cda99f21e861775c2d9cb4431452.cu | #include "CUDAKernels.hpp"
#include "RungeKuttaSolver.hpp"
#include "Kronauer.hpp"
#include "parameters.hpp"
#define norm(u,v) ((u.x-v.x)*(u.x-v.x) + (u.y-v.y)*(u.y-v.y))
__global__ void FindEntrainmentTimesKernel( const int2 dim,
const double* pXMeshPts,
const double* pYMeshPts,
const double t_final,
const double2* pRefOrbit,
double* pResult)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index<dim.x*dim.y)
{
Kronauer* p_problem = new Kronauer();
RungeKuttaSolver* p_solver = new RungeKuttaSolver( timestep, p_problem);
// Initialise system
int orbit_ind;
double time = 0.0;
double2 u;
double2 ref_pt = pRefOrbit[0];
u.x = pXMeshPts[index % dim.x];
u.y = pYMeshPts[index / dim.x];
do
{
time += timestep;
p_solver->RungeKuttaStep( time, u);
p_solver->RungeKuttaStep( time, ref_pt);
//orbit_ind = (int) ( (time+24.0-tShift)/timestep) % (int) (24.0/timestep);
//ref_pt = pRefOrbit[orbit_ind];
} while ( (time<t_final) & (norm( u, ref_pt) > entrain_thresh*entrain_thresh));
pResult[index] = time;
delete( p_solver);
delete( p_problem);
}
}
__global__ void FindEntrainmentTimesPhaseKernel( const int2 dim,
const double* pXMeshPts,
const double* pYMeshPts,
const double t_final,
double* pResult)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index<dim.x*dim.y)
{
Kronauer* p_problem = new Kronauer();
RungeKuttaSolver* p_solver = new RungeKuttaSolver( timestep, p_problem);
// Initialise system
double time = 0.0;
double2 u;
double2 u_old;
u.x = pXMeshPts[index % dim.x];
u.y = pYMeshPts[index / dim.x];
do
{
time += timestep;
u_old = u;
p_solver->RungeKuttaStep( time, u);
} while ( (time<t_final) & (( abs( fmod( time-tShift-24.0, 24.0) - entrained_phase) > 0.01) || (u.x > 0.0) || (u_old.x < 0.0)));
pResult[index] = time;
delete( p_solver);
delete( p_problem);
}
}
__global__ void FindInsideFlagKernel( const int2 dim,
const double* pXMeshPts,
const double* pYMeshPts,
const double t_final,
const double* pRefOrbitAmp,
double* pResult)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index<dim.x*dim.y)
{
Kronauer* p_problem = new Kronauer();
RungeKuttaSolver* p_solver = new RungeKuttaSolver( timestep, p_problem);
// Initialise system
int orbit_ind;
double time = 0.0;
double2 u;
bool inside_flag = false;
u.x = pXMeshPts[index % dim.x];
u.y = pYMeshPts[index / dim.x];
do
{
time += timestep;
p_solver->RungeKuttaStep( time, u);
orbit_ind = (int) ( ( atan2( u.y, u.x)+pi)/theta_step);
inside_flag = ( sqrtf( u.x*u.x+u.y*u.y) < pRefOrbitAmp[orbit_ind]);
} while ( (time<t_final) && (!inside_flag));
pResult[index] = (double) inside_flag;
delete( p_solver);
delete( p_problem);
}
}
|
2af7e2eb87387ea259177dd0b9d6880703eddcc0.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <iostream>
#include <math.h>
#include <ctime>
#include <cmath>
#include "enum_header.h"
#include <unistd.h>
#include <stdio.h>
/* we need these includes for CUDA's random number stuff */
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
// REMEMBER TO PUT __host__ __device__ IN FRONT OF CLASS METHODS
#define PI 3.14159265358979323846
double* three_dim_index(double* matrix, int i, int j, int k, double m, int b, int num_assets);
__device__ double* two_dim_indexGPU(double* vector, int i, int j, double m, int b){
//int m_int= (int)m;
double* p;
//specify index layout here
p=&vector[b*(i)+(j)];
return p;
}
__device__ double* three_dim_indexGPU(double* matrix, int i, int j, int k, double m, int b, int num_assets){
//int m_int = (int)m;
double* p;
//specify index layout here
//p=&matrix[(m_int)*b*(k)+(m_int)*(j)+(i)];
p=&matrix[i*b*num_assets+j*num_assets+k];
return p;
}
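// The transition density evaluated below (a sketch of the math, assuming the state is the
// log of the asset price, as the commented-out price-space variant suggests):
//   X_new | X_old ~ N( X_old + (r - delta - 0.5*sigma^2)*delta_t , sigma^2*delta_t ), so
//   f(X_new | X_old) = phi( (X_new - X_old - (r - delta - 0.5*sigma^2)*delta_t) / (sigma*sqrt(delta_t)) ) / (sigma*sqrt(delta_t)),
// where phi is the standard normal pdf.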
__device__ double densityGPU(double Xold, double Xnew, double sigma, double r, double delta, double delta_t){
double f=0, x=0;
//x=(1/(sigma*sqrt(delta_t)))*(log(Xnew)-log(Xold)-(r-delta-0.5*sigma*sigma)*delta_t);
x=(1/(sigma*sqrt(delta_t)))*(Xnew-Xold-(r-delta-0.5*sigma*sigma)*delta_t);
//f= (1/(sigma*sqrt(delta_t)*Xnew))*(1/(sqrt(2*PI)))*exp(-0.5*x*x); // this is the transition density
f= (1/(sigma*sqrt(delta_t)))*(1/(sqrt(2*PI)))*exp(-0.5*x*x);
return f;
}
/*
__global__ void init(unsigned int seed, hiprandState_t* states) {
int idx=blockDim.x*blockIdx.x + threadIdx.x;
// we have to initialize the state
hiprand_init(seed, // the seed can be the same for each core, here we pass the time in from the CPU
idx, // the sequence number should be different for each core (unless you want all
// cores to get the same sequence of numbers for some reason - use thread id!
0, // the offset is how much extra we advance in the sequence for each call, can be 0
&states[idx]);
}
*/
__device__ double GeometricPayOffCallV(double* X, double m, int b, int num_assets, double Strike){
double h;
h=1;
for(int l=0; l<num_assets; l++){
// h*=exp(X[i][j][l]);
//h*= exp(*two_dim_indexGPU(X, i, l, m, b));
h*=exp(X[l]);
}
h=pow(h,1.0/(num_assets));
if(h-Strike>0){
h=h-Strike;
}
else{
h=0;
}
return h;
}
__device__ double GeometricPayOffPutV(double* X, double m, int b, int num_assets, double Strike){
double h;
h=1;
for(int l=0; l<num_assets; l++){
// h*=exp(X[i][j][l]);
//h*= exp(*two_dim_indexGPU(X, i, l, m, b));
h*=exp(X[l]);
}
h=pow(h,1.0/(num_assets));
if(Strike-h>0){
h=Strike-h;
}
else{
h=0;
}
return h;
}
__device__ void S_weights(double* S_Weights, double* X_device, double* S_new, int m, int b, double* sigma_device, double* delta_device, double delta_t, int num_assets, double r , int i, double* weight_denominator_device ){//note: S_new used to be just S
//if(blockDim.x*blockIdx.x + threadIdx.x==0){printf("Beginning \n");}
//double density_product,
double sum, w_s;
for(int h=0; h<b; h++){ //h=k
//if(blockDim.x*blockIdx.x + threadIdx.x==0){printf("Outside loop, i=%i \n", h);}
sum=0;
w_s=1;
for(int kk=0; kk<num_assets; kk++){
//w_s*=densityGPU(*two_dim_indexGPU(S, i, kk, m, num_assets), *three_dim_indexGPU(X_device, (i+1), h, kk, m, b), sigma_device[kk], r, delta_device[kk], delta_t);
w_s*=densityGPU(S_new[kk], *three_dim_indexGPU(X_device, (i+1), h, kk, m, b, num_assets), sigma_device[kk], r, delta_device[kk], delta_t);
}
/*
clock_t start_time =clock();
clock_t stop_time =clock();
int time=stop_time-start_time;
if(blockDim.x*blockIdx.x + threadIdx.x==0){printf("result at i=%i , = %i\n",i, time);}
*/
/*
density_product=1;
//if(blockDim.x*blockIdx.x + threadIdx.x==0){printf("after first inside loop \n");}
for(int g=0; g<b; g++){ //g=l
//if(blockDim.x*blockIdx.x + threadIdx.x==0){printf("inside second loop i=%i \n", g);}
for(int gg=0; gg<num_assets; gg++){
density_product*=densityGPU(*three_dim_indexGPU(X_device, i, g, gg, m, b), *three_dim_indexGPU(X_device, (i+1), h, gg, m, b), sigma_device[gg], r, delta_device[gg], delta_t);
}
sum+=(1/((double)b))*density_product;
}
*/
sum = *two_dim_indexGPU(weight_denominator_device, i, h, m-1, b);
if(sum==0){printf("division by zero in weights function of path estimator\n");}
w_s = (((double)b)*w_s)/sum;
//if(blockDim.x*blockIdx.x + threadIdx.x==0){printf("w_s=%f \n", w_s);}
//*two_dim_indexGPU(S_Weights, i, h, m, b)=w_s;
S_Weights[h]=w_s;
}
//if(blockDim.x*blockIdx.x + threadIdx.x==0){printf("End \n");}
}
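// The weight computed above is a stochastic-mesh likelihood ratio (stated here as a sketch,
// in the notation of this file): the numerator is the product over assets of the one-step
// transition densities from the simulated point S_new into mesh node X[i+1][h], and the
// denominator is the precomputed term weight_denominator_device at (i, h), so
//   w_s(h) = b * prod_k f(S_new[k] -> X[i+1][h][k]) / weight_denominator[i][h].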
__global__ void PathEstimatorKernel(double* X_device, double* weight_denominator_device, double* V_device, double* delta_device, double* sigma_device, double* X0_device, int N, double strike, double r, double delta_t, int b, int m, int num_assets, hiprandState_t* states, double* results_dev, double* asset_amount_device){
int idx =blockDim.x*blockIdx.x + threadIdx.x;
//if(blockDim.x*blockIdx.x + threadIdx.x==N+1){printf("n+1 outside \n");}
if(idx<N){
//printf("inside \n");
//if(blockDim.x*blockIdx.x + threadIdx.x==N-1){printf("n-1 \n");}
//if(blockDim.x*blockIdx.x + threadIdx.x==N+1){printf("n+1 inside \n");}
//GeometricPayOffPut thePayOff(strike);
//GeometricPayOffPut payoff(strike);
//enum Containers { vector, matrix };
//Containers Vector = vector;
//Containers Matrix = matrix;
double v_0, S_i, Z, C, H, sum, weight; //, w_s, sum_Z;
//srand((unsigned)time(NULL));
//std::random_device rd;
//std::default_random_engine generator;
//generator.seed( rd() );
//std::normal_distribution<double> distribution(0.0,1.0);
/// ARRAY CODE
//const int S_N=(m)*num_assets;
//const int S_W_N=(m)*b;
const int S_N= num_assets;
const int S_W_N= b;
double* S_new;
S_new= new double[S_N];
//double s_new[S_new_N];
//S_new=s_new;
//double* S_old;
//S_old=new double[S_N];
double* S_Weights;
S_Weights=new double[S_W_N];
//double s_weights[S_W_new_N];
//S_Weights=s_weights;
//double* S_new;
//double* S_old;
//double* S_Weights;
/*
double s_new[1];
//double s_old[1];
double s_weights[250];
S_new=s_new;
//S_old=s_old;
S_Weights=s_weights;
*/
//S_Weights=new double[250];
//S_new=new double[1];
//if(idx==0){printf("X[0][0][0]= %f \n",*three_dim_indexGPU(X_device,0,0,0,m,b));}
//if(idx==0){printf("before the loop");}
int i=0;
do {
if(i==0){
for(int ll=0; ll<num_assets; ll++){
//Z=boxmuller();
// NEED TO CHANGE THE RANDOM NUMBER GENERATOR
//Z=distribution(generator);
Z=hiprand_normal_double(&states[idx]);
//printf("for idx=%i, r=%f",idx,Z);
//printf("random number for idx %i is %f",idx,Z);
S_i=X0_device[ll] + (r-delta_device[ll]-0.5*pow(sigma_device[ll], 2))*delta_t + sigma_device[ll]*sqrt(delta_t)*Z;
//tempnodevector.push_back(S_i);
//*two_dim_indexGPU(S, i, ll, m, num_assets)=S_i;
S_new[ll]=S_i;
}
}
else{
for(int jj=0; jj<num_assets; jj++){
//Z=boxmuller();
//Z=distribution(generator);
Z=hiprand_normal_double(&states[idx]);
//if(idx==0){printf("random number=%f /n", Z);}
//S_i=(*two_dim_indexGPU(S, (i-1), jj, m, num_assets)) + (r-delta_device[jj]-0.5*pow(sigma_device[jj], 2))*delta_t + sigma_device[jj]*sqrt(delta_t)*Z;
S_i=S_new[jj] + (r-delta_device[jj]-0.5*pow(sigma_device[jj], 2))*delta_t + sigma_device[jj]*sqrt(delta_t)*Z;
//tempnodevector.push_back(S_i);
//*two_dim_indexGPU(S, i, jj, m, num_assets)=S_i;
S_new[jj]=S_i;
}
}
//printf("inside \n");
//if(idx==0){printf("before the call, m =%i /n", m);}
if(i<m-1){
//S_weights(tempvec, S_Weights, X, S, m, b, sigma, delta, delta_t, asset_amount, r, i );
//S_weights(S_Weights, X_device, S, m, b, sigma_device, delta_device, delta_t, num_assets, r, i );
//right
S_weights(S_Weights, X_device, S_new, m, b, sigma_device, delta_device, delta_t, num_assets, r, i, weight_denominator_device);
}
//printf("inside \n");
double con_val=0; //continuation value variable
sum=0;
if(i==m-1){
C=0;//continuation value at the last time step
}
else{
for(int k=0; k<b; k++){
//weight= * two_dim_indexGPU(S_Weights, i, k, m, b);
//right
weight= S_Weights[k];
//con_val=V[(m-1)-i-1][k];
con_val= *two_dim_indexGPU(V_device, (m-1-i-1), k, m, b);
//con_val=0;
sum+=(weight) * (con_val);
}
//con_val=inner_product(b, first_vector, second_vector);
C=(1/(double)b)*sum; //continuation value
// C=(1/(double)b)*con_val;
}
//printf("inside \n");
//H=Payoff(S, strike, asset_amount, i)*exp(-r*delta_t*((i+1)));
//H=thePayOff(S, i, 0, m, num_assets, Vector, num_assets)*exp(-r*delta_t*((i+1)));
//H=0;
H= GeometricPayOffCallV(S_new, m, num_assets, num_assets, strike)*exp(-r*delta_t*((i+1)));
i=i+1;
/*for(int copy=0; copy<num_assets; copy++){
S_old[copy]=S_new[copy];
}*/
}while(H<C);//the loop exits once the payoff H is at least the continuation value C; at i=m-1, C=0, so the loop runs at most m steps (time indices 0..m-1).
v_0=H;
//if(idx==0){printf("result %i=%f", idx, v_0);}
results_dev[idx]=v_0;
delete[] S_new;
//delete[] S_old;
delete[] S_Weights;
//return v_0;
//printf("inside \n");
}
}
double PathEstimator(double strike, double r, double delta_t, int b, double m, double sigma[], double delta[], double X0[], double* X, double* weight_denominator, double* V, double asset_amount[], int num_assets, int Path_estimator_iterations, int iterator, int Final_iteration, hiprandState_t* States, hiprandState_t* states, int threads ){
//m=int(m);
//for(int test=0; test<((m-1)*b); test++){
//printf("at the start of pathestimator den=%f /n", weight_denominator[test]);
//}
//printf("Ib serial X[0][0][0]= %f \n",*three_dim_index(X,0,0,0,m,b));
hipError_t error = hipGetLastError();
if( error != hipSuccess )
{
std::cout << hipGetErrorString(error) << std::endl;
printf("found at line %d\n", __LINE__);
exit(1);
}
int N= Path_estimator_iterations;
double* sigma_host;
sigma_host =sigma;
double* delta_host;
delta_host =delta;
double* X0_host;
X0_host =X0;
double* asset_amount_host;
asset_amount_host =asset_amount;
int m_int=(int)m;
//printf("at the start of pathestimator m_int=%i /n", m_int);
int X_N=(m_int) * b * (num_assets);
int W_N=(m_int-1) * b;
int V_N=(m_int) * b;
int delta_N= num_assets;
int sigma_N=num_assets;
int X0_N=num_assets;
int asset_amount_N = num_assets;
double* X_device;
double* V_device;
double* weight_denominator_device;
double* sigma_device;
double* delta_device;
double* X0_device;
double* asset_amount_device;
error = hipGetLastError();
if( error != hipSuccess )
{
std::cout << hipGetErrorString(error) << std::endl;
printf("found at line %d\n", __LINE__);
exit(1);
}
hipMalloc((void**) &X_device, X_N*sizeof(double) );
hipMemcpy(X_device, X, X_N*sizeof(double), hipMemcpyHostToDevice);
hipMalloc((void**) &V_device, V_N*sizeof(double) );
hipMemcpy(V_device, V, V_N*sizeof(double), hipMemcpyHostToDevice);
hipMalloc((void**) &weight_denominator_device, W_N*sizeof(double) );
hipMemcpy(weight_denominator_device, weight_denominator, W_N*sizeof(double), hipMemcpyHostToDevice);
hipMalloc((void**) &X0_device, X0_N*sizeof(double) );
hipMemcpy(X0_device, X0_host, X0_N*sizeof(double), hipMemcpyHostToDevice);
hipMalloc((void**) &sigma_device, sigma_N*sizeof(double) );
hipMemcpy(sigma_device, sigma_host, sigma_N*sizeof(double), hipMemcpyHostToDevice);
hipMalloc((void**) &delta_device, delta_N*sizeof(double) );
hipMemcpy(delta_device, delta_host, delta_N*sizeof(double), hipMemcpyHostToDevice);
hipMalloc((void**) &asset_amount_device, asset_amount_N*sizeof(double) );
hipMemcpy(asset_amount_device, asset_amount_host, asset_amount_N*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(states, States, threads*sizeof(hiprandState_t), hipMemcpyHostToDevice);
//dim3 gridDim((int)ceil(N/512.0));
//printf("the grid dim is:%i\n",(int)ceil(N/512.0));
//dim3 blockDim(512);
dim3 gridDim((int)ceil(N/512.0));
dim3 blockDim(512);
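// One thread per replication: N = Path_estimator_iterations paths, 512 threads per block.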
/*if(N>512){
gridDim()= ceil(N/521);
}
*/
error = hipGetLastError();
if( error != hipSuccess )
{
std::cout << hipGetErrorString(error) << std::endl;
printf("found at line %d\n", __LINE__);
exit(1);
}
double* results;
results = new double[N];
double* results_dev;
hipMalloc((void**) &results_dev, N*sizeof(double) );
// CALL RANDOM SEEDING KERNEL HERE
error = hipGetLastError();
if( error != hipSuccess )
{
std::cout << hipGetErrorString(error) << std::endl;
printf("found at line %d\n", __LINE__);
exit(1);
}
/*
hiprandState_t* states;
hipMalloc((void**) &states, N * sizeof(hiprandState_t));
init<<<gridDim, blockDim>>>(time(0), states);
hipDeviceSynchronize();
*/
error = hipGetLastError();
if( error != hipSuccess )
{
std::cout << hipGetErrorString(error) << std::endl;
printf("found at line %d\n", __LINE__);
exit(1);
}
//printf("inside \n");
//hipDeviceSetLimit(hipLimitMallocHeapSize, 80000000*sizeof(double));
//size_t size;
//hipDeviceGetLimit(&size, hipLimitMallocHeapSize);
//printf("Heap size found to be %d\n",(int)size);
//printf("after");
//for(int test=0; test<V_N; test++){
//printf("N=%i, strike=%f, r=%f, delta_t=%f, num_a=%i, b=%i", N, strike, r, delta_t, num_assets,b);
//}
hipLaunchKernelGGL(( PathEstimatorKernel), dim3(gridDim), dim3(blockDim), 0, 0, X_device, weight_denominator_device, V_device, delta_device, sigma_device, X0_device, N, strike, r, delta_t, b, m_int, num_assets, states, results_dev, asset_amount_device);
hipDeviceSynchronize();
error = hipGetLastError();
if( error != hipSuccess )
{
std::cout << hipGetErrorString(error) << std::endl;
printf("found at line %d\n", __LINE__);
exit(1);
}
//printf("here");
hipMemcpy(results, results_dev, sizeof(double)*N, hipMemcpyDeviceToHost);
error = hipGetLastError();
if( error != hipSuccess )
{
std::cout << hipGetErrorString(error) << std::endl;
printf("Found at line %d\n", __LINE__);
exit(1);
}
//hipDeviceSynchronize();
//hipMemcpy(States, states, sizeof(hiprandState_t)*N, hipMemcpyDeviceToHost);
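// Copy the RNG states back to the host so the next call continues the same hiprand sequences.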
hipMemcpy(States, states, sizeof(hiprandState_t)*threads, hipMemcpyDeviceToHost);
error = hipGetLastError();
if( error != hipSuccess )
{
std::cout << hipGetErrorString(error) << std::endl;
printf("Found at line %d\n", __LINE__);
exit(1);
}
double result=0;
for(int f=0; f<Path_estimator_iterations; f++){
result+=results[f];
//printf("random %i =%f\n", f, results[f]);
}
result=(1/double(N))*result;
delete[] results;
error = hipGetLastError();
if( error != hipSuccess )
{
std::cout << hipGetErrorString(error) << std::endl;
printf("found at line %d\n", __LINE__);
exit(1);
}
hipFree(X_device);
hipFree(V_device);
hipFree(weight_denominator_device);
hipFree(sigma_device);
hipFree(delta_device);
hipFree(X0_device);
hipFree(results_dev);
hipFree(asset_amount_device);
if(iterator==Final_iteration-1){
hipFree(states);
//printf("done, iter=%i",iterator);
}
//hipDeviceReset();
return result;
//hipDeviceReset();
}
| 2af7e2eb87387ea259177dd0b9d6880703eddcc0.cu | #include <cuda.h>
#include <iostream>
#include <math.h>
#include <ctime>
#include <cmath>
#include "enum_header.h"
#include <unistd.h>
#include <stdio.h>
/* we need these includes for CUDA's random number stuff */
#include <curand.h>
#include <curand_kernel.h>
// REMEMBER TO PUT __host__ __device__ IN FRONT OF CLASS METHODS
#define PI 3.14159265358979323846
double* three_dim_index(double* matrix, int i, int j, int k, double m, int b, int num_assets);
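// Device-side indexing helpers for the flat, row-major mesh storage:
// two_dim_indexGPU views `vector` as an (m x b) array, element (i,j) at offset b*i + j;
// three_dim_indexGPU views `matrix` as (time x node x asset), element (i,j,k) at offset
// i*b*num_assets + j*num_assets + k.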
__device__ double* two_dim_indexGPU(double* vector, int i, int j, double m, int b){
//int m_int= (int)m;
double* p;
//specify index layout here
p=&vector[b*(i)+(j)];
return p;
}
__device__ double* three_dim_indexGPU(double* matrix, int i, int j, int k, double m, int b, int num_assets){
//int m_int = (int)m;
double* p;
//specify index layout here
//p=&matrix[(m_int)*b*(k)+(m_int)*(j)+(i)];
p=&matrix[i*b*num_assets+j*num_assets+k];
return p;
}
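// densityGPU evaluates the Gaussian transition density of the one-step log-price increment
// (Xold and Xnew are log-prices); the commented-out lines are the equivalent lognormal form
// in price space.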
__device__ double densityGPU(double Xold, double Xnew, double sigma, double r, double delta, double delta_t){
double f=0, x=0;
//x=(1/(sigma*sqrt(delta_t)))*(log(Xnew)-log(Xold)-(r-delta-0.5*sigma*sigma)*delta_t);
x=(1/(sigma*sqrt(delta_t)))*(Xnew-Xold-(r-delta-0.5*sigma*sigma)*delta_t);
//f= (1/(sigma*sqrt(delta_t)*Xnew))*(1/(sqrt(2*PI)))*exp(-0.5*x*x); // this is the transition density
f= (1/(sigma*sqrt(delta_t)))*(1/(sqrt(2*PI)))*exp(-0.5*x*x);
return f;
}
/*
__global__ void init(unsigned int seed, curandState_t* states) {
int idx=blockDim.x*blockIdx.x + threadIdx.x;
// we have to initialize the state
curand_init(seed, // the seed can be the same for each core, here we pass the time in from the CPU
idx, // the sequence number should be different for each core (unless you want all
// cores to get the same sequence of numbers for some reason - use thread id!
0, // the offset is how much extra we advance in the sequence for each call, can be 0
&states[idx]);
}
*/
__device__ double GeometricPayOffCallV(double* X, double m, int b, int num_assets, double Strike){
double h;
h=1;
for(int l=0; l<num_assets; l++){
// h*=exp(X[i][j][l]);
//h*= exp(*two_dim_indexGPU(X, i, l, m, b));
h*=exp(X[l]);
}
h=pow(h,1.0/(num_assets));
if(h-Strike>0){
h=h-Strike;
}
else{
h=0;
}
return h;
}
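// GeometricPayOffPutV below mirrors the call payoff above: both take log-prices, form the
// geometric mean of exp(X[l]) over the assets, and clamp (Strike - mean), respectively
// (mean - Strike), at zero.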
__device__ double GeometricPayOffPutV(double* X, double m, int b, int num_assets, double Strike){
double h;
h=1;
for(int l=0; l<num_assets; l++){
// h*=exp(X[i][j][l]);
//h*= exp(*two_dim_indexGPU(X, i, l, m, b));
h*=exp(X[l]);
}
h=pow(h,1.0/(num_assets));
if(Strike-h>0){
h=Strike-h;
}
else{
h=0;
}
return h;
}
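// S_weights: for each mesh node h at time i+1 the weight is
// w_h = b * f(S_new -> X[i+1][h]) / weight_denominator[i][h],
// where f is the product over assets of densityGPU; the denominator (an average of densities
// from the previous mesh column, cf. the commented-out block) is precomputed on the host.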
__device__ void S_weights(double* S_Weights, double* X_device, double* S_new, int m, int b, double* sigma_device, double* delta_device, double delta_t, int num_assets, double r , int i, double* weight_denominator_device ){//note: S_new used to be just S
//if(blockDim.x*blockIdx.x + threadIdx.x==0){printf("Beginning \n");}
//double density_product,
double sum, w_s;
for(int h=0; h<b; h++){ //h=k
//if(blockDim.x*blockIdx.x + threadIdx.x==0){printf("Outside loop, i=%i \n", h);}
sum=0;
w_s=1;
for(int kk=0; kk<num_assets; kk++){
//w_s*=densityGPU(*two_dim_indexGPU(S, i, kk, m, num_assets), *three_dim_indexGPU(X_device, (i+1), h, kk, m, b), sigma_device[kk], r, delta_device[kk], delta_t);
w_s*=densityGPU(S_new[kk], *three_dim_indexGPU(X_device, (i+1), h, kk, m, b, num_assets), sigma_device[kk], r, delta_device[kk], delta_t);
}
/*
clock_t start_time =clock();
clock_t stop_time =clock();
int time=stop_time-start_time;
if(blockDim.x*blockIdx.x + threadIdx.x==0){printf("result at i=%i , = %i\n",i, time);}
*/
/*
density_product=1;
//if(blockDim.x*blockIdx.x + threadIdx.x==0){printf("after first inside loop \n");}
for(int g=0; g<b; g++){ //g=l
//if(blockDim.x*blockIdx.x + threadIdx.x==0){printf("inside second loop i=%i \n", g);}
for(int gg=0; gg<num_assets; gg++){
density_product*=densityGPU(*three_dim_indexGPU(X_device, i, g, gg, m, b), *three_dim_indexGPU(X_device, (i+1), h, gg, m, b), sigma_device[gg], r, delta_device[gg], delta_t);
}
sum+=(1/((double)b))*density_product;
}
*/
sum = *two_dim_indexGPU(weight_denominator_device, i, h, m-1, b);
if(sum==0){printf("division by zero in weights function of path estimator\n");}
w_s = (((double)b)*w_s)/sum;
//if(blockDim.x*blockIdx.x + threadIdx.x==0){printf("w_s=%f \n", w_s);}
//*two_dim_indexGPU(S_Weights, i, h, m, b)=w_s;
S_Weights[h]=w_s;
}
//if(blockDim.x*blockIdx.x + threadIdx.x==0){printf("End \n");}
}
__global__ void PathEstimatorKernel(double* X_device, double* weight_denominator_device, double* V_device, double* delta_device, double* sigma_device, double* X0_device, int N, double strike, double r, double delta_t, int b, int m, int num_assets, curandState_t* states, double* results_dev, double* asset_amount_device){
int idx =blockDim.x*blockIdx.x + threadIdx.x;
//if(blockDim.x*blockIdx.x + threadIdx.x==N+1){printf("n+1 outside \n");}
if(idx<N){
//printf("inside \n");
//if(blockDim.x*blockIdx.x + threadIdx.x==N-1){printf("n-1 \n");}
//if(blockDim.x*blockIdx.x + threadIdx.x==N+1){printf("n+1 inside \n");}
//GeometricPayOffPut thePayOff(strike);
//GeometricPayOffPut payoff(strike);
//enum Containers { vector, matrix };
//Containers Vector = vector;
//Containers Matrix = matrix;
double v_0, S_i, Z, C, H, sum, weight; //, w_s, sum_Z;
//srand((unsigned)time(NULL));
//std::random_device rd;
//std::default_random_engine generator;
//generator.seed( rd() );
//std::normal_distribution<double> distribution(0.0,1.0);
/// ARRAY CODE
//const int S_N=(m)*num_assets;
//const int S_W_N=(m)*b;
const int S_N= num_assets;
const int S_W_N= b;
double* S_new;
S_new= new double[S_N];
//double s_new[S_new_N];
//S_new=s_new;
//double* S_old;
//S_old=new double[S_N];
double* S_Weights;
S_Weights=new double[S_W_N];
//double s_weights[S_W_new_N];
//S_Weights=s_weights;
//double* S_new;
//double* S_old;
//double* S_Weights;
/*
double s_new[1];
//double s_old[1];
double s_weights[250];
S_new=s_new;
//S_old=s_old;
S_Weights=s_weights;
*/
//S_Weights=new double[250];
//S_new=new double[1];
//if(idx==0){printf("X[0][0][0]= %f \n",*three_dim_indexGPU(X_device,0,0,0,m,b));}
//if(idx==0){printf("before the loop");}
int i=0;
do {
if(i==0){
for(int ll=0; ll<num_assets; ll++){
//Z=boxmuller();
// NEED TO CHANGE THE RANDOM NUMBER GENERATOR
//Z=distribution(generator);
Z=curand_normal_double(&states[idx]);
//printf("for idx=%i, r=%f",idx,Z);
//printf("random number for idx %i is %f",idx,Z);
S_i=X0_device[ll] + (r-delta_device[ll]-0.5*pow(sigma_device[ll], 2))*delta_t + sigma_device[ll]*sqrt(delta_t)*Z;
//tempnodevector.push_back(S_i);
//*two_dim_indexGPU(S, i, ll, m, num_assets)=S_i;
S_new[ll]=S_i;
}
}
else{
for(int jj=0; jj<num_assets; jj++){
//Z=boxmuller();
//Z=distribution(generator);
Z=curand_normal_double(&states[idx]);
//if(idx==0){printf("random number=%f /n", Z);}
//S_i=(*two_dim_indexGPU(S, (i-1), jj, m, num_assets)) + (r-delta_device[jj]-0.5*pow(sigma_device[jj], 2))*delta_t + sigma_device[jj]*sqrt(delta_t)*Z;
S_i=S_new[jj] + (r-delta_device[jj]-0.5*pow(sigma_device[jj], 2))*delta_t + sigma_device[jj]*sqrt(delta_t)*Z;
//tempnodevector.push_back(S_i);
//*two_dim_indexGPU(S, i, jj, m, num_assets)=S_i;
S_new[jj]=S_i;
}
}
//printf("inside \n");
//if(idx==0){printf("before the call, m =%i /n", m);}
if(i<m-1){
//S_weights(tempvec, S_Weights, X, S, m, b, sigma, delta, delta_t, asset_amount, r, i );
//S_weights(S_Weights, X_device, S, m, b, sigma_device, delta_device, delta_t, num_assets, r, i );
//right
S_weights(S_Weights, X_device, S_new, m, b, sigma_device, delta_device, delta_t, num_assets, r, i, weight_denominator_device);
}
//printf("inside \n");
double con_val=0; //continuation value variable
sum=0;
if(i==m-1){
C=0;//continuation value at the last time step
}
else{
for(int k=0; k<b; k++){
//weight= * two_dim_indexGPU(S_Weights, i, k, m, b);
//right
weight= S_Weights[k];
//con_val=V[(m-1)-i-1][k];
con_val= *two_dim_indexGPU(V_device, (m-1-i-1), k, m, b);
//con_val=0;
sum+=(weight) * (con_val);
}
//con_val=inner_product(b, first_vector, second_vector);
C=(1/(double)b)*sum; //continuation value
// C=(1/(double)b)*con_val;
}
//printf("inside \n");
//H=Payoff(S, strike, asset_amount, i)*exp(-r*delta_t*((i+1)));
//H=thePayOff(S, i, 0, m, num_assets, Vector, num_assets)*exp(-r*delta_t*((i+1)));
//H=0;
H= GeometricPayOffCallV(S_new, m, num_assets, num_assets, strike)*exp(-r*delta_t*((i+1)));
i=i+1;
/*for(int copy=0; copy<num_assets; copy++){
S_old[copy]=S_new[copy];
}*/
}while(H<C);//the loop exits once the payoff H is at least the continuation value C; at i=m-1, C=0, so the loop runs at most m steps (time indices 0..m-1).
v_0=H;
//if(idx==0){printf("result %i=%f", idx, v_0);}
results_dev[idx]=v_0;
delete[] S_new;
//delete[] S_old;
delete[] S_Weights;
//return v_0;
//printf("inside \n");
}
}
double PathEstimator(double strike, double r, double delta_t, int b, double m, double sigma[], double delta[], double X0[], double* X, double* weight_denominator, double* V, double asset_amount[], int num_assets, int Path_estimator_iterations, int iterator, int Final_iteration, curandState_t* States, curandState_t* states, int threads ){
//m=int(m);
//for(int test=0; test<((m-1)*b); test++){
//printf("at the start of pathestimator den=%f /n", weight_denominator[test]);
//}
//printf("Ib serial X[0][0][0]= %f \n",*three_dim_index(X,0,0,0,m,b));
cudaError_t error = cudaGetLastError();
if( error != cudaSuccess )
{
std::cout << cudaGetErrorString(error) << std::endl;
printf("found at line %d\n", __LINE__);
exit(1);
}
int N= Path_estimator_iterations;
double* sigma_host;
sigma_host =sigma;
double* delta_host;
delta_host =delta;
double* X0_host;
X0_host =X0;
double* asset_amount_host;
asset_amount_host =asset_amount;
int m_int=(int)m;
//printf("at the start of pathestimator m_int=%i /n", m_int);
int X_N=(m_int) * b * (num_assets);
int W_N=(m_int-1) * b;
int V_N=(m_int) * b;
int delta_N= num_assets;
int sigma_N=num_assets;
int X0_N=num_assets;
int asset_amount_N = num_assets;
double* X_device;
double* V_device;
double* weight_denominator_device;
double* sigma_device;
double* delta_device;
double* X0_device;
double* asset_amount_device;
error = cudaGetLastError();
if( error != cudaSuccess )
{
std::cout << cudaGetErrorString(error) << std::endl;
printf("found at line %d\n", __LINE__);
exit(1);
}
cudaMalloc((void**) &X_device, X_N*sizeof(double) );
cudaMemcpy(X_device, X, X_N*sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc((void**) &V_device, V_N*sizeof(double) );
cudaMemcpy(V_device, V, V_N*sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc((void**) &weight_denominator_device, W_N*sizeof(double) );
cudaMemcpy(weight_denominator_device, weight_denominator, W_N*sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc((void**) &X0_device, X0_N*sizeof(double) );
cudaMemcpy(X0_device, X0_host, X0_N*sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc((void**) &sigma_device, sigma_N*sizeof(double) );
cudaMemcpy(sigma_device, sigma_host, sigma_N*sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc((void**) &delta_device, delta_N*sizeof(double) );
cudaMemcpy(delta_device, delta_host, delta_N*sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc((void**) &asset_amount_device, asset_amount_N*sizeof(double) );
cudaMemcpy(asset_amount_device, asset_amount_host, asset_amount_N*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(states, States, threads*sizeof(curandState_t), cudaMemcpyHostToDevice);
//dim3 gridDim((int)ceil(N/512.0));
//printf("the grid dim is:%i\n",(int)ceil(N/512.0));
//dim3 blockDim(512);
dim3 gridDim((int)ceil(N/512.0));
dim3 blockDim(512);
/*if(N>512){
gridDim()= ceil(N/521);
}
*/
error = cudaGetLastError();
if( error != cudaSuccess )
{
std::cout << cudaGetErrorString(error) << std::endl;
printf("found at line %d\n", __LINE__);
exit(1);
}
double* results;
results = new double[N];
double* results_dev;
cudaMalloc((void**) &results_dev, N*sizeof(double) );
// CALL RANDOM SEEDING KERNEL HERE
error = cudaGetLastError();
if( error != cudaSuccess )
{
std::cout << cudaGetErrorString(error) << std::endl;
printf("found at line %d\n", __LINE__);
exit(1);
}
/*
curandState_t* states;
cudaMalloc((void**) &states, N * sizeof(curandState_t));
init<<<gridDim, blockDim>>>(time(0), states);
cudaDeviceSynchronize();
*/
error = cudaGetLastError();
if( error != cudaSuccess )
{
std::cout << cudaGetErrorString(error) << std::endl;
printf("found at line %d\n", __LINE__);
exit(1);
}
//printf("inside \n");
//cudaDeviceSetLimit(cudaLimitMallocHeapSize, 80000000*sizeof(double));
//size_t size;
//cudaDeviceGetLimit(&size, cudaLimitMallocHeapSize);
//printf("Heap size found to be %d\n",(int)size);
//printf("after");
//for(int test=0; test<V_N; test++){
//printf("N=%i, strike=%f, r=%f, delta_t=%f, num_a=%i, b=%i", N, strike, r, delta_t, num_assets,b);
//}
PathEstimatorKernel<<<gridDim, blockDim>>>(X_device, weight_denominator_device, V_device, delta_device, sigma_device, X0_device, N, strike, r, delta_t, b, m_int, num_assets, states, results_dev, asset_amount_device);
cudaDeviceSynchronize();
error = cudaGetLastError();
if( error != cudaSuccess )
{
std::cout << cudaGetErrorString(error) << std::endl;
printf("found at line %d\n", __LINE__);
exit(1);
}
//printf("here");
cudaMemcpy(results, results_dev, sizeof(double)*N, cudaMemcpyDeviceToHost);
error = cudaGetLastError();
if( error != cudaSuccess )
{
std::cout << cudaGetErrorString(error) << std::endl;
printf("Found at line %d\n", __LINE__);
exit(1);
}
//cudaDeviceSynchronize();
//cudaMemcpy(States, states, sizeof(curandState_t)*N, cudaMemcpyDeviceToHost);
cudaMemcpy(States, states, sizeof(curandState_t)*threads, cudaMemcpyDeviceToHost);
error = cudaGetLastError();
if( error != cudaSuccess )
{
std::cout << cudaGetErrorString(error) << std::endl;
printf("Found at line %d\n", __LINE__);
exit(1);
}
double result=0;
for(int f=0; f<Path_estimator_iterations; f++){
result+=results[f];
//printf("random %i =%f\n", f, results[f]);
}
result=(1/double(N))*result;
delete[] results;
error = cudaGetLastError();
if( error != cudaSuccess )
{
std::cout << cudaGetErrorString(error) << std::endl;
printf("found at line %d\n", __LINE__);
exit(1);
}
cudaFree(X_device);
cudaFree(V_device);
cudaFree(weight_denominator_device);
cudaFree(sigma_device);
cudaFree(delta_device);
cudaFree(X0_device);
cudaFree(results_dev);
cudaFree(asset_amount_device);
if(iterator==Final_iteration-1){
cudaFree(states);
//printf("done, iter=%i",iterator);
}
//cudaDeviceReset();
return result;
//cudaDeviceReset();
}
|
19645934609c47e206725a673e23ae84cb8ed2c9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cfloat>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/math.h"
#include "div_conv_norm_op.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void DivConvNormForward(const int nthreads,
const T* input1, const T* input2,
const int channels, const int pixels,
T* output) {
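// `channels` is C/A, the number of input1 channels per input2 map. For a flat index over the
// (N, C, H, W) output, idx is the pixel and b is the flattened (batch, anchor) index, so each
// group of C/A consecutive input1 channels is divided by the same input2[n, a, h, w] value.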
CUDA_1D_KERNEL_LOOP(index, nthreads) {
const int idx = index % pixels;
const int b = (index / pixels) / channels;
const float divider = input2[b * pixels + idx];
// only do safe division
if (divider > 0.) {
output[index] = input1[index] / divider;
} else {
output[index] = 0.;
}
}
}
template <typename T>
__global__ void DivConvNormBackward(const int nthreads,
const T* input_grad, const T* input1, const T* input2,
const int channels, const int pixels, T* output_grad1) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
const int idx = index % pixels;
const int b = (index / pixels) / channels;
const float divider = input2[b * pixels + idx];
// only deal with input 1
if (divider > 0.) {
output_grad1[index] = input_grad[index] / divider;
} else {
output_grad1[index] = 0.;
}
}
}
} // namespace
template<>
bool DivConvNormOp<float, CUDAContext>::RunOnDevice() {
auto& X1 = Input(0); // Input data 1
auto& X2 = Input(1); // Input data 2
auto* Y = Output(0); // Output data, summation of the two
const int N = X1.dim32(0);
const int C = X1.dim32(1);
const int H = X1.dim32(2);
const int W = X1.dim32(3);
const int A = X2.dim32(1);
DCHECK_EQ(N, X2.dim32(0));
DCHECK_EQ(C % A, 0);
DCHECK_EQ(H, X2.dim32(2));
DCHECK_EQ(W, X2.dim32(3));
const int pixels = H * W;
const int X = C / A;
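// X1 is (N, C, H, W) and X2 is (N, A, H, W) with C divisible by A; X = C/A is the number of
// X1 channels normalized by each X2 channel.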
// N, C, H, W
Y->Resize(N, C, H, W);
const int output_size = Y->size();
hipLaunchKernelGGL(( DivConvNormForward<float>), dim3(CAFFE_GET_BLOCKS(output_size)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
output_size, X1.data<float>(), X2.data<float>(),
X, pixels, Y->mutable_data<float>());
return true;
}
template<>
bool DivConvNormGradientOp<float, CUDAContext>::RunOnDevice() {
auto& dY = Input(0); // Gradient of the output data
auto& X1 = Input(1); // Input data 1
auto& X2 = Input(2); // Input data 2
auto* dX1 = Output(0); // Gradient of the input data 1
const int C = X1.dim32(1);
const int H = X1.dim32(2);
const int W = X1.dim32(3);
const int A = X2.dim32(1);
const int X = C / A;
const int pixels = H * W;
const int output_size = dY.size();
dX1->ResizeLike(X1);
hipLaunchKernelGGL(( DivConvNormBackward<float>), dim3(CAFFE_GET_BLOCKS(output_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
output_size,
dY.data<float>(),
X1.data<float>(),
X2.data<float>(),
X,
pixels,
dX1->mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(DivConvNorm,
DivConvNormOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(DivConvNormGradient,
DivConvNormGradientOp<float, CUDAContext>);
} // namespace caffe2 | 19645934609c47e206725a673e23ae84cb8ed2c9.cu | #include <cfloat>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/math.h"
#include "div_conv_norm_op.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void DivConvNormForward(const int nthreads,
const T* input1, const T* input2,
const int channels, const int pixels,
T* output) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
const int idx = index % pixels;
const int b = (index / pixels) / channels;
const float divider = input2[b * pixels + idx];
// only do safe division
if (divider > 0.) {
output[index] = input1[index] / divider;
} else {
output[index] = 0.;
}
}
}
template <typename T>
__global__ void DivConvNormBackward(const int nthreads,
const T* input_grad, const T* input1, const T* input2,
const int channels, const int pixels, T* output_grad1) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
const int idx = index % pixels;
const int b = (index / pixels) / channels;
const float divider = input2[b * pixels + idx];
// only deal with input 1
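// (no gradient is propagated to input2, the divider; it is treated as a constant, and
// positions with a non-positive divider receive zero gradient)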
if (divider > 0.) {
output_grad1[index] = input_grad[index] / divider;
} else {
output_grad1[index] = 0.;
}
}
}
} // namespace
template<>
bool DivConvNormOp<float, CUDAContext>::RunOnDevice() {
auto& X1 = Input(0); // Input data 1
auto& X2 = Input(1); // Input data 2
auto* Y = Output(0); // Output data, summation of the two
const int N = X1.dim32(0);
const int C = X1.dim32(1);
const int H = X1.dim32(2);
const int W = X1.dim32(3);
const int A = X2.dim32(1);
DCHECK_EQ(N, X2.dim32(0));
DCHECK_EQ(C % A, 0);
DCHECK_EQ(H, X2.dim32(2));
DCHECK_EQ(W, X2.dim32(3));
const int pixels = H * W;
const int X = C / A;
// N, C, H, W
Y->Resize(N, C, H, W);
const int output_size = Y->size();
DivConvNormForward<float><<<CAFFE_GET_BLOCKS(output_size), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
output_size, X1.data<float>(), X2.data<float>(),
X, pixels, Y->mutable_data<float>());
return true;
}
template<>
bool DivConvNormGradientOp<float, CUDAContext>::RunOnDevice() {
auto& dY = Input(0); // Gradient of the output data
auto& X1 = Input(1); // Input data 1
auto& X2 = Input(2); // Input data 2
auto* dX1 = Output(0); // Gradient of the input data 1
const int C = X1.dim32(1);
const int H = X1.dim32(2);
const int W = X1.dim32(3);
const int A = X2.dim32(1);
const int X = C / A;
const int pixels = H * W;
const int output_size = dY.size();
dX1->ResizeLike(X1);
DivConvNormBackward<float><<<CAFFE_GET_BLOCKS(output_size),
CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
output_size,
dY.data<float>(),
X1.data<float>(),
X2.data<float>(),
X,
pixels,
dX1->mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(DivConvNorm,
DivConvNormOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(DivConvNormGradient,
DivConvNormGradientOp<float, CUDAContext>);
} // namespace caffe2 |
a0879bf29eb1db4988f320d7ef5e4543a5a370b2.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/combine.hpp>
#include <cudf/table/table_device_view.cuh>
#include <cudf/utilities/error.hpp>
#include <cudf/detail/valid_if.cuh>
#include <strings/utilities.hpp>
#include <strings/utilities.cuh>
#include <algorithm>
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/transform_scan.h>
#include <thrust/transform_reduce.h>
#include <thrust/logical.h>
namespace cudf
{
namespace strings
{
namespace detail
{
//
std::unique_ptr<column> concatenate( table_view const& strings_columns,
string_scalar const& separator = string_scalar(""),
string_scalar const& narep = string_scalar("",false),
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
hipStream_t stream=0 )
{
auto num_columns = strings_columns.num_columns();
CUDF_EXPECTS( num_columns > 0, "At least one column must be specified");
// check all columns are of type string
CUDF_EXPECTS( std::all_of( strings_columns.begin(), strings_columns.end(), [] (auto c) {return c.type().id()==STRING;}),
"All columns must be of type string" );
if( num_columns==1 ) // single strings column returns a copy
return std::make_unique<column>(*(strings_columns.begin()),stream,mr);
auto strings_count = strings_columns.num_rows();
if( strings_count == 0 ) // empty begets empty
return detail::make_empty_strings_column(mr,stream);
CUDF_EXPECTS( separator.is_valid(), "Parameter separator must be a valid string_scalar");
string_view d_separator(separator.data(),separator.size());
string_view const d_narep = [&narep] {
if( !narep.is_valid() )
return string_view(nullptr,0);
return narep.size()==0 ? string_view("",0) : string_view(narep.data(),narep.size());
} ();
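// narep policy: if narep is an invalid scalar, any null element makes the whole output row
// null (see the valid_if lambda below); if narep is valid, null elements are replaced by it.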
// Create device views from the strings columns.
auto table = table_device_view::create(strings_columns,stream);
auto d_table = *table;
// create resulting null mask
auto valid_mask = cudf::experimental::detail::valid_if(
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
[d_table, d_narep] __device__ (size_type idx) {
bool null_element = thrust::any_of( thrust::seq, d_table.begin(), d_table.end(),
[idx] (auto col) { return col.is_null(idx);});
return( !null_element || !d_narep.is_null() );
}, stream, mr );
rmm::device_buffer null_mask = valid_mask.first;
auto null_count = valid_mask.second;
// build offsets column by computing sizes of each string in the output
auto offsets_transformer = [d_table, num_columns, d_separator, d_narep] __device__ (size_type row_idx) {
// for this row (idx), iterate over each column and add up the bytes
bool null_element = thrust::any_of( thrust::seq, d_table.begin(), d_table.end(),
[row_idx] (auto const& d_column) { return d_column.is_null(row_idx);});
if( null_element && d_narep.is_null() )
return 0;
size_type bytes = thrust::transform_reduce( thrust::seq, d_table.begin(), d_table.end(),
[row_idx, d_separator, d_narep] __device__ (column_device_view const& d_column) {
return d_separator.size_bytes() +
(d_column.is_null(row_idx) ? d_narep.size_bytes()
: d_column.element<string_view>(row_idx).size_bytes());
}, 0, thrust::plus<size_type>());
// separator goes only in between elements
if( bytes > 0 ) // if not null
bytes -= d_separator.size_bytes(); // remove the last separator
return bytes;
};
auto offsets_transformer_itr = thrust::make_transform_iterator( thrust::make_counting_iterator<size_type>(0), offsets_transformer );
auto offsets_column = detail::make_offsets_child_column(offsets_transformer_itr,
offsets_transformer_itr+strings_count,
mr, stream);
auto d_results_offsets = offsets_column->view().data<int32_t>();
// create the chars column
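// (dereferencing through thrust::device_pointer_cast copies the final offset, i.e. the total
// byte count, back to the host)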
size_type bytes = thrust::device_pointer_cast(d_results_offsets)[strings_count];
auto chars_column = strings::detail::create_chars_child_column( strings_count, null_count, bytes, mr, stream );
// fill the chars column
auto d_results_chars = chars_column->mutable_view().data<char>();
thrust::for_each_n(rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<size_type>(0), strings_count,
[d_table, num_columns, d_separator, d_narep, d_results_offsets, d_results_chars] __device__(size_type idx){
bool null_element = thrust::any_of( thrust::seq, d_table.begin(), d_table.end(),
[idx] (column_device_view const& col) { return col.is_null(idx);});
if( null_element && d_narep.is_null() )
return; // do not write to buffer at all if any column element for this row is null
size_type offset = d_results_offsets[idx];
char* d_buffer = d_results_chars + offset;
// write out each column's entry for this row
for( size_type col_idx=0; col_idx < num_columns; ++col_idx )
{
auto d_column = d_table.column(col_idx);
string_view d_str = d_column.is_null(idx) ? d_narep : d_column.element<string_view>(idx);
d_buffer = detail::copy_string(d_buffer, d_str);
// separator goes only in between elements
if( col_idx+1 < num_columns )
d_buffer = detail::copy_string(d_buffer, d_separator);
}
});
return make_strings_column(strings_count, std::move(offsets_column), std::move(chars_column),
null_count, std::move(null_mask), stream, mr);
}
//
std::unique_ptr<column> join_strings( strings_column_view const& strings,
string_scalar const& separator = string_scalar(""),
string_scalar const& narep = string_scalar("",false),
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
hipStream_t stream=0 )
{
auto strings_count = strings.size();
if( strings_count == 0 )
return detail::make_empty_strings_column(mr,stream);
CUDF_EXPECTS( separator.is_valid(), "Parameter separator must be a valid string_scalar");
auto execpol = rmm::exec_policy(stream);
string_view d_separator(separator.data(),separator.size());
string_view const d_narep = [&narep] {
if( !narep.is_valid() )
return string_view(nullptr,0);
return narep.size()==0 ? string_view("",0) : string_view(narep.data(),narep.size());
} ();
auto strings_column = column_device_view::create(strings.parent(),stream);
auto d_strings = *strings_column;
// create an offsets array for building the output memory layout
rmm::device_vector<size_type> output_offsets(strings_count+1);
auto d_output_offsets = output_offsets.data().get();
// using inclusive-scan to compute last entry which is the total size
thrust::transform_inclusive_scan( execpol->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
d_output_offsets + 1,
[d_strings, d_separator, d_narep] __device__ (size_type idx) {
size_type bytes = 0;
if( d_strings.is_null(idx) )
{
if( d_narep.is_null() )
return 0; // skip nulls
bytes += d_narep.size_bytes();
}
else
bytes += d_strings.element<string_view>(idx).size_bytes();
if( (idx+1) < d_strings.size() )
bytes += d_separator.size_bytes();
return bytes;
},
thrust::plus<size_type>());
CUDA_TRY(hipMemsetAsync(d_output_offsets, 0, sizeof(size_type), stream));
// total size is the last entry
size_type bytes = output_offsets.back();
// build offsets column (only 1 string so 2 offset entries)
auto offsets_column = make_numeric_column( data_type{INT32}, 2, mask_state::UNALLOCATED,
stream, mr );
auto offsets_view = offsets_column->mutable_view();
// set the first entry to 0 and the last entry to bytes
int32_t new_offsets[] = {0, bytes};
CUDA_TRY(hipMemcpyAsync(offsets_view.data<int32_t>(), new_offsets,
sizeof(new_offsets), hipMemcpyHostToDevice,stream));
// build null mask
// only one entry so it is either all valid or all null
size_type null_count = 0;
rmm::device_buffer null_mask; // init to null null-mask
if( strings.null_count()==strings_count && d_narep.is_null() )
{
null_mask = create_null_mask(1,cudf::mask_state::ALL_NULL,stream,mr);
null_count = 1;
}
auto chars_column = detail::create_chars_child_column( strings_count, null_count, bytes, mr, stream );
auto chars_view = chars_column->mutable_view();
auto d_chars = chars_view.data<char>();
thrust::for_each_n(execpol->on(stream), thrust::make_counting_iterator<size_type>(0), strings_count,
[d_strings, d_separator, d_narep, d_output_offsets, d_chars] __device__(size_type idx){
size_type offset = d_output_offsets[idx];
char* d_buffer = d_chars + offset;
if( d_strings.is_null(idx) )
{
if( d_narep.is_null() )
return; // do not write to buffer if element is null (including separator)
d_buffer = detail::copy_string(d_buffer, d_narep);
}
else
{
string_view d_str = d_strings.element<string_view>(idx);
d_buffer = detail::copy_string(d_buffer, d_str);
}
if( (idx+1) < d_strings.size() )
d_buffer = detail::copy_string(d_buffer, d_separator);
});
return make_strings_column(1, std::move(offsets_column), std::move(chars_column),
null_count, std::move(null_mask), stream, mr);
}
} // namespace detail
// APIs
std::unique_ptr<column> concatenate( table_view const& strings_columns,
string_scalar const& separator,
string_scalar const& narep,
rmm::mr::device_memory_resource* mr)
{
return detail::concatenate(strings_columns, separator, narep, mr);
}
std::unique_ptr<column> join_strings( strings_column_view const& strings,
string_scalar const& separator,
string_scalar const& narep,
rmm::mr::device_memory_resource* mr )
{
return detail::join_strings(strings, separator, narep, mr);
}
} // namespace strings
} // namespace cudf
| a0879bf29eb1db4988f320d7ef5e4543a5a370b2.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/combine.hpp>
#include <cudf/table/table_device_view.cuh>
#include <cudf/utilities/error.hpp>
#include <cudf/detail/valid_if.cuh>
#include <strings/utilities.hpp>
#include <strings/utilities.cuh>
#include <algorithm>
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/transform_scan.h>
#include <thrust/transform_reduce.h>
#include <thrust/logical.h>
namespace cudf
{
namespace strings
{
namespace detail
{
//
std::unique_ptr<column> concatenate( table_view const& strings_columns,
string_scalar const& separator = string_scalar(""),
string_scalar const& narep = string_scalar("",false),
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
cudaStream_t stream=0 )
{
auto num_columns = strings_columns.num_columns();
CUDF_EXPECTS( num_columns > 0, "At least one column must be specified");
// check all columns are of type string
CUDF_EXPECTS( std::all_of( strings_columns.begin(), strings_columns.end(), [] (auto c) {return c.type().id()==STRING;}),
"All columns must be of type string" );
if( num_columns==1 ) // single strings column returns a copy
return std::make_unique<column>(*(strings_columns.begin()),stream,mr);
auto strings_count = strings_columns.num_rows();
if( strings_count == 0 ) // empty begets empty
return detail::make_empty_strings_column(mr,stream);
CUDF_EXPECTS( separator.is_valid(), "Parameter separator must be a valid string_scalar");
string_view d_separator(separator.data(),separator.size());
string_view const d_narep = [&narep] {
if( !narep.is_valid() )
return string_view(nullptr,0);
return narep.size()==0 ? string_view("",0) : string_view(narep.data(),narep.size());
} ();
// Create device views from the strings columns.
auto table = table_device_view::create(strings_columns,stream);
auto d_table = *table;
// create resulting null mask
auto valid_mask = cudf::experimental::detail::valid_if(
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
[d_table, d_narep] __device__ (size_type idx) {
bool null_element = thrust::any_of( thrust::seq, d_table.begin(), d_table.end(),
[idx] (auto col) { return col.is_null(idx);});
return( !null_element || !d_narep.is_null() );
}, stream, mr );
rmm::device_buffer null_mask = valid_mask.first;
auto null_count = valid_mask.second;
// build offsets column by computing sizes of each string in the output
auto offsets_transformer = [d_table, num_columns, d_separator, d_narep] __device__ (size_type row_idx) {
// for this row (idx), iterate over each column and add up the bytes
bool null_element = thrust::any_of( thrust::seq, d_table.begin(), d_table.end(),
[row_idx] (auto const& d_column) { return d_column.is_null(row_idx);});
if( null_element && d_narep.is_null() )
return 0;
size_type bytes = thrust::transform_reduce( thrust::seq, d_table.begin(), d_table.end(),
[row_idx, d_separator, d_narep] __device__ (column_device_view const& d_column) {
return d_separator.size_bytes() +
(d_column.is_null(row_idx) ? d_narep.size_bytes()
: d_column.element<string_view>(row_idx).size_bytes());
}, 0, thrust::plus<size_type>());
// separator goes only in between elements
if( bytes > 0 ) // if not null
bytes -= d_separator.size_bytes(); // remove the last separator
return bytes;
};
auto offsets_transformer_itr = thrust::make_transform_iterator( thrust::make_counting_iterator<size_type>(0), offsets_transformer );
auto offsets_column = detail::make_offsets_child_column(offsets_transformer_itr,
offsets_transformer_itr+strings_count,
mr, stream);
auto d_results_offsets = offsets_column->view().data<int32_t>();
// create the chars column
size_type bytes = thrust::device_pointer_cast(d_results_offsets)[strings_count];
auto chars_column = strings::detail::create_chars_child_column( strings_count, null_count, bytes, mr, stream );
// fill the chars column
auto d_results_chars = chars_column->mutable_view().data<char>();
thrust::for_each_n(rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<size_type>(0), strings_count,
[d_table, num_columns, d_separator, d_narep, d_results_offsets, d_results_chars] __device__(size_type idx){
bool null_element = thrust::any_of( thrust::seq, d_table.begin(), d_table.end(),
[idx] (column_device_view const& col) { return col.is_null(idx);});
if( null_element && d_narep.is_null() )
return; // do not write to buffer at all if any column element for this row is null
size_type offset = d_results_offsets[idx];
char* d_buffer = d_results_chars + offset;
// write out each column's entry for this row
for( size_type col_idx=0; col_idx < num_columns; ++col_idx )
{
auto d_column = d_table.column(col_idx);
string_view d_str = d_column.is_null(idx) ? d_narep : d_column.element<string_view>(idx);
d_buffer = detail::copy_string(d_buffer, d_str);
// separator goes only in between elements
if( col_idx+1 < num_columns )
d_buffer = detail::copy_string(d_buffer, d_separator);
}
});
return make_strings_column(strings_count, std::move(offsets_column), std::move(chars_column),
null_count, std::move(null_mask), stream, mr);
}
//
std::unique_ptr<column> join_strings( strings_column_view const& strings,
string_scalar const& separator = string_scalar(""),
string_scalar const& narep = string_scalar("",false),
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
cudaStream_t stream=0 )
{
auto strings_count = strings.size();
if( strings_count == 0 )
return detail::make_empty_strings_column(mr,stream);
CUDF_EXPECTS( separator.is_valid(), "Parameter separator must be a valid string_scalar");
auto execpol = rmm::exec_policy(stream);
string_view d_separator(separator.data(),separator.size());
string_view const d_narep = [&narep] {
if( !narep.is_valid() )
return string_view(nullptr,0);
return narep.size()==0 ? string_view("",0) : string_view(narep.data(),narep.size());
} ();
auto strings_column = column_device_view::create(strings.parent(),stream);
auto d_strings = *strings_column;
// create an offsets array for building the output memory layout
rmm::device_vector<size_type> output_offsets(strings_count+1);
auto d_output_offsets = output_offsets.data().get();
// using inclusive-scan to compute last entry which is the total size
thrust::transform_inclusive_scan( execpol->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
d_output_offsets + 1,
[d_strings, d_separator, d_narep] __device__ (size_type idx) {
size_type bytes = 0;
if( d_strings.is_null(idx) )
{
if( d_narep.is_null() )
return 0; // skip nulls
bytes += d_narep.size_bytes();
}
else
bytes += d_strings.element<string_view>(idx).size_bytes();
if( (idx+1) < d_strings.size() )
bytes += d_separator.size_bytes();
return bytes;
},
thrust::plus<size_type>());
CUDA_TRY(cudaMemsetAsync(d_output_offsets, 0, sizeof(size_type), stream));
// total size is the last entry
size_type bytes = output_offsets.back();
// build offsets column (only 1 string so 2 offset entries)
auto offsets_column = make_numeric_column( data_type{INT32}, 2, mask_state::UNALLOCATED,
stream, mr );
auto offsets_view = offsets_column->mutable_view();
// set the first entry to 0 and the last entry to bytes
int32_t new_offsets[] = {0, bytes};
CUDA_TRY(cudaMemcpyAsync(offsets_view.data<int32_t>(), new_offsets,
sizeof(new_offsets), cudaMemcpyHostToDevice,stream));
// build null mask
// only one entry so it is either all valid or all null
size_type null_count = 0;
rmm::device_buffer null_mask; // init to null null-mask
if( strings.null_count()==strings_count && d_narep.is_null() )
{
null_mask = create_null_mask(1,cudf::mask_state::ALL_NULL,stream,mr);
null_count = 1;
}
auto chars_column = detail::create_chars_child_column( strings_count, null_count, bytes, mr, stream );
auto chars_view = chars_column->mutable_view();
auto d_chars = chars_view.data<char>();
thrust::for_each_n(execpol->on(stream), thrust::make_counting_iterator<size_type>(0), strings_count,
[d_strings, d_separator, d_narep, d_output_offsets, d_chars] __device__(size_type idx){
size_type offset = d_output_offsets[idx];
char* d_buffer = d_chars + offset;
if( d_strings.is_null(idx) )
{
if( d_narep.is_null() )
return; // do not write to buffer if element is null (including separator)
d_buffer = detail::copy_string(d_buffer, d_narep);
}
else
{
string_view d_str = d_strings.element<string_view>(idx);
d_buffer = detail::copy_string(d_buffer, d_str);
}
if( (idx+1) < d_strings.size() )
d_buffer = detail::copy_string(d_buffer, d_separator);
});
return make_strings_column(1, std::move(offsets_column), std::move(chars_column),
null_count, std::move(null_mask), stream, mr);
}
} // namespace detail
// APIs
std::unique_ptr<column> concatenate( table_view const& strings_columns,
string_scalar const& separator,
string_scalar const& narep,
rmm::mr::device_memory_resource* mr)
{
return detail::concatenate(strings_columns, separator, narep, mr);
}
std::unique_ptr<column> join_strings( strings_column_view const& strings,
string_scalar const& separator,
string_scalar const& narep,
rmm::mr::device_memory_resource* mr )
{
return detail::join_strings(strings, separator, narep, mr);
}
} // namespace strings
} // namespace cudf
|
95c8e3b9b8f13c526970d95f4007b558b6b6c29d.hip | // !!! This is a file automatically generated by hipify!!!
/*
* triangular_cuda_kernel.cu
*
* Author(s):
* Matteo Spallanzani <[email protected]>
*
* Copyright (c) 2020-2021 ETH Zurich.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <torch/extension.h>
#include <vector>
// #include <stdio.h> // for debug
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "forward.h"
#define THREADS_PER_BLOCK 1024
#define SIGN(x) (((x) < 0.0f) ? -1.0f : 1.0f)
#define ABS(x) (((x) < 0.0f) ? -(x) : (x))
#define PLUS_1(x) ((x) + 1)
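// PLUS_1(len_t) is the number of quantization bins: len_t thresholds split the line into
// len_t + 1 bins, one per quantization level in q.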
// definitions of CUDA kernels (executed on: GPU)
template <typename scalar_t>
__global__ void triangular_forward_pmf_cuda_kernel(
scalar_t * const __restrict__ pmf,
const scalar_t * __restrict__ x_in,
const int64_t len_x,
const scalar_t * __restrict__ t,
const int64_t len_t,
const scalar_t * __restrict__ mi,
const scalar_t * __restrict__ sigma,
const scalar_t * __restrict__ training
)
{
int ix = blockIdx.x * blockDim.x + threadIdx.x;
if (ix < len_x)
{
// pre-compute row offset from the beginning of the `pmf` array
int row_offset = ix * PLUS_1(len_t);
// compute shifted thresholds
for (int it = 0; it < len_t; ++it)
{
pmf[row_offset + it + 1] = x_in[ix] - *mi - t[it];
}
// compute CDF
for (int it = 0; it < PLUS_1(len_t); ++it)
{
if (it == 0)
{
pmf[row_offset + it] = 1.0f;
}
else
{
if (*training && (*sigma != 0.0f))
{
scalar_t sigma_inv = 1.0 / (*sigma);
scalar_t shifted_x_minus_t_over_s = pmf[row_offset + it] * sigma_inv;
scalar_t cdf = ((ABS(shifted_x_minus_t_over_s) < 1.0f) ? (shifted_x_minus_t_over_s * (2 - ABS(shifted_x_minus_t_over_s))) : SIGN(shifted_x_minus_t_over_s));
pmf[row_offset + it] = (cdf + 1.0f) / 2;
}
else
{
pmf[row_offset + it] = (scalar_t) (pmf[row_offset + it] >= 0.0f);
}
}
}
// compute the probability mass in each bin
for (int iq = 0; iq < PLUS_1(len_t) - 1; ++iq)
{
pmf[row_offset + iq] = pmf[row_offset + iq] - pmf[row_offset + iq + 1];
}
// the last bin (with index `row_offset + len_t`) would have mass `pmf[row_offset + len_t] - 0.0f`, so it's not necessary to compute it!
}
else // I am out of bounds!
{
return;
}
}
template <typename scalar_t>
__global__ void triangular_backward_cuda_kernel(
scalar_t * const __restrict__ grad_out,
const scalar_t * __restrict__ grad_in,
const scalar_t * __restrict__ x_in,
const int64_t len_x,
const scalar_t * __restrict__ q,
const scalar_t * __restrict__ t,
const int64_t len_t,
const scalar_t * __restrict__ mi,
const scalar_t * __restrict__ sigma
)
{
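// Expected derivative under triangular noise: each threshold t_i contributes
// (q[i+1] - q[i]) * (1 - |z_i|)+ / sigma with z_i = (x - mi - t_i) / sigma; the contributions
// are summed and chained with the incoming gradient grad_in.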
int ix = blockIdx.x * blockDim.x + threadIdx.x;
if (ix < len_x)
{
scalar_t sum = 0.0f;
for (int it = 0; it < len_t; ++it)
{
// input position relative to the threshold
scalar_t shifted_x_minus_t = x_in[ix] - t[it] - *mi;
// the derivative of the expected (i.e., regularised) step function is the PDF of the triangular distribution
scalar_t pdf;
if (*sigma != 0.0f)
{
scalar_t sigma_inv = 1.0f / (*sigma);
scalar_t abs_shifted_x_minus_t_over_s = ABS(shifted_x_minus_t * sigma_inv);
pdf = ((abs_shifted_x_minus_t_over_s > 1.0f) ? 0.0f : (1.0f - abs_shifted_x_minus_t_over_s) * sigma_inv);
}
else
{
pdf = 0.0f; // no noise, no gradient!
}
// dilate and accumulate expected derivative
scalar_t dq = q[it + 1] - q[it];
sum += dq * pdf;
}
// compose gradients
grad_out[ix] = sum * grad_in[ix];
}
else // I am out of bounds!
{
return;
}
}
// definitions of C++\CUDA interface (executed on: CPU)
// goals:
// * allocate GPU memory for the output;
// * define the parameters for the GPU kernel;
// * call the kernel;
torch::Tensor triangular_forward_cuda_dispatch(
torch::Tensor x_in,
torch::Tensor q,
torch::Tensor t,
torch::Tensor mi,
torch::Tensor sigma,
torch::Tensor strategy,
torch::Tensor training
)
{
auto x_out = torch::zeros_like(x_in);
auto pmf = torch::zeros({x_in.numel(), PLUS_1(t.numel())}, torch::TensorOptions().dtype(x_in.dtype()).device(x_in.device()));
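// pmf is an (x_in.numel() x num_bins) scratch buffer: the kernel writes the shifted thresholds
// into it, overwrites them with the noise CDF at each threshold, and then differences
// neighboring entries so each row ends up holding the per-bin probability mass.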
const dim3 blocks((x_in.numel() + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK);
// compute PMF over bins (i.e., the quantization levels)
AT_DISPATCH_FLOATING_TYPES(
x_in.type(),
"triangular_forward_pmf_cuda_kernel",
([&] {
hipLaunchKernelGGL(( triangular_forward_pmf_cuda_kernel<scalar_t>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0,
pmf.data_ptr<scalar_t>(),
x_in.data_ptr<scalar_t>(),
x_in.numel(),
t.data_ptr<scalar_t>(),
t.numel(),
mi.data_ptr<scalar_t>(),
sigma.data_ptr<scalar_t>(),
training.data_ptr<scalar_t>()
);
})
);
switch(strategy.item<int32_t>()) // how to read tensor's content using the C++ API: https://stackoverflow.com/a/54208912
{
case 0: // expectation
AT_DISPATCH_FLOATING_TYPES(
x_in.type(),
"triangular_forward_expectation_cuda_kernel",
([&] {
hipLaunchKernelGGL(( forward_expectation_cuda_kernel<scalar_t>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0,
x_out.data_ptr<scalar_t>(),
pmf.data_ptr<scalar_t>(),
x_in.numel(),
q.data_ptr<scalar_t>(),
t.numel()
);
})
);
break;
case 1: // argmax sampling (i.e., mode)
AT_DISPATCH_FLOATING_TYPES(
x_in.type(),
"triangular_forward_mode_cuda_kernel",
([&] {
hipLaunchKernelGGL(( forward_mode_cuda_kernel<scalar_t>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0,
x_out.data_ptr<scalar_t>(),
pmf.data_ptr<scalar_t>(),
x_in.numel(),
q.data_ptr<scalar_t>(),
t.numel()
);
})
);
break;
case 2: // random sampling
auto us = torch::rand_like(x_in);
AT_DISPATCH_FLOATING_TYPES(
x_in.type(),
"triangular_forward_random_cuda_kernel",
([&] {
hipLaunchKernelGGL(( forward_random_cuda_kernel<scalar_t>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0,
x_out.data_ptr<scalar_t>(),
us.data_ptr<scalar_t>(),
pmf.data_ptr<scalar_t>(),
x_in.numel(),
q.data_ptr<scalar_t>(),
t.numel()
);
})
);
break;
}
return x_out;
}
torch::Tensor triangular_backward_cuda_dispatch(
torch::Tensor grad_in,
torch::Tensor x_in,
torch::Tensor q,
torch::Tensor t,
torch::Tensor mi,
torch::Tensor sigma
)
{
auto grad_out = torch::zeros_like(x_in);
const dim3 blocks((x_in.numel() + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK);
AT_DISPATCH_FLOATING_TYPES(
x_in.type(),
"triangular_backward_cuda_kernel",
([&] {
hipLaunchKernelGGL(( triangular_backward_cuda_kernel<scalar_t>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0,
grad_out.data_ptr<scalar_t>(),
grad_in.data_ptr<scalar_t>(),
x_in.data_ptr<scalar_t>(),
x_in.numel(),
q.data_ptr<scalar_t>(),
t.data_ptr<scalar_t>(),
t.numel(),
mi.data_ptr<scalar_t>(),
sigma.data_ptr<scalar_t>()
);
})
);
return grad_out;
}
| 95c8e3b9b8f13c526970d95f4007b558b6b6c29d.cu | /*
* triangular_cuda_kernel.cu
*
* Author(s):
* Matteo Spallanzani <[email protected]>
*
* Copyright (c) 2020-2021 ETH Zurich.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <torch/extension.h>
#include <vector>
// #include <stdio.h> // for debug
#include <cuda.h>
#include <cuda_runtime.h>
#include "forward.h"
#define THREADS_PER_BLOCK 1024
#define SIGN(x) (((x) < 0.0f) ? -1.0f : 1.0f)
#define ABS(x) (((x) < 0.0f) ? -(x) : (x))
#define PLUS_1(x) ((x) + 1)
// definitions of CUDA kernels (executed on: GPU)
template <typename scalar_t>
__global__ void triangular_forward_pmf_cuda_kernel(
scalar_t * const __restrict__ pmf,
const scalar_t * __restrict__ x_in,
const int64_t len_x,
const scalar_t * __restrict__ t,
const int64_t len_t,
const scalar_t * __restrict__ mi,
const scalar_t * __restrict__ sigma,
const scalar_t * __restrict__ training
)
{
int ix = blockIdx.x * blockDim.x + threadIdx.x;
if (ix < len_x)
{
// pre-compute row offset from the beginning of the `pmf` array
int row_offset = ix * PLUS_1(len_t);
// compute shifted thresholds
for (int it = 0; it < len_t; ++it)
{
pmf[row_offset + it + 1] = x_in[ix] - *mi - t[it];
}
// compute CDF
for (int it = 0; it < PLUS_1(len_t); ++it)
{
if (it == 0)
{
pmf[row_offset + it] = 1.0f;
}
else
{
if (*training && (*sigma != 0.0f))
{
scalar_t sigma_inv = 1.0 / (*sigma);
scalar_t shifted_x_minus_t_over_s = pmf[row_offset + it] * sigma_inv;
scalar_t cdf = ((ABS(shifted_x_minus_t_over_s) < 1.0f) ? (shifted_x_minus_t_over_s * (2 - ABS(shifted_x_minus_t_over_s))) : SIGN(shifted_x_minus_t_over_s));
pmf[row_offset + it] = (cdf + 1.0f) / 2;
}
else
{
pmf[row_offset + it] = (scalar_t) (pmf[row_offset + it] >= 0.0f);
}
}
}
// compute the probability mass in each bin
for (int iq = 0; iq < PLUS_1(len_t) - 1; ++iq)
{
pmf[row_offset + iq] = pmf[row_offset + iq] - pmf[row_offset + iq + 1];
}
// the last bin (with index `row_offset + len_t`) would have mass `pmf[row_offset + len_t] - 0.0f`, so it's not necessary to compute it!
}
else // I am out of bounds!
{
return;
}
}
template <typename scalar_t>
__global__ void triangular_backward_cuda_kernel(
scalar_t * const __restrict__ grad_out,
const scalar_t * __restrict__ grad_in,
const scalar_t * __restrict__ x_in,
const int64_t len_x,
const scalar_t * __restrict__ q,
const scalar_t * __restrict__ t,
const int64_t len_t,
const scalar_t * __restrict__ mi,
const scalar_t * __restrict__ sigma
)
{
int ix = blockIdx.x * blockDim.x + threadIdx.x;
if (ix < len_x)
{
scalar_t sum = 0.0f;
for (int it = 0; it < len_t; ++it)
{
// input position relative to the threshold
scalar_t shifted_x_minus_t = x_in[ix] - t[it] - *mi;
// the derivative of the expected (i.e., regularised) step function is the PDF of the triangular distribution
scalar_t pdf;
if (*sigma != 0.0f)
{
scalar_t sigma_inv = 1.0f / (*sigma);
scalar_t abs_shifted_x_minus_t_over_s = ABS(shifted_x_minus_t * sigma_inv);
pdf = ((abs_shifted_x_minus_t_over_s > 1.0f) ? 0.0f : (1.0f - abs_shifted_x_minus_t_over_s) * sigma_inv);
}
else
{
pdf = 0.0f; // no noise, no gradient!
}
// dilate and accumulate expected derivative
scalar_t dq = q[it + 1] - q[it];
sum += dq * pdf;
}
// compose gradients
grad_out[ix] = sum * grad_in[ix];
}
else // I am out of bounds!
{
return;
}
}
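// Descriptive note added by the editor: the backward kernel above implements
//   d x_out / d x_in ~= sum_i (q[i+1] - q[i]) * pdf_tri((x_in - mi - t[i]) / sigma) / sigma
// where pdf_tri(z) = max(0, 1 - |z|) is the density of the unit triangular noise; the
// incoming gradient grad_in is then scaled by this sum (chain rule). With sigma == 0 the
// density, and hence the gradient, is identically zero.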
// definitions of the C++/CUDA interface (executed on: CPU)
// goals:
// * allocate GPU memory for the output;
// * define the parameters for the GPU kernel;
// * call the kernel;
torch::Tensor triangular_forward_cuda_dispatch(
torch::Tensor x_in,
torch::Tensor q,
torch::Tensor t,
torch::Tensor mi,
torch::Tensor sigma,
torch::Tensor strategy,
torch::Tensor training
)
{
auto x_out = torch::zeros_like(x_in);
auto pmf = torch::zeros({x_in.numel(), PLUS_1(t.numel())}, torch::TensorOptions().dtype(x_in.dtype()).device(x_in.device()));
const dim3 blocks((x_in.numel() + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK);
// compute PMF over bins (i.e., the quantization levels)
AT_DISPATCH_FLOATING_TYPES(
x_in.type(),
"triangular_forward_pmf_cuda_kernel",
([&] {
triangular_forward_pmf_cuda_kernel<scalar_t><<<blocks, THREADS_PER_BLOCK>>>(
pmf.data_ptr<scalar_t>(),
x_in.data_ptr<scalar_t>(),
x_in.numel(),
t.data_ptr<scalar_t>(),
t.numel(),
mi.data_ptr<scalar_t>(),
sigma.data_ptr<scalar_t>(),
training.data_ptr<scalar_t>()
);
})
);
switch(strategy.item<int32_t>()) // how to read tensor's content using the C++ API: https://stackoverflow.com/a/54208912
{
case 0: // expectation
AT_DISPATCH_FLOATING_TYPES(
x_in.type(),
"triangular_forward_expectation_cuda_kernel",
([&] {
forward_expectation_cuda_kernel<scalar_t><<<blocks, THREADS_PER_BLOCK>>>(
x_out.data_ptr<scalar_t>(),
pmf.data_ptr<scalar_t>(),
x_in.numel(),
q.data_ptr<scalar_t>(),
t.numel()
);
})
);
break;
case 1: // argmax sampling (i.e., mode)
AT_DISPATCH_FLOATING_TYPES(
x_in.type(),
"triangular_forward_mode_cuda_kernel",
([&] {
forward_mode_cuda_kernel<scalar_t><<<blocks, THREADS_PER_BLOCK>>>(
x_out.data_ptr<scalar_t>(),
pmf.data_ptr<scalar_t>(),
x_in.numel(),
q.data_ptr<scalar_t>(),
t.numel()
);
})
);
break;
case 2: // random sampling
auto us = torch::rand_like(x_in);
AT_DISPATCH_FLOATING_TYPES(
x_in.type(),
"triangular_forward_random_cuda_kernel",
([&] {
forward_random_cuda_kernel<scalar_t><<<blocks, THREADS_PER_BLOCK>>>(
x_out.data_ptr<scalar_t>(),
us.data_ptr<scalar_t>(),
pmf.data_ptr<scalar_t>(),
x_in.numel(),
q.data_ptr<scalar_t>(),
t.numel()
);
})
);
break;
}
return x_out;
}
torch::Tensor triangular_backward_cuda_dispatch(
torch::Tensor grad_in,
torch::Tensor x_in,
torch::Tensor q,
torch::Tensor t,
torch::Tensor mi,
torch::Tensor sigma
)
{
auto grad_out = torch::zeros_like(x_in);
const dim3 blocks((x_in.numel() + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK);
AT_DISPATCH_FLOATING_TYPES(
x_in.type(),
"triangular_backward_cuda_kernel",
([&] {
triangular_backward_cuda_kernel<scalar_t><<<blocks, THREADS_PER_BLOCK>>>(
grad_out.data_ptr<scalar_t>(),
grad_in.data_ptr<scalar_t>(),
x_in.data_ptr<scalar_t>(),
x_in.numel(),
q.data_ptr<scalar_t>(),
t.data_ptr<scalar_t>(),
t.numel(),
mi.data_ptr<scalar_t>(),
sigma.data_ptr<scalar_t>()
);
})
);
return grad_out;
}
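// --- Illustrative sketch added by the editor (hypothetical, compiled out by default) ----
// Dispatch functions like the two above are typically exposed to Python through pybind11.
// The real bindings for this extension are assumed to live elsewhere (see forward.h), so
// the example below is guarded out; the exported names are editor-chosen, and
// TORCH_EXTENSION_NAME is supplied by the standard torch extension build.
#ifdef TRIANGULAR_BINDING_EXAMPLE
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
{
    m.def("triangular_forward_cuda", &triangular_forward_cuda_dispatch,
          "triangular-noise quantizer: forward pass (CUDA)");
    m.def("triangular_backward_cuda", &triangular_backward_cuda_dispatch,
          "triangular-noise quantizer: backward pass (CUDA)");
}
#endif
// ----------------------------------------------------------------------------------------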
|
a08ec2df220d13bdbbd1d47faf7ca9c451a9a5f7.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <algorithm>
#include <limits> // for std::numeric_limits, used in min_min()
#include <hip/hip_runtime.h>
// ELAPSED_TIME selects timing vs. result output below; it is assumed to be supplied by
// the build (e.g. -DELAPSED_TIME=1). A default is provided so the file compiles standalone.
#ifndef ELAPSED_TIME
#define ELAPSED_TIME 0
#endif
struct Task {
uint id;
float time;
Task(uint id, float time) {
this->id = id;
this->time = time;
}
Task() {
this->id = 0;
this->time = 0;
}
bool operator() (Task i,Task j) { return (i.time < j.time); }
};
struct Machine {
int id;
float cost;
Machine() {
this->id = 0;
this->cost = 0;
}
bool operator() (Machine i,Machine j) { return (i.cost < j.cost); }
};
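// Descriptive summary added by the editor: min_min implements a greedy list-scheduling
// heuristic. Machines are visited in ascending cost order (after machine_sorting); the
// current machine repeatedly picks an unscheduled task with a small execution time whose
// resulting completion time stays within max_time, records the assignment in task_map and
// updates completion_times, then moves on to the next machine once nothing else fits.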
void min_min(Task* tasks, float* completion_times, int* task_map, bool* task_scheduled, Machine* machines,
int t, int m, int max_time) {
uint count = 0;
uint q = 0;
while(count < t) {
float current_time = 0;
uint j = machines[q].id;
uint task_id = 0;
for (int k = 0; k < t; k++) {
bool valid_task = false;
float min_value = std::numeric_limits<float>::max();
for (int i = 0; i < t; i++) {
int id = tasks[j * t + i].id;
current_time = completion_times[j] + tasks[j * t + i].time;
if(current_time > max_time){
continue;
}
if (!task_scheduled[id]) {
if(tasks[j * t + i].time < min_value) {
task_id = id;
min_value = current_time;
valid_task = true;
}
}
}
if(valid_task){
task_scheduled[task_id] = true;
task_map[task_id] = j;
completion_times[j] = min_value;
count++;
}
}
q++;
if(q == m && count != t) {
printf("### ERROR ###\n");
}
}
}
void machine_sorting(Machine* machines, int m) {
std::stable_sort (&machines[0], &machines[0]+m, Machine());
}
void segmented_sorting(Task* tasks, int m, int t) {
for(int i = 0; i < m; i++) {
int j = i*t;
std::stable_sort (&tasks[j], &tasks[j]+t, Task());
}
}
template<typename T>
void print(T* vec, uint t, uint m) {
std::cout << "\n";
for (uint i = 0; i < t; i++) {
for (uint j = 0; j < m; j++) {
std::cout << vec[i * m + j] << " ";
}
std::cout << "\n";
}
}
template<typename T>
void print(T* vec, uint t) {
std::cout << "\n";
for (uint i = 0; i < t; i++) {
std::cout << vec[i] << " ";
}
std::cout << "\n";
}
void print(Task* vec, uint t, uint m) {
std::cout << "\n";
for (uint j = 0; j < m; j++) {
for (uint i = 0; i < t; i++) {
std::cout << "id=" << vec[j * t + i].id << " time="
<< vec[j * t + i].time << "\t";
}
std::cout << "\n";
}
}
void print(Machine* vec, uint m) {
std::cout << "\n";
for (uint j = 0; j < m; j++) {
std::cout << "id=" << vec[j].id << " time="
<< vec[j].cost << "\t";
}
std::cout << "\n";
}
void print(float* completion_times, Machine* vec, uint m) {
float sum = 0;
for (uint j = 0; j < m; j++) {
uint id = vec[j].id;
float cost = vec[j].cost * completion_times[id];
std::cout << vec[j].cost << " * " << completion_times[id] << " = " << cost << "\n";
sum += cost;
}
std::cout << "Custo Total: " << sum << "\n";
}
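// Input layout expected on stdin (derived from the reads in main below):
//   t m max_time
//   t rows of m floats -> execution time of task i on machine j (stored transposed as tasks[j*t+i])
//   m floats           -> cost of each machine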
int main(int argc, char **argv) {
int t, m;
float max_time, aux;
aux = scanf("%d", &t);
aux = scanf("%d", &m);
aux = scanf("%f", &max_time);
//std::cout << "t=" << t << " m=" << m << "\n";
Task *tasks = (Task *) malloc(sizeof(Task) * (t * m));
bool *task_scheduled = (bool *) malloc(sizeof(bool) * t);
int *task_map = (int *) malloc(sizeof(int) * (t));
float *completion_times = (float *) malloc(sizeof(float) * (m));
Machine *machines = (Machine *) malloc(sizeof(Machine) * (m));
	// Read the task-by-machine execution time matrix
for (int i = 0; i < t; i++) {
for (int j = 0; j < m; j++) {
int a = scanf("%f", &aux);
tasks[j * t + i].id = i;
tasks[j * t + i].time = aux;
completion_times[j] = 0;
}
task_map[i] = -1;
task_scheduled[i] = false;
}
//print(tasks, t, m);
// Reading vector of costs for each machine
for (int j = 0; j < m; j++) {
int a = scanf("%f", &aux);
machines[j].id = j;
machines[j].cost = aux;
}
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
machine_sorting(machines, m);
//print(machines, m);
min_min(tasks, completion_times, task_map, task_scheduled, machines, t, m, max_time);
hipEventRecord(stop);
if (ELAPSED_TIME == 1) {
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
std::cout << milliseconds << "\n";
}
else {
//print(tasks, t, m);
//print(completion_times, m);
print(completion_times, machines, m);
//print(task_scheduled, t);
//print(task_map, t, m);
}
free(task_scheduled);
free(task_map);
free(tasks);
	free(completion_times);
	free(machines);
return 0;
}
| a08ec2df220d13bdbbd1d47faf7ca9c451a9a5f7.cu | /**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <algorithm>
#include <limits> // for std::numeric_limits, used in min_min()
#include <cuda.h>
// ELAPSED_TIME selects timing vs. result output below; it is assumed to be supplied by
// the build (e.g. -DELAPSED_TIME=1). A default is provided so the file compiles standalone.
#ifndef ELAPSED_TIME
#define ELAPSED_TIME 0
#endif
struct Task {
uint id;
float time;
Task(uint id, float time) {
this->id = id;
this->time = time;
}
Task() {
this->id = 0;
this->time = 0;
}
bool operator() (Task i,Task j) { return (i.time < j.time); }
};
struct Machine {
int id;
float cost;
Machine() {
this->id = 0;
this->cost = 0;
}
bool operator() (Machine i,Machine j) { return (i.cost < j.cost); }
};
void min_min(Task* tasks, float* completion_times, int* task_map, bool* task_scheduled, Machine* machines,
int t, int m, int max_time) {
uint count = 0;
uint q = 0;
while(count < t) {
float current_time = 0;
uint j = machines[q].id;
uint task_id = 0;
for (int k = 0; k < t; k++) {
bool valid_task = false;
float min_value = std::numeric_limits<float>::max();
for (int i = 0; i < t; i++) {
int id = tasks[j * t + i].id;
current_time = completion_times[j] + tasks[j * t + i].time;
if(current_time > max_time){
continue;
}
if (!task_scheduled[id]) {
if(tasks[j * t + i].time < min_value) {
task_id = id;
min_value = current_time;
valid_task = true;
}
}
}
if(valid_task){
task_scheduled[task_id] = true;
task_map[task_id] = j;
completion_times[j] = min_value;
count++;
}
}
q++;
if(q == m && count != t) {
printf("### ERROR ###\n");
}
}
}
void machine_sorting(Machine* machines, int m) {
std::stable_sort (&machines[0], &machines[0]+m, Machine());
}
void segmented_sorting(Task* tasks, int m, int t) {
for(int i = 0; i < m; i++) {
int j = i*t;
std::stable_sort (&tasks[j], &tasks[j]+t, Task());
}
}
template<typename T>
void print(T* vec, uint t, uint m) {
std::cout << "\n";
for (uint i = 0; i < t; i++) {
for (uint j = 0; j < m; j++) {
std::cout << vec[i * m + j] << " ";
}
std::cout << "\n";
}
}
template<typename T>
void print(T* vec, uint t) {
std::cout << "\n";
for (uint i = 0; i < t; i++) {
std::cout << vec[i] << " ";
}
std::cout << "\n";
}
void print(Task* vec, uint t, uint m) {
std::cout << "\n";
for (uint j = 0; j < m; j++) {
for (uint i = 0; i < t; i++) {
std::cout << "id=" << vec[j * t + i].id << " time="
<< vec[j * t + i].time << "\t";
}
std::cout << "\n";
}
}
void print(Machine* vec, uint m) {
std::cout << "\n";
for (uint j = 0; j < m; j++) {
std::cout << "id=" << vec[j].id << " time="
<< vec[j].cost << "\t";
}
std::cout << "\n";
}
void print(float* completion_times, Machine* vec, uint m) {
float sum = 0;
for (uint j = 0; j < m; j++) {
uint id = vec[j].id;
float cost = vec[j].cost * completion_times[id];
std::cout << vec[j].cost << " * " << completion_times[id] << " = " << cost << "\n";
sum += cost;
}
std::cout << "Custo Total: " << sum << "\n";
}
int main(int argc, char **argv) {
int t, m;
float max_time, aux;
aux = scanf("%d", &t);
aux = scanf("%d", &m);
aux = scanf("%f", &max_time);
//std::cout << "t=" << t << " m=" << m << "\n";
Task *tasks = (Task *) malloc(sizeof(Task) * (t * m));
bool *task_scheduled = (bool *) malloc(sizeof(bool) * t);
int *task_map = (int *) malloc(sizeof(int) * (t));
float *completion_times = (float *) malloc(sizeof(float) * (m));
Machine *machines = (Machine *) malloc(sizeof(Machine) * (m));
// Read matrix task machine
for (int i = 0; i < t; i++) {
for (int j = 0; j < m; j++) {
int a = scanf("%f", &aux);
tasks[j * t + i].id = i;
tasks[j * t + i].time = aux;
completion_times[j] = 0;
}
task_map[i] = -1;
task_scheduled[i] = false;
}
//print(tasks, t, m);
// Reading vector of costs for each machine
for (int j = 0; j < m; j++) {
int a = scanf("%f", &aux);
machines[j].id = j;
machines[j].cost = aux;
}
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
machine_sorting(machines, m);
//print(machines, m);
min_min(tasks, completion_times, task_map, task_scheduled, machines, t, m, max_time);
cudaEventRecord(stop);
if (ELAPSED_TIME == 1) {
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
std::cout << milliseconds << "\n";
}
else {
//print(tasks, t, m);
//print(completion_times, m);
print(completion_times, machines, m);
//print(task_scheduled, t);
//print(task_map, t, m);
}
free(task_scheduled);
free(task_map);
free(tasks);
	free(completion_times);
	free(machines);
return 0;
}
|
12ee8b4ffcad4de6f3a82f1d4db1f5fe04b8da10.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdint>
#include <type_traits>
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <c10/hip/HIPMathCompat.h>
#include <c10/util/Exception.h>
#include <c10/util/bit_cast.h>
#include <c10/core/TensorImpl.h>
#include <ATen/native/nested/NestedTensorTransformerFunctions.h>
#include <ATen/native/nested/NestedTensorUtils.h>
#include <ATen/native/transformers/attention.h>
#include <ATen/native/transformers/hip/sdp_utils.h>
#include <ATen/native/transformers/sdp_utils_cpp.h>
#include <ATen/hip/HIPGeneratorImpl.h>
#ifdef USE_FLASH_ATTENTION
// FlashAttention Specific Imports
#include <ATen/native/transformers/hip/flash_attn/fmha_api.h>
#endif
#ifdef USE_MEM_EFF_ATTENTION
// MemoryEfficient Attention Specific Imports
#include <ATen/native/transformers/hip/mem_eff_attention/kernel_backward.h>
#include <ATen/native/transformers/hip/mem_eff_attention/kernels/cutlassB.h>
#include <ATen/native/transformers/hip/mem_eff_attention/gemm_kernel_utils.h>
#include <ATen/native/transformers/hip/mem_eff_attention/pytorch_utils.h>
#endif
namespace at {
namespace native {
std::tuple<Tensor, Tensor, Tensor> _flash_attention_backward(
const Tensor& grad_out,
const Tensor& query,
const Tensor& key,
const Tensor& value,
const Tensor& out,
const Tensor& logsumexp,
const Tensor& cumulative_sequence_length_q,
const Tensor& cumulative_sequence_length_k,
const int64_t max_seqlen_batch_q,
const int64_t max_seqlen_batch_k,
double dropout_p,
bool is_causal,
const Tensor& philox_seed,
const Tensor& philox_offset,
c10::optional<double> scale) {
#if defined(USE_FLASH_ATTENTION)
/*
num_splits determines how much to parallelize over the seqlen_q dimension
num_splits=0 means
it will be set by an internal heuristic. We're exposing num_splits mostly for
benchmarking. We will hard code it to 0 for now
*/
constexpr int num_splits{0};
const auto softmax_scale = sdp::calculate_scale(query, scale).as_float_unchecked();
// CUDA code assumes that dout is contiguous
auto contiguous_grad_out = grad_out.contiguous();
auto contiguous_out = out.contiguous();
Tensor dq = at::empty_like(query);
Tensor dk = at::empty_like(key);
Tensor dv = at::empty_like(value);
  // The kernel computes grad_softmax regardless; we will drop it from this function's return
Tensor grad_softmax;
std::tie(dq, dk, dv, grad_softmax) = pytorch_fmha::mha_bwd(
contiguous_grad_out,
query,
key,
value,
contiguous_out,
logsumexp,
dq,
dk,
dv,
cumulative_sequence_length_q,
cumulative_sequence_length_k,
max_seqlen_batch_q,
max_seqlen_batch_k,
dropout_p,
softmax_scale,
false, /*zero_tensors = false for all calls here*/
is_causal,
num_splits,
philox_seed,
philox_offset
);
return std::make_tuple(dq, dk, dv);
#endif
TORCH_CHECK(false, "USE_FLASH_ATTENTION was not enabled for build.");
return std::make_tuple(Tensor(), Tensor(), Tensor());
}
std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor>
_efficient_attention_backward(
const at::Tensor& grad_out_,
const at::Tensor& query,
const at::Tensor& key,
const at::Tensor& value,
const c10::optional<at::Tensor>& bias, // additive attention bias
const at::Tensor& out,
// (Mode 1MHK only) [b+1]: cu_seqlens_q[b] contains the
// position of the first query token for batch $b
const c10::optional<at::Tensor>& cu_seqlens_q,
// (Mode 1MHK only) [b+1]: cu_seqlens_k[b] contains the
// position of the first key token for batch $b
const c10::optional<at::Tensor>& cu_seqlens_k,
// (Mode 1MHK only) Maximum sequence length across batches
int64_t max_seqlen_q,
// (Mode 1MHK only) Maximum sequence length across batches
int64_t max_seqlen_k,
const at::Tensor& logsumexp,
double dropout_p, // dropout probability
const at::Tensor& philox_seed, // seed using for generating random numbers for dropout
const at::Tensor& philox_offset, // offset into random number sequence
int64_t custom_mask_type,
const bool bias_requires_grad,
const c10::optional<double> scale,
c10::optional <int64_t> num_splits_key) {
#if defined(USE_MEM_EFF_ATTENTION)
if (!grad_out_.defined()) {
return std::make_tuple(Tensor{}, Tensor{}, Tensor{}, Tensor{});
}
// ndim
TORCH_CHECK(query.dim() == grad_out_.dim());
TORCH_CHECK(query.dim() == key.dim());
TORCH_CHECK(query.dim() == value.dim());
TORCH_CHECK(query.dim() == 4);
// batch size
TORCH_CHECK(query.size(0) == grad_out_.size(0));
TORCH_CHECK(query.size(0) == key.size(0));
TORCH_CHECK(query.size(0) == value.size(0));
// seqlen
TORCH_CHECK(key.size(1) == value.size(1));
TORCH_CHECK(query.size(1) == grad_out_.size(1));
// Num heads
TORCH_CHECK(query.size(2) == key.size(2));
TORCH_CHECK(query.size(2) == value.size(2));
TORCH_CHECK(query.size(2) == grad_out_.size(2));
// Embedding per head
TORCH_CHECK(query.size(3) == key.size(3));
TORCH_CHECK(value.size(3) == grad_out_.size(3));
// handle potentially non-contiguous grad_out through a copy
auto grad_out = grad_out_.contiguous();
CHECK_NOSPARSE_CONTIGUOUS_CUDA(grad_out);
CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(query);
CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(key);
CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(value);
TORCH_CHECK(cu_seqlens_q.has_value() == cu_seqlens_k.has_value());
TORCH_CHECK(
!(cu_seqlens_q.has_value() && bias.has_value()),
"cu seqlen + bias not supported");
if (cu_seqlens_q.has_value()) {
TORCH_CHECK(cu_seqlens_q->scalar_type() == at::ScalarType::Int);
TORCH_CHECK(cu_seqlens_k->scalar_type() == at::ScalarType::Int);
TORCH_CHECK(cu_seqlens_q->dim() == 1 && cu_seqlens_k->dim() == 1);
CHECK_NOSPARSE_CONTIGUOUS_CUDA((*cu_seqlens_q));
CHECK_NOSPARSE_CONTIGUOUS_CUDA((*cu_seqlens_k));
TORCH_CHECK(cu_seqlens_q->size(0) == cu_seqlens_k->size(0));
TORCH_CHECK(query.size(0) == 1, "cu_seqlen only supports batch_size=1");
TORCH_CHECK(max_seqlen_q > 0, "max_seqlen_q required with `cu_seqlens_q`");
TORCH_CHECK(max_seqlen_k > 0, "max_seqlen_q required with `cu_seqlens_q`");
TORCH_CHECK(
max_seqlen_k <= key.size(1), "Invalid max_seqlen_k:", max_seqlen_k);
TORCH_CHECK(
max_seqlen_q <= query.size(1), "Invalid max_seqlen_q:", max_seqlen_q);
} else {
max_seqlen_q = query.size(1);
max_seqlen_k = key.size(1);
}
at::hip::HIPGuardMasqueradingAsCUDA device_guard(query.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
int64_t B = query.size(0);
int64_t M = query.size(1);
int64_t N = key.size(1);
int64_t nH = query.size(2);
int64_t K = query.size(3);
int64_t Kv = value.size(3);
at::Tensor grad_q, grad_k, grad_v, grad_bias;
grad_q = at::empty(query.sizes(), query.options());
grad_k = at::empty(key.sizes(), key.options());
grad_v = at::empty(value.sizes(), value.options());
if (bias_requires_grad) {
// force alignment for the last dim
std::vector<int64_t> sz = bias->sizes().vec();
int64_t lastDim = sz[sz.size() - 1];
int64_t alignTo = 16;
sz[sz.size() - 1] = alignTo * ((lastDim + alignTo - 1) / alignTo);
grad_bias = at::empty(sz, bias->options())
.slice(/*dim=*/-1, /*start=*/0, /*end=*/lastDim);
}
at::Tensor workspace;
const bool use_dropout = std::fpclassify(dropout_p) != FP_ZERO;
// See Note [Seed and Offset Device]
at::PhiloxCudaState rng_engine_inputs;
if (use_dropout) {
if (at::cuda::currentStreamCaptureStatus() ==
at::cuda::CaptureStatus::None) {
rng_engine_inputs = at::PhiloxCudaState(
*philox_seed.data_ptr<int64_t>(),
*philox_offset.data_ptr<int64_t>());
} else { // dropout + capture
rng_engine_inputs = at::PhiloxCudaState(
philox_seed.data_ptr<int64_t>(),
philox_offset.data_ptr<int64_t>(),
0);
}
}
hipDeviceProp_t* p = at::cuda::getDeviceProperties(query.device().index());
const int computeCapability = p->major * 10 + p->minor;
bool kernel_launched = false;
const auto maxK = ::max(query.size(3), value.size(3));
const auto maxShmem = p->sharedMemPerBlockOptin;
auto launchKernel = [&](auto _k, auto kernel_fn) {
using Kernel = decltype(_k);
using scalar_t = typename Kernel::scalar_t;
(void)_k;
if (kernel_launched) {
return;
}
// Check if this kernel is compatible
if (Kernel::kMaxK < maxK) {
return;
}
// Dropout must be supported if we need it
if (use_dropout && !Kernel::kApplyDropout) {
return;
}
if (Kernel::kKeysQueriesAlignedToBlockSize &&
(cu_seqlens_q.has_value() || M % Kernel::kBlockSizeI ||
N % Kernel::kBlockSizeJ)) {
return;
}
// Alignment
if ((query.stride(2) % Kernel::kMinimumAlignment) ||
(key.stride(2) % Kernel::kMinimumAlignment) ||
(value.stride(2) % Kernel::kMinimumAlignment)) {
return;
}
// Uses too much shmem
size_t smem_bytes = sizeof(typename Kernel::SharedStorage);
if (smem_bytes > maxShmem) {
return;
}
kernel_launched = true;
// TODO: Fuse this into a kernel?
// This is a bottleneck for smaller sequences (M <= 128)
auto delta = Kernel::kKernelComputesDelta
? at::empty({B, nH, M}, query.options().dtype(at::ScalarType::Float))
: (grad_out.to(at::kFloat) * out.to(at::kFloat))
.sum(-1)
.transpose(-2, -1)
.contiguous();
TORCH_INTERNAL_ASSERT(delta.size(0) == B);
TORCH_INTERNAL_ASSERT(delta.size(1) == nH);
TORCH_INTERNAL_ASSERT(delta.size(2) == M);
typename Kernel::Params p;
p.query_ptr = (scalar_t*)query.data_ptr();
p.key_ptr = (scalar_t*)key.data_ptr();
p.value_ptr = (scalar_t*)value.data_ptr();
p.logsumexp_ptr = (typename Kernel::lse_scalar_t*)logsumexp.data_ptr();
p.output_ptr = (scalar_t*)out.data_ptr();
p.grad_output_ptr = (scalar_t*)grad_out.data_ptr();
p.grad_query_ptr = (scalar_t*)grad_q.data_ptr();
p.grad_key_ptr = (scalar_t*)grad_k.data_ptr();
p.grad_value_ptr = (scalar_t*)grad_v.data_ptr();
p.delta_ptr = (float*)delta.data_ptr();
p.head_dim = query.size(3);
p.head_dim_value = value.size(3);
p.num_queries = max_seqlen_q;
p.num_keys = max_seqlen_k;
p.num_batches = cu_seqlens_q.has_value() ? cu_seqlens_q->size(0) - 1 : B;
p.num_heads = nH;
p.custom_mask_type = custom_mask_type;
p.scale = sdp::calculate_scale(query, scale).as_float_unchecked();
if (cu_seqlens_q.has_value()) {
p.cu_seqlens_q_ptr = (int32_t*)cu_seqlens_q->data_ptr();
p.cu_seqlens_k_ptr = (int32_t*)cu_seqlens_k->data_ptr();
}
ASSIGN_CHECK_OVERFLOW(p.lse_strideB, logsumexp.stride(0));
ASSIGN_CHECK_OVERFLOW(p.lse_strideH, logsumexp.stride(1));
ASSIGN_CHECK_OVERFLOW(p.gO_strideB, grad_out.stride(0));
ASSIGN_CHECK_OVERFLOW(p.gO_strideM, grad_out.stride(1));
ASSIGN_CHECK_OVERFLOW(p.gO_strideH, grad_out.stride(2));
ASSIGN_CHECK_OVERFLOW(p.o_strideB, out.stride(0));
ASSIGN_CHECK_OVERFLOW(p.o_strideH, out.stride(2));
ASSIGN_CHECK_OVERFLOW(p.gQ_strideB, grad_q.stride(0));
ASSIGN_CHECK_OVERFLOW(p.gK_strideB, grad_k.stride(0));
ASSIGN_CHECK_OVERFLOW(p.gV_strideB, grad_v.stride(0));
ASSIGN_CHECK_OVERFLOW(p.gQ_strideH, grad_q.stride(2));
ASSIGN_CHECK_OVERFLOW(p.gK_strideH, grad_k.stride(2));
ASSIGN_CHECK_OVERFLOW(p.gV_strideH, grad_v.stride(2));
// We removed the chunk/cat optimization and the multiplier is always 1
p.gQKV_strideM_multiplier = 1;
TORCH_INTERNAL_ASSERT(p.gQ_strideM() == grad_q.stride(1));
TORCH_INTERNAL_ASSERT(p.gK_strideM() == grad_k.stride(1));
TORCH_INTERNAL_ASSERT(p.gV_strideM() == grad_v.stride(1));
ASSIGN_CHECK_OVERFLOW(p.q_strideB, query.stride(0));
ASSIGN_CHECK_OVERFLOW(p.k_strideB, key.stride(0));
ASSIGN_CHECK_OVERFLOW(p.v_strideB, value.stride(0));
ASSIGN_CHECK_OVERFLOW(p.q_strideM, query.stride(1));
ASSIGN_CHECK_OVERFLOW(p.k_strideM, key.stride(1));
ASSIGN_CHECK_OVERFLOW(p.v_strideM, value.stride(1));
ASSIGN_CHECK_OVERFLOW(p.q_strideH, query.stride(2));
ASSIGN_CHECK_OVERFLOW(p.k_strideH, key.stride(2));
ASSIGN_CHECK_OVERFLOW(p.v_strideH, value.stride(2));
ASSIGN_CHECK_OVERFLOW(p.delta_strideB, delta.stride(0));
ASSIGN_CHECK_OVERFLOW(p.delta_strideH, delta.stride(1));
if (bias.has_value()) {
CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA((*bias));
TORCH_CHECK(
bias->scalar_type() == CutlassToAtenDtype<scalar_t>::atScalarType(),
"invalid dtype for bias - should match query's dtype");
p.bias_ptr = (scalar_t*)bias->data_ptr();
// assign strides for bias, viewed as:
// (batch_sz, n_heads, n_queries, n_keys)
// We make sure to expand prior to calling the kernel
const at::Tensor& bias_4d_view = *bias;
TORCH_CHECK(bias_4d_view.dim()==4);
TORCH_CHECK(bias_4d_view.size(0)==B);
TORCH_CHECK(bias_4d_view.size(1)==nH);
TORCH_CHECK(bias_4d_view.size(2)==M);
TORCH_CHECK(bias_4d_view.size(3)==N);
ASSIGN_CHECK_OVERFLOW(p.bias_strideB, bias_4d_view.stride(0));
ASSIGN_CHECK_OVERFLOW(p.bias_strideH, bias_4d_view.stride(1));
ASSIGN_CHECK_OVERFLOW(p.bias_strideM, bias_4d_view.stride(2));
if (bias_requires_grad) {
p.grad_bias_ptr = (scalar_t*)grad_bias.data_ptr();
// assign strides for gB, viewed as
// (batch_sz, n_heads, n_queries, n_keys). might have different strides
// than B, for example if bias tensor was created with
// torch.tensor((B * nH, 1, nK)).expand((B * nH, nQ, nK)),
// different values of Q will point to the same memory
// locations, meaning bias.stride(1) == 0, while we'd want
// grad_bias.stride(1) == nK
// We have expanded the input prior to calling the forward kernel
const at::Tensor& grad_bias_4d_view = grad_bias;
TORCH_CHECK(grad_bias_4d_view.dim()==4);
TORCH_CHECK(grad_bias_4d_view.size(0)==B);
TORCH_CHECK(grad_bias_4d_view.size(1)==nH);
TORCH_CHECK(grad_bias_4d_view.size(2)==M);
TORCH_CHECK(grad_bias_4d_view.size(3)==N);
ASSIGN_CHECK_OVERFLOW(p.gB_strideB, grad_bias_4d_view.stride(0));
ASSIGN_CHECK_OVERFLOW(p.gB_strideH, grad_bias_4d_view.stride(1));
ASSIGN_CHECK_OVERFLOW(p.gB_strideM, grad_bias_4d_view.stride(2));
}
}
if (use_dropout) {
p.rng_engine_inputs = rng_engine_inputs;
p.dropout_prob = dropout_p;
}
// Heuristic for finding optimal number of splits
auto parallelism_without_split_key =
p.getBlocksGrid().x * p.getBlocksGrid().y * p.getBlocksGrid().z;
p.num_splits_key = cutlass::ceil_div(p.num_keys, Kernel::kBlockSizeJ);
if (num_splits_key.has_value()) { // Skip heuristic, if user provided an explicit value
p.num_splits_key = std::max<int64_t>(p.num_splits_key, num_splits_key.value());
// If we already have enough parallelism, split-keys can help
// better use L2 cache.
    // This is negligible when the seqlen is too small, though
if (parallelism_without_split_key >= 256 &&
p.num_keys <= 2 * Kernel::kBlockSizeJ) {
p.num_splits_key = 1;
}
// Increasing `split_keys` leads to using more gmem for temporary storage
// when we need a staging area for gK/gV. let's avoid that
if (Kernel::kNeedsAccumGradK || Kernel::kNeedsAccumGradV) {
p.num_splits_key = ::min(
int(p.num_splits_key), 200 / (p.num_batches * p.num_heads));
}
}
if (!Kernel::kEnableSplitKeys || p.num_splits_key < 1) {
p.num_splits_key = 1;
}
auto& ctx = at::globalContext();
if (ctx.deterministicAlgorithms()) {
if (ctx.deterministicAlgorithmsWarnOnly()) {
TORCH_WARN_ONCE(
"Memory Efficient attention defaults to a non-deterministic algorithm. ",
"To explicitly enable determinism call torch.use_deterministic_algorithms(True, warn_only=False).");
} else {
TORCH_CHECK(
num_splits_key.value_or(1) <= 1,
"Using `num_splits_key > 1` makes the algorithm non-deterministic, and pytorch's deterministic mode is enabled");
p.num_splits_key = 1;
}
}
int64_t size_bytes = p.workspace_size();
if (size_bytes) {
workspace =
at::empty({size_bytes}, query.options().dtype(at::ScalarType::Byte));
p.workspace = (float*)workspace.data_ptr();
if (p.should_zero_workspace()) {
workspace.zero_();
}
}
Kernel::check_supported(p);
if (smem_bytes > 0xc000) {
// https://docs.nvidia.com/cuda/cuda-c-programming-guide/#features-and-technical-specifications-technical-specifications-per-compute-capability
auto err = hipFuncSetAttribute(
kernel_fn, hipFuncAttributeMaxDynamicSharedMemorySize, smem_bytes);
TORCH_CHECK(
err != hipErrorInvalidValue,
"This GPU does not have enough shared-memory (kernel requires ",
smem_bytes / 1024,
" kb)");
AT_CUDA_CHECK(err);
}
// second syntax resulted in the error below on windows
// error C3495: 'kernel_fn': a simple capture must be a variable
// with automatic storage duration declared
// in the reaching scope of the lambda
#ifdef _WIN32
hipFuncAttributes attr;
AT_CUDA_CHECK(hipFuncGetAttributes(&attr, kernel_fn));
TORCH_INTERNAL_ASSERT(
attr.binaryVersion >= Kernel::ArchTag::kMinComputeCapability,
"Something went wrong in the build process");
#else
auto checkBinaryArchMatches = [&]() {
hipFuncAttributes attr;
AT_CUDA_CHECK(hipFuncGetAttributes(&attr, kernel_fn));
return attr.binaryVersion >= Kernel::ArchTag::kMinComputeCapability;
};
TORCH_INTERNAL_ASSERT(
checkBinaryArchMatches(), "Something went wrong in the build process");
#endif
hipLaunchKernelGGL(( kernel_fn), dim3(p.getBlocksGrid()), dim3(p.getThreadsGrid()), smem_bytes, stream, p);
};
DISPATCH_TYPES(query, ([&]() {
dispatch_cutlassB<scalar_t>(launchKernel, computeCapability);
}));
TORCH_CHECK(kernel_launched, "cutlassB: no kernel found to launch!");
AT_CUDA_CHECK(hipGetLastError());
return std::make_tuple(grad_q, grad_k, grad_v, grad_bias);
#endif
TORCH_CHECK(false, "USE_MEM_EFF_ATTENTION was not enabled for build.")
return std::make_tuple(Tensor{}, Tensor{}, Tensor{}, Tensor{});
}
std::tuple<at::Tensor, at::Tensor, at::Tensor> _scaled_dot_product_flash_attention_backward_cuda(
const at::Tensor& grad_out_,
const at::Tensor& query,
const at::Tensor& key,
const at::Tensor& value,
const at::Tensor& out,
const at::Tensor& logsumexp,
const Tensor& cumulative_sequence_length_q,
const Tensor& cumulative_sequence_length_k,
const int64_t max_seqlen_batch_q,
const int64_t max_seqlen_batch_k,
double dropout_p,
bool is_causal,
const at::Tensor& philox_seed,
const at::Tensor& philox_offset,
c10::optional<double> scale){
if (!grad_out_.defined()) {
return std::make_tuple(Tensor{}, Tensor{}, Tensor{});
}
const int64_t batch_size = query.size(0);
const int64_t num_heads = query.size(1);
const int64_t head_dim = query.size(3);
Tensor q_t = query.transpose(1, 2);
Tensor k_t = key.transpose(1, 2);
Tensor v_t = value.transpose(1, 2);
int64_t Nnz_q{batch_size * max_seqlen_batch_q};
int64_t Nnz_kv{batch_size * max_seqlen_batch_k};
// For the standard MHA these will actually be views
Tensor query_reshaped = q_t.reshape({Nnz_q, num_heads, head_dim});
Tensor key_reshaped = k_t.reshape({Nnz_kv, num_heads, head_dim});
Tensor value_reshaped = v_t.reshape({Nnz_kv, num_heads, head_dim});
auto grad_out_reshaped = grad_out_.transpose(1,2).reshape({{Nnz_q, num_heads, head_dim}});
auto out_reshaped = out.transpose(1,2).reshape({Nnz_q, num_heads, head_dim});
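  // Shape flow (descriptive note added by the editor): query/key/value arrive as
  // (batch, num_heads, seq_len, head_dim); transpose(1, 2) gives
  // (batch, seq_len, num_heads, head_dim) and the reshape packs batch and sequence into
  // (batch * seq_len, num_heads, head_dim) -- the "varlen" layout the flash-attention
  // kernels consume together with the cumulative sequence-length tensors.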
Tensor grad_q, grad_k, grad_v;
std::tie(grad_q, grad_k, grad_v) = at::_flash_attention_backward(
grad_out_reshaped,
query_reshaped,
key_reshaped,
value_reshaped,
out_reshaped,
logsumexp,
cumulative_sequence_length_q,
cumulative_sequence_length_k,
max_seqlen_batch_q,
max_seqlen_batch_k,
dropout_p,
is_causal,
philox_seed,
philox_offset,
scale);
grad_q = grad_q.view({batch_size, max_seqlen_batch_q, num_heads, head_dim}).transpose(1,2);
grad_k = grad_k.view({batch_size, max_seqlen_batch_k, num_heads, head_dim}).transpose(1,2);
grad_v = grad_v.view({batch_size, max_seqlen_batch_k, num_heads, head_dim}).transpose(1,2);
return std::make_tuple(grad_q, grad_k, grad_v);
}
std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor> _scaled_dot_product_efficient_attention_backward_cuda(
const at::Tensor& grad_out_,
const at::Tensor& query,
const at::Tensor& key,
const at::Tensor& value,
const at::Tensor& attn_bias,
const at::Tensor& out,
const at::Tensor& logsumexp,
const at::Tensor& philox_seed,
const at::Tensor& philox_offset,
double dropout_p,
std::array<bool, 4> grad_input_mask,
bool causal,
c10::optional<double> scale) {
if (!grad_out_.defined()) {
return std::make_tuple(Tensor{}, Tensor{}, Tensor{}, Tensor{});
}
auto grad_out = grad_out_.transpose(1, 2);
auto out_t = out.transpose(1, 2);
auto q_t = query.transpose(1, 2);
auto k_t = key.transpose(1, 2);
auto v_t = value.transpose(1, 2);
Tensor grad_q, grad_k, grad_v, grad_bias;
  // This is needed because SavedVariable automatically converts
  // c10::optional to an undefined tensor
c10::optional<Tensor> kernel_bias;
if (attn_bias.defined()) {
kernel_bias = attn_bias;
}
  // Will be added with signature changes for dropout and bias.
  // We are only handling dense inputs here, but this should be passed
  // from forward to backward
int64_t max_seqlen_q = q_t.size(1);
int64_t max_seqlen_k = k_t.size(1);
sdp::CustomMaskType custom_mask_type = causal
? sdp::CustomMaskType::CausalFromTopLeft
: sdp::CustomMaskType::NoCustomMask;
std::tie(grad_q, grad_k, grad_v, grad_bias) =
at::_efficient_attention_backward(
grad_out,
q_t,
k_t,
v_t,
kernel_bias,
out_t,
c10::nullopt,
c10::nullopt,
max_seqlen_q,
max_seqlen_k,
logsumexp,
dropout_p,
philox_seed,
philox_offset,
static_cast<int64_t>(custom_mask_type),
grad_input_mask[3],
scale,
c10::nullopt); // num_split_keys
return std::make_tuple(
grad_q.transpose(1, 2), grad_k.transpose(1, 2), grad_v.transpose(1, 2), grad_bias);
}
} // namespace native
} // namespace at
| 12ee8b4ffcad4de6f3a82f1d4db1f5fe04b8da10.cu | #include <cstdint>
#include <type_traits>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <c10/util/Exception.h>
#include <c10/util/bit_cast.h>
#include <c10/core/TensorImpl.h>
#include <ATen/native/nested/NestedTensorTransformerFunctions.h>
#include <ATen/native/nested/NestedTensorUtils.h>
#include <ATen/native/transformers/attention.h>
#include <ATen/native/transformers/cuda/sdp_utils.h>
#include <ATen/native/transformers/sdp_utils_cpp.h>
#include <ATen/cuda/CUDAGeneratorImpl.h>
#ifdef USE_FLASH_ATTENTION
// FlashAttention Specific Imports
#include <ATen/native/transformers/cuda/flash_attn/fmha_api.h>
#endif
#ifdef USE_MEM_EFF_ATTENTION
// MemoryEfficient Attention Specific Imports
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
#include <ATen/native/transformers/cuda/mem_eff_attention/kernels/cutlassB.h>
#include <ATen/native/transformers/cuda/mem_eff_attention/gemm_kernel_utils.h>
#include <ATen/native/transformers/cuda/mem_eff_attention/pytorch_utils.h>
#endif
namespace at {
namespace native {
std::tuple<Tensor, Tensor, Tensor> _flash_attention_backward(
const Tensor& grad_out,
const Tensor& query,
const Tensor& key,
const Tensor& value,
const Tensor& out,
const Tensor& logsumexp,
const Tensor& cumulative_sequence_length_q,
const Tensor& cumulative_sequence_length_k,
const int64_t max_seqlen_batch_q,
const int64_t max_seqlen_batch_k,
double dropout_p,
bool is_causal,
const Tensor& philox_seed,
const Tensor& philox_offset,
c10::optional<double> scale) {
#if defined(USE_FLASH_ATTENTION)
/*
num_splits determines how much to parallelize over the seqlen_q dimension
num_splits=0 means
it will be set by an internal heuristic. We're exposing num_splits mostly for
benchmarking. We will hard code it to 0 for now
*/
constexpr int num_splits{0};
const auto softmax_scale = sdp::calculate_scale(query, scale).as_float_unchecked();
// CUDA code assumes that dout is contiguous
auto contiguous_grad_out = grad_out.contiguous();
auto contiguous_out = out.contiguous();
Tensor dq = at::empty_like(query);
Tensor dk = at::empty_like(key);
Tensor dv = at::empty_like(value);
  // The kernel computes grad_softmax regardless; we will drop it from this function's return
Tensor grad_softmax;
std::tie(dq, dk, dv, grad_softmax) = pytorch_fmha::mha_bwd(
contiguous_grad_out,
query,
key,
value,
contiguous_out,
logsumexp,
dq,
dk,
dv,
cumulative_sequence_length_q,
cumulative_sequence_length_k,
max_seqlen_batch_q,
max_seqlen_batch_k,
dropout_p,
softmax_scale,
false, /*zero_tensors = false for all calls here*/
is_causal,
num_splits,
philox_seed,
philox_offset
);
return std::make_tuple(dq, dk, dv);
#endif
TORCH_CHECK(false, "USE_FLASH_ATTENTION was not enabled for build.");
return std::make_tuple(Tensor(), Tensor(), Tensor());
}
std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor>
_efficient_attention_backward(
const at::Tensor& grad_out_,
const at::Tensor& query,
const at::Tensor& key,
const at::Tensor& value,
const c10::optional<at::Tensor>& bias, // additive attention bias
const at::Tensor& out,
// (Mode 1MHK only) [b+1]: cu_seqlens_q[b] contains the
// position of the first query token for batch $b
const c10::optional<at::Tensor>& cu_seqlens_q,
// (Mode 1MHK only) [b+1]: cu_seqlens_k[b] contains the
// position of the first key token for batch $b
const c10::optional<at::Tensor>& cu_seqlens_k,
// (Mode 1MHK only) Maximum sequence length across batches
int64_t max_seqlen_q,
// (Mode 1MHK only) Maximum sequence length across batches
int64_t max_seqlen_k,
const at::Tensor& logsumexp,
double dropout_p, // dropout probability
const at::Tensor& philox_seed, // seed using for generating random numbers for dropout
const at::Tensor& philox_offset, // offset into random number sequence
int64_t custom_mask_type,
const bool bias_requires_grad,
const c10::optional<double> scale,
c10::optional <int64_t> num_splits_key) {
#if defined(USE_MEM_EFF_ATTENTION)
if (!grad_out_.defined()) {
return std::make_tuple(Tensor{}, Tensor{}, Tensor{}, Tensor{});
}
// ndim
TORCH_CHECK(query.dim() == grad_out_.dim());
TORCH_CHECK(query.dim() == key.dim());
TORCH_CHECK(query.dim() == value.dim());
TORCH_CHECK(query.dim() == 4);
// batch size
TORCH_CHECK(query.size(0) == grad_out_.size(0));
TORCH_CHECK(query.size(0) == key.size(0));
TORCH_CHECK(query.size(0) == value.size(0));
// seqlen
TORCH_CHECK(key.size(1) == value.size(1));
TORCH_CHECK(query.size(1) == grad_out_.size(1));
// Num heads
TORCH_CHECK(query.size(2) == key.size(2));
TORCH_CHECK(query.size(2) == value.size(2));
TORCH_CHECK(query.size(2) == grad_out_.size(2));
// Embedding per head
TORCH_CHECK(query.size(3) == key.size(3));
TORCH_CHECK(value.size(3) == grad_out_.size(3));
// handle potentially non-contiguous grad_out through a copy
auto grad_out = grad_out_.contiguous();
CHECK_NOSPARSE_CONTIGUOUS_CUDA(grad_out);
CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(query);
CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(key);
CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(value);
TORCH_CHECK(cu_seqlens_q.has_value() == cu_seqlens_k.has_value());
TORCH_CHECK(
!(cu_seqlens_q.has_value() && bias.has_value()),
"cu seqlen + bias not supported");
if (cu_seqlens_q.has_value()) {
TORCH_CHECK(cu_seqlens_q->scalar_type() == at::ScalarType::Int);
TORCH_CHECK(cu_seqlens_k->scalar_type() == at::ScalarType::Int);
TORCH_CHECK(cu_seqlens_q->dim() == 1 && cu_seqlens_k->dim() == 1);
CHECK_NOSPARSE_CONTIGUOUS_CUDA((*cu_seqlens_q));
CHECK_NOSPARSE_CONTIGUOUS_CUDA((*cu_seqlens_k));
TORCH_CHECK(cu_seqlens_q->size(0) == cu_seqlens_k->size(0));
TORCH_CHECK(query.size(0) == 1, "cu_seqlen only supports batch_size=1");
TORCH_CHECK(max_seqlen_q > 0, "max_seqlen_q required with `cu_seqlens_q`");
TORCH_CHECK(max_seqlen_k > 0, "max_seqlen_q required with `cu_seqlens_q`");
TORCH_CHECK(
max_seqlen_k <= key.size(1), "Invalid max_seqlen_k:", max_seqlen_k);
TORCH_CHECK(
max_seqlen_q <= query.size(1), "Invalid max_seqlen_q:", max_seqlen_q);
} else {
max_seqlen_q = query.size(1);
max_seqlen_k = key.size(1);
}
at::cuda::CUDAGuard device_guard(query.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
int64_t B = query.size(0);
int64_t M = query.size(1);
int64_t N = key.size(1);
int64_t nH = query.size(2);
int64_t K = query.size(3);
int64_t Kv = value.size(3);
at::Tensor grad_q, grad_k, grad_v, grad_bias;
grad_q = at::empty(query.sizes(), query.options());
grad_k = at::empty(key.sizes(), key.options());
grad_v = at::empty(value.sizes(), value.options());
if (bias_requires_grad) {
// force alignment for the last dim
std::vector<int64_t> sz = bias->sizes().vec();
int64_t lastDim = sz[sz.size() - 1];
int64_t alignTo = 16;
sz[sz.size() - 1] = alignTo * ((lastDim + alignTo - 1) / alignTo);
grad_bias = at::empty(sz, bias->options())
.slice(/*dim=*/-1, /*start=*/0, /*end=*/lastDim);
}
at::Tensor workspace;
const bool use_dropout = std::fpclassify(dropout_p) != FP_ZERO;
// See Note [Seed and Offset Device]
at::PhiloxCudaState rng_engine_inputs;
if (use_dropout) {
if (at::cuda::currentStreamCaptureStatus() ==
at::cuda::CaptureStatus::None) {
rng_engine_inputs = at::PhiloxCudaState(
*philox_seed.data_ptr<int64_t>(),
*philox_offset.data_ptr<int64_t>());
} else { // dropout + capture
rng_engine_inputs = at::PhiloxCudaState(
philox_seed.data_ptr<int64_t>(),
philox_offset.data_ptr<int64_t>(),
0);
}
}
cudaDeviceProp* p = at::cuda::getDeviceProperties(query.device().index());
const int computeCapability = p->major * 10 + p->minor;
bool kernel_launched = false;
const auto maxK = std::max(query.size(3), value.size(3));
const auto maxShmem = p->sharedMemPerBlockOptin;
auto launchKernel = [&](auto _k, auto kernel_fn) {
using Kernel = decltype(_k);
using scalar_t = typename Kernel::scalar_t;
(void)_k;
if (kernel_launched) {
return;
}
// Check if this kernel is compatible
if (Kernel::kMaxK < maxK) {
return;
}
// Dropout must be supported if we need it
if (use_dropout && !Kernel::kApplyDropout) {
return;
}
if (Kernel::kKeysQueriesAlignedToBlockSize &&
(cu_seqlens_q.has_value() || M % Kernel::kBlockSizeI ||
N % Kernel::kBlockSizeJ)) {
return;
}
// Alignment
if ((query.stride(2) % Kernel::kMinimumAlignment) ||
(key.stride(2) % Kernel::kMinimumAlignment) ||
(value.stride(2) % Kernel::kMinimumAlignment)) {
return;
}
// Uses too much shmem
size_t smem_bytes = sizeof(typename Kernel::SharedStorage);
if (smem_bytes > maxShmem) {
return;
}
kernel_launched = true;
// TODO: Fuse this into a kernel?
// This is a bottleneck for smaller sequences (M <= 128)
auto delta = Kernel::kKernelComputesDelta
? at::empty({B, nH, M}, query.options().dtype(at::ScalarType::Float))
: (grad_out.to(at::kFloat) * out.to(at::kFloat))
.sum(-1)
.transpose(-2, -1)
.contiguous();
TORCH_INTERNAL_ASSERT(delta.size(0) == B);
TORCH_INTERNAL_ASSERT(delta.size(1) == nH);
TORCH_INTERNAL_ASSERT(delta.size(2) == M);
typename Kernel::Params p;
p.query_ptr = (scalar_t*)query.data_ptr();
p.key_ptr = (scalar_t*)key.data_ptr();
p.value_ptr = (scalar_t*)value.data_ptr();
p.logsumexp_ptr = (typename Kernel::lse_scalar_t*)logsumexp.data_ptr();
p.output_ptr = (scalar_t*)out.data_ptr();
p.grad_output_ptr = (scalar_t*)grad_out.data_ptr();
p.grad_query_ptr = (scalar_t*)grad_q.data_ptr();
p.grad_key_ptr = (scalar_t*)grad_k.data_ptr();
p.grad_value_ptr = (scalar_t*)grad_v.data_ptr();
p.delta_ptr = (float*)delta.data_ptr();
p.head_dim = query.size(3);
p.head_dim_value = value.size(3);
p.num_queries = max_seqlen_q;
p.num_keys = max_seqlen_k;
p.num_batches = cu_seqlens_q.has_value() ? cu_seqlens_q->size(0) - 1 : B;
p.num_heads = nH;
p.custom_mask_type = custom_mask_type;
p.scale = sdp::calculate_scale(query, scale).as_float_unchecked();
if (cu_seqlens_q.has_value()) {
p.cu_seqlens_q_ptr = (int32_t*)cu_seqlens_q->data_ptr();
p.cu_seqlens_k_ptr = (int32_t*)cu_seqlens_k->data_ptr();
}
ASSIGN_CHECK_OVERFLOW(p.lse_strideB, logsumexp.stride(0));
ASSIGN_CHECK_OVERFLOW(p.lse_strideH, logsumexp.stride(1));
ASSIGN_CHECK_OVERFLOW(p.gO_strideB, grad_out.stride(0));
ASSIGN_CHECK_OVERFLOW(p.gO_strideM, grad_out.stride(1));
ASSIGN_CHECK_OVERFLOW(p.gO_strideH, grad_out.stride(2));
ASSIGN_CHECK_OVERFLOW(p.o_strideB, out.stride(0));
ASSIGN_CHECK_OVERFLOW(p.o_strideH, out.stride(2));
ASSIGN_CHECK_OVERFLOW(p.gQ_strideB, grad_q.stride(0));
ASSIGN_CHECK_OVERFLOW(p.gK_strideB, grad_k.stride(0));
ASSIGN_CHECK_OVERFLOW(p.gV_strideB, grad_v.stride(0));
ASSIGN_CHECK_OVERFLOW(p.gQ_strideH, grad_q.stride(2));
ASSIGN_CHECK_OVERFLOW(p.gK_strideH, grad_k.stride(2));
ASSIGN_CHECK_OVERFLOW(p.gV_strideH, grad_v.stride(2));
// We removed the chunk/cat optimization and the multiplier is always 1
p.gQKV_strideM_multiplier = 1;
TORCH_INTERNAL_ASSERT(p.gQ_strideM() == grad_q.stride(1));
TORCH_INTERNAL_ASSERT(p.gK_strideM() == grad_k.stride(1));
TORCH_INTERNAL_ASSERT(p.gV_strideM() == grad_v.stride(1));
ASSIGN_CHECK_OVERFLOW(p.q_strideB, query.stride(0));
ASSIGN_CHECK_OVERFLOW(p.k_strideB, key.stride(0));
ASSIGN_CHECK_OVERFLOW(p.v_strideB, value.stride(0));
ASSIGN_CHECK_OVERFLOW(p.q_strideM, query.stride(1));
ASSIGN_CHECK_OVERFLOW(p.k_strideM, key.stride(1));
ASSIGN_CHECK_OVERFLOW(p.v_strideM, value.stride(1));
ASSIGN_CHECK_OVERFLOW(p.q_strideH, query.stride(2));
ASSIGN_CHECK_OVERFLOW(p.k_strideH, key.stride(2));
ASSIGN_CHECK_OVERFLOW(p.v_strideH, value.stride(2));
ASSIGN_CHECK_OVERFLOW(p.delta_strideB, delta.stride(0));
ASSIGN_CHECK_OVERFLOW(p.delta_strideH, delta.stride(1));
if (bias.has_value()) {
CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA((*bias));
TORCH_CHECK(
bias->scalar_type() == CutlassToAtenDtype<scalar_t>::atScalarType(),
"invalid dtype for bias - should match query's dtype");
p.bias_ptr = (scalar_t*)bias->data_ptr();
// assign strides for bias, viewed as:
// (batch_sz, n_heads, n_queries, n_keys)
// We make sure to expand prior to calling the kernel
const at::Tensor& bias_4d_view = *bias;
TORCH_CHECK(bias_4d_view.dim()==4);
TORCH_CHECK(bias_4d_view.size(0)==B);
TORCH_CHECK(bias_4d_view.size(1)==nH);
TORCH_CHECK(bias_4d_view.size(2)==M);
TORCH_CHECK(bias_4d_view.size(3)==N);
ASSIGN_CHECK_OVERFLOW(p.bias_strideB, bias_4d_view.stride(0));
ASSIGN_CHECK_OVERFLOW(p.bias_strideH, bias_4d_view.stride(1));
ASSIGN_CHECK_OVERFLOW(p.bias_strideM, bias_4d_view.stride(2));
if (bias_requires_grad) {
p.grad_bias_ptr = (scalar_t*)grad_bias.data_ptr();
// assign strides for gB, viewed as
// (batch_sz, n_heads, n_queries, n_keys). might have different strides
// than B, for example if bias tensor was created with
// torch.tensor((B * nH, 1, nK)).expand((B * nH, nQ, nK)),
// different values of Q will point to the same memory
// locations, meaning bias.stride(1) == 0, while we'd want
// grad_bias.stride(1) == nK
// We have expanded the input prior to calling the forward kernel
const at::Tensor& grad_bias_4d_view = grad_bias;
TORCH_CHECK(grad_bias_4d_view.dim()==4);
TORCH_CHECK(grad_bias_4d_view.size(0)==B);
TORCH_CHECK(grad_bias_4d_view.size(1)==nH);
TORCH_CHECK(grad_bias_4d_view.size(2)==M);
TORCH_CHECK(grad_bias_4d_view.size(3)==N);
ASSIGN_CHECK_OVERFLOW(p.gB_strideB, grad_bias_4d_view.stride(0));
ASSIGN_CHECK_OVERFLOW(p.gB_strideH, grad_bias_4d_view.stride(1));
ASSIGN_CHECK_OVERFLOW(p.gB_strideM, grad_bias_4d_view.stride(2));
}
}
if (use_dropout) {
p.rng_engine_inputs = rng_engine_inputs;
p.dropout_prob = dropout_p;
}
// Heuristic for finding optimal number of splits
auto parallelism_without_split_key =
p.getBlocksGrid().x * p.getBlocksGrid().y * p.getBlocksGrid().z;
p.num_splits_key = cutlass::ceil_div(p.num_keys, Kernel::kBlockSizeJ);
if (num_splits_key.has_value()) { // Skip heuristic, if user provided an explicit value
p.num_splits_key = std::max<int64_t>(p.num_splits_key, num_splits_key.value());
// If we already have enough parallelism, split-keys can help
// better use L2 cache.
    // This is negligible when the seqlen is too small, though
if (parallelism_without_split_key >= 256 &&
p.num_keys <= 2 * Kernel::kBlockSizeJ) {
p.num_splits_key = 1;
}
// Increasing `split_keys` leads to using more gmem for temporary storage
// when we need a staging area for gK/gV. let's avoid that
if (Kernel::kNeedsAccumGradK || Kernel::kNeedsAccumGradV) {
p.num_splits_key = std::min(
int(p.num_splits_key), 200 / (p.num_batches * p.num_heads));
}
}
if (!Kernel::kEnableSplitKeys || p.num_splits_key < 1) {
p.num_splits_key = 1;
}
auto& ctx = at::globalContext();
if (ctx.deterministicAlgorithms()) {
if (ctx.deterministicAlgorithmsWarnOnly()) {
TORCH_WARN_ONCE(
"Memory Efficient attention defaults to a non-deterministic algorithm. ",
"To explicitly enable determinism call torch.use_deterministic_algorithms(True, warn_only=False).");
} else {
TORCH_CHECK(
num_splits_key.value_or(1) <= 1,
"Using `num_splits_key > 1` makes the algorithm non-deterministic, and pytorch's deterministic mode is enabled");
p.num_splits_key = 1;
}
}
int64_t size_bytes = p.workspace_size();
if (size_bytes) {
workspace =
at::empty({size_bytes}, query.options().dtype(at::ScalarType::Byte));
p.workspace = (float*)workspace.data_ptr();
if (p.should_zero_workspace()) {
workspace.zero_();
}
}
Kernel::check_supported(p);
if (smem_bytes > 0xc000) {
// https://docs.nvidia.com/cuda/cuda-c-programming-guide/#features-and-technical-specifications-technical-specifications-per-compute-capability
auto err = cudaFuncSetAttribute(
kernel_fn, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_bytes);
TORCH_CHECK(
err != cudaErrorInvalidValue,
"This GPU does not have enough shared-memory (kernel requires ",
smem_bytes / 1024,
" kb)");
AT_CUDA_CHECK(err);
}
// second syntax resulted in the error below on windows
// error C3495: 'kernel_fn': a simple capture must be a variable
// with automatic storage duration declared
// in the reaching scope of the lambda
#ifdef _WIN32
cudaFuncAttributes attr;
AT_CUDA_CHECK(cudaFuncGetAttributes(&attr, kernel_fn));
TORCH_INTERNAL_ASSERT(
attr.binaryVersion >= Kernel::ArchTag::kMinComputeCapability,
"Something went wrong in the build process");
#else
auto checkBinaryArchMatches = [&]() {
cudaFuncAttributes attr;
AT_CUDA_CHECK(cudaFuncGetAttributes(&attr, kernel_fn));
return attr.binaryVersion >= Kernel::ArchTag::kMinComputeCapability;
};
TORCH_INTERNAL_ASSERT(
checkBinaryArchMatches(), "Something went wrong in the build process");
#endif
kernel_fn<<<p.getBlocksGrid(), p.getThreadsGrid(), smem_bytes, stream>>>(p);
};
DISPATCH_TYPES(query, ([&]() {
dispatch_cutlassB<scalar_t>(launchKernel, computeCapability);
}));
TORCH_CHECK(kernel_launched, "cutlassB: no kernel found to launch!");
AT_CUDA_CHECK(cudaGetLastError());
return std::make_tuple(grad_q, grad_k, grad_v, grad_bias);
#endif
TORCH_CHECK(false, "USE_MEM_EFF_ATTENTION was not enabled for build.")
return std::make_tuple(Tensor{}, Tensor{}, Tensor{}, Tensor{});
}
std::tuple<at::Tensor, at::Tensor, at::Tensor> _scaled_dot_product_flash_attention_backward_cuda(
const at::Tensor& grad_out_,
const at::Tensor& query,
const at::Tensor& key,
const at::Tensor& value,
const at::Tensor& out,
const at::Tensor& logsumexp,
const Tensor& cumulative_sequence_length_q,
const Tensor& cumulative_sequence_length_k,
const int64_t max_seqlen_batch_q,
const int64_t max_seqlen_batch_k,
double dropout_p,
bool is_causal,
const at::Tensor& philox_seed,
const at::Tensor& philox_offset,
c10::optional<double> scale){
if (!grad_out_.defined()) {
return std::make_tuple(Tensor{}, Tensor{}, Tensor{});
}
const int64_t batch_size = query.size(0);
const int64_t num_heads = query.size(1);
const int64_t head_dim = query.size(3);
Tensor q_t = query.transpose(1, 2);
Tensor k_t = key.transpose(1, 2);
Tensor v_t = value.transpose(1, 2);
int64_t Nnz_q{batch_size * max_seqlen_batch_q};
int64_t Nnz_kv{batch_size * max_seqlen_batch_k};
// For the standard MHA these will actually be views
Tensor query_reshaped = q_t.reshape({Nnz_q, num_heads, head_dim});
Tensor key_reshaped = k_t.reshape({Nnz_kv, num_heads, head_dim});
Tensor value_reshaped = v_t.reshape({Nnz_kv, num_heads, head_dim});
auto grad_out_reshaped = grad_out_.transpose(1,2).reshape({{Nnz_q, num_heads, head_dim}});
auto out_reshaped = out.transpose(1,2).reshape({Nnz_q, num_heads, head_dim});
Tensor grad_q, grad_k, grad_v;
std::tie(grad_q, grad_k, grad_v) = at::_flash_attention_backward(
grad_out_reshaped,
query_reshaped,
key_reshaped,
value_reshaped,
out_reshaped,
logsumexp,
cumulative_sequence_length_q,
cumulative_sequence_length_k,
max_seqlen_batch_q,
max_seqlen_batch_k,
dropout_p,
is_causal,
philox_seed,
philox_offset,
scale);
grad_q = grad_q.view({batch_size, max_seqlen_batch_q, num_heads, head_dim}).transpose(1,2);
grad_k = grad_k.view({batch_size, max_seqlen_batch_k, num_heads, head_dim}).transpose(1,2);
grad_v = grad_v.view({batch_size, max_seqlen_batch_k, num_heads, head_dim}).transpose(1,2);
return std::make_tuple(grad_q, grad_k, grad_v);
}
std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor> _scaled_dot_product_efficient_attention_backward_cuda(
const at::Tensor& grad_out_,
const at::Tensor& query,
const at::Tensor& key,
const at::Tensor& value,
const at::Tensor& attn_bias,
const at::Tensor& out,
const at::Tensor& logsumexp,
const at::Tensor& philox_seed,
const at::Tensor& philox_offset,
double dropout_p,
std::array<bool, 4> grad_input_mask,
bool causal,
c10::optional<double> scale) {
if (!grad_out_.defined()) {
return std::make_tuple(Tensor{}, Tensor{}, Tensor{}, Tensor{});
}
auto grad_out = grad_out_.transpose(1, 2);
auto out_t = out.transpose(1, 2);
auto q_t = query.transpose(1, 2);
auto k_t = key.transpose(1, 2);
auto v_t = value.transpose(1, 2);
Tensor grad_q, grad_k, grad_v, grad_bias;
  // This is needed because SavedVariable automatically converts
  // c10::optional to an undefined tensor
c10::optional<Tensor> kernel_bias;
if (attn_bias.defined()) {
kernel_bias = attn_bias;
}
  // Will be added with signature changes for dropout and bias.
  // We are only handling dense inputs here, but this should be passed
  // from forward to backward
int64_t max_seqlen_q = q_t.size(1);
int64_t max_seqlen_k = k_t.size(1);
sdp::CustomMaskType custom_mask_type = causal
? sdp::CustomMaskType::CausalFromTopLeft
: sdp::CustomMaskType::NoCustomMask;
std::tie(grad_q, grad_k, grad_v, grad_bias) =
at::_efficient_attention_backward(
grad_out,
q_t,
k_t,
v_t,
kernel_bias,
out_t,
c10::nullopt,
c10::nullopt,
max_seqlen_q,
max_seqlen_k,
logsumexp,
dropout_p,
philox_seed,
philox_offset,
static_cast<int64_t>(custom_mask_type),
grad_input_mask[3],
scale,
c10::nullopt); // num_split_keys
return std::make_tuple(
grad_q.transpose(1, 2), grad_k.transpose(1, 2), grad_v.transpose(1, 2), grad_bias);
}
} // namespace native
} // namespace at
|
c0d7bb9f760df3528ff86937b18d78389a0e5933.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2012, Thomas Schutzmeier
// FreeBSD License
// See https://github.com/unvirtual/cutl/blob/master/LICENSE
#include "../primitives.h"
/**********************************************************************************
*
* AABBArray implementation
*
**********************************************************************************/
DevAABBArray
AABBArray::dev_array() {
DevAABBArray dev_aabb_array;
dev_aabb_array.minima = minima.pointer();
dev_aabb_array.maxima = maxima.pointer();
dev_aabb_array.length = size();
return dev_aabb_array;
}
void
AABBArray::resize(int size) {
minima.resize(size);
maxima.resize(size);
}
void
AABBArray::set(int index, const UFloat4 & min, const UFloat4 & max) {
minima.set(index, min);
maxima.set(index, max);
}
void
AABBArray::print(std::string prefix="") const {
minima.print(prefix + "min");
maxima.print(prefix + "max");
}
RayArray::RayArray(const std::vector<cukdRay> & rays) {
std::vector<UFloat4> orig, dir;
for(std::vector<cukdRay>::const_iterator it = rays.begin(); it != rays.end(); ++it) {
orig.push_back(it->origin);
dir.push_back(it->direction);
}
origins.populate(orig);
directions.populate(dir);
};
/**********************************************************************************
*
* TriangleArray implementation
*
**********************************************************************************/
TriangleArray::TriangleArray(const std::vector<Triangle> & triangles) {
std::vector<UFloat4> vv[3], nn[3];
for(std::vector<Triangle>::const_iterator it = triangles.begin();
it != triangles.end(); ++it) {
for(int j = 0; j < 3; ++j) {
vv[j].push_back(it->v[j]);
nn[j].push_back(it->n[j]);
}
}
for(int i = 0; i < 3; ++i) {
v[i].populate(vv[i]);
n[i].populate(nn[i]);
}
aabbs.resize(triangles.size());
}
void
TriangleArray::compute_aabbs() {
DevTriangleArray dev_tris = dev_array();
triangle_aabbs(dev_tris, size());
}
void
TriangleArray::append(const TriangleArray & tris) {
int old_size = size();
resize(old_size + tris.size());
for(int i = 0; i < 3; ++i) {
thrust::copy(tris.v[i].begin(), tris.v[i].end(), v[i].begin() + old_size);
thrust::copy(tris.n[i].begin(), tris.n[i].end(), n[i].begin() + old_size);
}
}
DevTriangleArray
TriangleArray::dev_array() {
DevTriangleArray dev_triangle_array;
for(int i = 0; i < 3; ++i) {
dev_triangle_array.v[i] = v[i].pointer();
dev_triangle_array.n[i] = n[i].pointer();
}
dev_triangle_array.length = size();
dev_triangle_array.aabbs = aabbs.dev_array();
return dev_triangle_array;
}
void
TriangleArray::resize(int size) {
for(int i = 0; i < 3; ++i) {
v[i].resize(size);
n[i].resize(size);
}
aabbs.resize(size);
}
// Trivial kernel for triangle AABB computation
__global__
void
triangle_aabbs_kernel(DevTriangleArray tris) {
int tid = blockDim.x*blockIdx.x + threadIdx.x;
UFloat4 min, max;
if(tid < tris.length) {
for(int i = 0; i < 3; ++i) {
min.component[i] =
min_three(tris.v[0][tid].component[i],
tris.v[1][tid].component[i],
tris.v[2][tid].component[i]);
max.component[i] =
max_three(tris.v[0][tid].component[i],
tris.v[1][tid].component[i],
tris.v[2][tid].component[i]);
}
tris.aabbs.minima[tid] = min;
tris.aabbs.maxima[tid] = max;
}
}
void
triangle_aabbs(DevTriangleArray tris, int length) {
dim3 grid(IntegerDivide(256)(length),1,1);
dim3 blocks(256, 1, 1);
hipLaunchKernelGGL(( triangle_aabbs_kernel), dim3(grid),dim3(blocks), 0, 0, tris);
}
| c0d7bb9f760df3528ff86937b18d78389a0e5933.cu | // Copyright (c) 2012, Thomas Schutzmeier
// FreeBSD License
// See https://github.com/unvirtual/cutl/blob/master/LICENSE
#include "../primitives.h"
/**********************************************************************************
*
* AABBArray implementation
*
**********************************************************************************/
DevAABBArray
AABBArray::dev_array() {
DevAABBArray dev_aabb_array;
dev_aabb_array.minima = minima.pointer();
dev_aabb_array.maxima = maxima.pointer();
dev_aabb_array.length = size();
return dev_aabb_array;
}
void
AABBArray::resize(int size) {
minima.resize(size);
maxima.resize(size);
}
void
AABBArray::set(int index, const UFloat4 & min, const UFloat4 & max) {
minima.set(index, min);
maxima.set(index, max);
}
void
AABBArray::print(std::string prefix="") const {
minima.print(prefix + "min");
maxima.print(prefix + "max");
}
RayArray::RayArray(const std::vector<cukdRay> & rays) {
std::vector<UFloat4> orig, dir;
for(std::vector<cukdRay>::const_iterator it = rays.begin(); it != rays.end(); ++it) {
orig.push_back(it->origin);
dir.push_back(it->direction);
}
origins.populate(orig);
directions.populate(dir);
};
/**********************************************************************************
*
* TriangleArray implementation
*
**********************************************************************************/
TriangleArray::TriangleArray(const std::vector<Triangle> & triangles) {
std::vector<UFloat4> vv[3], nn[3];
for(std::vector<Triangle>::const_iterator it = triangles.begin();
it != triangles.end(); ++it) {
for(int j = 0; j < 3; ++j) {
vv[j].push_back(it->v[j]);
nn[j].push_back(it->n[j]);
}
}
for(int i = 0; i < 3; ++i) {
v[i].populate(vv[i]);
n[i].populate(nn[i]);
}
aabbs.resize(triangles.size());
}
void
TriangleArray::compute_aabbs() {
DevTriangleArray dev_tris = dev_array();
triangle_aabbs(dev_tris, size());
}
void
TriangleArray::append(const TriangleArray & tris) {
int old_size = size();
resize(old_size + tris.size());
for(int i = 0; i < 3; ++i) {
thrust::copy(tris.v[i].begin(), tris.v[i].end(), v[i].begin() + old_size);
thrust::copy(tris.n[i].begin(), tris.n[i].end(), n[i].begin() + old_size);
}
}
DevTriangleArray
TriangleArray::dev_array() {
DevTriangleArray dev_triangle_array;
for(int i = 0; i < 3; ++i) {
dev_triangle_array.v[i] = v[i].pointer();
dev_triangle_array.n[i] = n[i].pointer();
}
dev_triangle_array.length = size();
dev_triangle_array.aabbs = aabbs.dev_array();
return dev_triangle_array;
}
void
TriangleArray::resize(int size) {
for(int i = 0; i < 3; ++i) {
v[i].resize(size);
n[i].resize(size);
}
aabbs.resize(size);
}
// Trivial kernel for triangle AABB computation
__global__
void
triangle_aabbs_kernel(DevTriangleArray tris) {
int tid = blockDim.x*blockIdx.x + threadIdx.x;
UFloat4 min, max;
if(tid < tris.length) {
for(int i = 0; i < 3; ++i) {
min.component[i] =
min_three(tris.v[0][tid].component[i],
tris.v[1][tid].component[i],
tris.v[2][tid].component[i]);
max.component[i] =
max_three(tris.v[0][tid].component[i],
tris.v[1][tid].component[i],
tris.v[2][tid].component[i]);
}
tris.aabbs.minima[tid] = min;
tris.aabbs.maxima[tid] = max;
}
}
void
triangle_aabbs(DevTriangleArray tris, int length) {
dim3 grid(IntegerDivide(256)(length),1,1);
dim3 blocks(256, 1, 1);
triangle_aabbs_kernel<<<grid,blocks>>>(tris);
}
|
c24a4101885799ff9ef41659541ab2bf27f1b38f.hip | // !!! This is a file automatically generated by hipify!!!
#include "CUDAWorker.h"
#include "CUDAKernel.h"
#include <iostream>
#include <cstdio>
#define CUDA_CHECK(value) { \
hipError_t _m_cudaStat = value; \
if (_m_cudaStat != hipSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
hipGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} }
#define CUDA_CHECK_RETURN(value) { \
hipError_t _m_cudaStat = value; \
if (_m_cudaStat != hipSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
hipGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} }
inline double getTime(timeb& start, timeb& stop)
{
return (double) (stop.millitm - start.millitm) / 1000.0 + (stop.time - start.time);
}
timeb progStartTime;
CUDAWorker::CUDAWorker(DataPool* loader, Param* params, RecalcWorker* recalcWorker, int deviceId):
Worker(loader, params, recalcWorker, 4, 32, sizeof(int)),
deviceQueryProfile(NULL),
deviceBuffer(),
deviceMap(),
queryProfileFlag(false),
warpSize(32),
numBlocks(24 * 10),
numThreads(64),
streams(),
maxMapSize(0),
recalcWorker(recalcWorker),
deviceId(deviceId)
{
ftime(&progStartTime);
bufferSize = 1<<28;
numWarps = numBlocks * numThreads / 32;
maxMapSize = bufferSize / 16 / 32;
}
CUDAWorker::~CUDAWorker() {
}
void CUDAWorker::alloc(){
/*buffers for packing*/
CUDA_CHECK(hipHostMalloc((void** ) &dbSeqPacked[0], bufferSize));
CUDA_CHECK(hipHostMalloc((void** ) &dbSeqPacked[1], bufferSize));
//sequence buffer
CUDA_CHECK(hipMalloc((void** ) &deviceBuffer[0], bufferSize));
CUDA_CHECK(hipMalloc((void** ) &deviceBuffer[1], bufferSize));
//maps
CUDA_CHECK(hipHostMalloc((void** ) &fillMap[0], sizeof(unsigned) * maxMapSize * 2));
CUDA_CHECK(hipHostMalloc((void** ) &fillMap[1], sizeof(unsigned) * maxMapSize * 2));
CUDA_CHECK(hipMalloc((void** ) &deviceMap[0], sizeof(unsigned) * maxMapSize * 2));
CUDA_CHECK(hipMalloc((void** ) &deviceMap[1], sizeof(unsigned) * maxMapSize * 2));
//results
CUDA_CHECK(hipHostMalloc((void** ) &score[0], sizeof(int) * maxMapSize* 32));
CUDA_CHECK(hipHostMalloc((void** ) &score[1], sizeof(int) * maxMapSize* 32));
CUDA_CHECK(hipMalloc((void** ) &devResult, sizeof(int) * maxMapSize * 32));
buf[0] = (char*) dbSeqPacked[0];
buf[1] = (char*) dbSeqPacked[1];
entry[0].setEntry(buf[0], (int*) fillMap[0], score[0]);
entry[1].setEntry(buf[1], (int*) fillMap[1], score[1]);
compEntry = entry + 1;
auxEntry = entry;
makeQueryProfile(query, queryLen);
CUDA_CHECK(hipStreamCreate(&streams[0]));
CUDA_CHECK(hipStreamCreate(&streams[1]));
}
void CUDAWorker::free(){
CUDA_CHECK(hipStreamDestroy(streams[0]));
CUDA_CHECK(hipStreamDestroy(streams[1]));
CUDA_CHECK(hipHostFree(dbSeqPacked[0]));
CUDA_CHECK(hipHostFree(dbSeqPacked[1]));
CUDA_CHECK(hipHostFree(fillMap[0]));
CUDA_CHECK(hipHostFree(fillMap[1]));
CUDA_CHECK(hipFree(deviceBuffer[0]));
CUDA_CHECK(hipFree(deviceBuffer[1]));
CUDA_CHECK(hipFree(deviceMap[0]));
CUDA_CHECK(hipFree(deviceMap[1]));
CUDA_CHECK(hipHostFree(score[0]));
CUDA_CHECK(hipHostFree(score[1]));
CUDA_CHECK(hipFree(devResult));
}
void CUDAWorker::showPerformance(){
double elapsedTime = getTime(computeStartTime, computeStopTime);
double gcups = queryLen * TotalAminoAcidResidue / elapsedTime / 1000000000.0;
printf("CUDA computing time: %lf, AARes %ld, GCUPs %lf\n", elapsedTime,TotalAminoAcidResidue, gcups);
}
void CUDAWorker::setQuery(const char* query, const size_t qlen) {
CUDA_CHECK(hipSetDevice(deviceId));
if (deviceQueryProfile != NULL)
CUDA_CHECK(hipFree(deviceQueryProfile));
this->queryLen = qlen;
this->query = new char[qlen];
memcpy(this->query, query, sizeof(char) * qlen);
}
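// Builds the 32-row query profile and binds it to a texture; the query length is assumed to be
// a multiple of 4, since four residues are packed into each int4 column.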
void CUDAWorker::makeQueryProfile(const char* query, const size_t qlen) {
int queryLenQuad = qlen >> 2;
int i, j;
int4* hostQueryPrf;
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<int4>();
CUDA_CHECK_RETURN(hipMallocArray(&cu_array, &channelDesc, queryLenQuad, 32));
CUDA_CHECK_RETURN(hipHostMalloc((void **) &hostQueryPrf, sizeof(int4) * queryLenQuad * 32));
for (i = 0; i < 32; i++) {
int4* p = hostQueryPrf + i * queryLenQuad;
for (j = 0; j < qlen; j += 4) {
p->x = matrix[i][query[j]];
p->y = matrix[i][query[j + 1]];
p->w = matrix[i][query[j + 2]];
p->z = matrix[i][query[j + 3]];
p++;
}
}
//set attribute of query profile
CUDA_CHECK_RETURN(hipMemcpy2DToArray(cu_array, 0, 0, hostQueryPrf, queryLenQuad * sizeof(int4), queryLenQuad * sizeof(int4), 32, hipMemcpyHostToDevice));
bindQueryPrf(cu_array);
queryProfileFlag = true;
CUDA_CHECK_RETURN(hipHostFree(hostQueryPrf));
}
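// Packs the database into batches of 32 equal-length sequences, interleaving their residues
// 4 bytes at a time, and records (offset, length) pairs in the fill map; sequences of 6144
// residues or longer are handed off to the RecalcWorker instead.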
void CUDAWorker::packDB(){
//printf("CUDA packing\n");
Entry* pEntry = entry + !bufFlag;
fillBatchNum = 0;
#if 0
char* bufWorkp = dbSeqBuf;
int* pMap = fillMap[!bufFlag];
char* dstWorkp = buf[!bufFlag];
#else
char* bufWorkp = dbSeqBuf;
char* dstWorkp = pEntry->getBuf();
int* pMap = pEntry->getMap();
#endif
batchInfo* infoData = info;
int curPos = 0;
/*copy out the buffer and transfer symbol to code*/
this->TotalAminoAcidResidue += filledSize;
//Vectorization
for (int idx = 0; idx < infoSize; ++idx) {
int num = infoData[idx].numSeqs;
int len = infoData[idx].seqLen;
/*limited length for intel MIC and 32-bit version CUDA kernel*/
if(len >= 3072 * 2){
for(int i = 0; i != num; ++i){
recalcWorker->pushOverflowSeq((char*) bufWorkp, len, fillBaseIndex + fillBatchNum * 16 + i);
bufWorkp += len;
}
continue;
}
int pass = num / batchSize;
for (int i = 0; i < pass; ++i) {
for (int j = 0; j < len; j += 4) {
/*inner loop: every sequences are interleaved*/
for(int k = 0; k < 32; ++k){
*(dstWorkp++) = bufWorkp[j + k * len + 0];
*(dstWorkp++) = bufWorkp[j + k * len + 1];
*(dstWorkp++) = bufWorkp[j + k * len + 2];
*(dstWorkp++) = bufWorkp[j + k * len + 3];
//dstWorkp += packSize;
}
}
*(pMap++) = curPos;
*(pMap++) = len;
++fillBatchNum;
curPos += batchSize * len / 4;
bufWorkp += batchSize * len;
}
}
pEntry->setBatchNum(fillBatchNum);
pEntry->setBaseIdx(fillBaseIndex);
pEntry->setResultSize(fillBatchNum * batchSize);
}
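// Stages the packed sequence buffer and its batch map on the device asynchronously, using the
// stream that matches the current double-buffer slot.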
void CUDAWorker::asyncCopy(){
unsigned* dbSeqSrc = dbSeqPacked[bufFlag];
int batchNum = compEntry->getBatchNum();
CUDA_CHECK_RETURN(
hipMemcpyAsync(
deviceBuffer[bufFlag],
dbSeqSrc,
bufferSize,
hipMemcpyHostToDevice,
streams[bufFlag]);
);
CUDA_CHECK_RETURN(
hipMemcpyAsync(
deviceMap[bufFlag],
fillMap[bufFlag],
sizeof(unsigned) * batchNum * 2,
hipMemcpyHostToDevice,
streams[bufFlag])
);
}
void CUDAWorker::search(){
uint32_t batchNum = compEntry->getBatchNum();
kernelLaunch(
numBlocks,
numThreads,
streams[bufFlag],
deviceBuffer[bufFlag],
deviceMap[bufFlag],
batchNum,
queryLen,
devResult,
globalArray,
globalPitch
);
CUDA_CHECK_RETURN(
hipMemcpy(
compEntry->getScores(),
devResult,
sizeof(int) * batchNum * 32,
hipMemcpyDeviceToHost
)
);
}
| c24a4101885799ff9ef41659541ab2bf27f1b38f.cu | #include "CUDAWorker.h"
#include "CUDAKernel.h"
#include <iostream>
#include <cstdio>
#define CUDA_CHECK(value) { \
cudaError_t _m_cudaStat = value; \
if (_m_cudaStat != cudaSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} }
#define CUDA_CHECK_RETURN(value) { \
cudaError_t _m_cudaStat = value; \
if (_m_cudaStat != cudaSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} }
inline double getTime(timeb& start, timeb& stop)
{
return (double) (stop.millitm - start.millitm) / 1000.0 + (stop.time - start.time);
}
timeb progStartTime;
CUDAWorker::CUDAWorker(DataPool* loader, Param* params, RecalcWorker* recalcWorker, int deviceId):
Worker(loader, params, recalcWorker, 4, 32, sizeof(int)),
deviceQueryProfile(NULL),
deviceBuffer(),
deviceMap(),
queryProfileFlag(false),
warpSize(32),
numBlocks(24 * 10),
numThreads(64),
streams(),
maxMapSize(0),
recalcWorker(recalcWorker),
deviceId(deviceId)
{
ftime(&progStartTime);
bufferSize = 1<<28;
numWarps = numBlocks * numThreads / 32;
maxMapSize = bufferSize / 16 / 32;
}
CUDAWorker::~CUDAWorker() {
}
void CUDAWorker::alloc(){
/*buffers for packing*/
CUDA_CHECK(cudaMallocHost((void** ) &dbSeqPacked[0], bufferSize));
CUDA_CHECK(cudaMallocHost((void** ) &dbSeqPacked[1], bufferSize));
//sequence buffer
CUDA_CHECK(cudaMalloc((void** ) &deviceBuffer[0], bufferSize));
CUDA_CHECK(cudaMalloc((void** ) &deviceBuffer[1], bufferSize));
//maps
CUDA_CHECK(cudaMallocHost((void** ) &fillMap[0], sizeof(unsigned) * maxMapSize * 2));
CUDA_CHECK(cudaMallocHost((void** ) &fillMap[1], sizeof(unsigned) * maxMapSize * 2));
CUDA_CHECK(cudaMalloc((void** ) &deviceMap[0], sizeof(unsigned) * maxMapSize * 2));
CUDA_CHECK(cudaMalloc((void** ) &deviceMap[1], sizeof(unsigned) * maxMapSize * 2));
//results
CUDA_CHECK(cudaMallocHost((void** ) &score[0], sizeof(int) * maxMapSize* 32));
CUDA_CHECK(cudaMallocHost((void** ) &score[1], sizeof(int) * maxMapSize* 32));
CUDA_CHECK(cudaMalloc((void** ) &devResult, sizeof(int) * maxMapSize * 32));
buf[0] = (char*) dbSeqPacked[0];
buf[1] = (char*) dbSeqPacked[1];
entry[0].setEntry(buf[0], (int*) fillMap[0], score[0]);
entry[1].setEntry(buf[1], (int*) fillMap[1], score[1]);
compEntry = entry + 1;
auxEntry = entry;
makeQueryProfile(query, queryLen);
CUDA_CHECK(cudaStreamCreate(&streams[0]));
CUDA_CHECK(cudaStreamCreate(&streams[1]));
}
void CUDAWorker::free(){
CUDA_CHECK(cudaStreamDestroy(streams[0]));
CUDA_CHECK(cudaStreamDestroy(streams[1]));
CUDA_CHECK(cudaFreeHost(dbSeqPacked[0]));
CUDA_CHECK(cudaFreeHost(dbSeqPacked[1]));
CUDA_CHECK(cudaFreeHost(fillMap[0]));
CUDA_CHECK(cudaFreeHost(fillMap[1]));
CUDA_CHECK(cudaFree(deviceBuffer[0]));
CUDA_CHECK(cudaFree(deviceBuffer[1]));
CUDA_CHECK(cudaFree(deviceMap[0]));
CUDA_CHECK(cudaFree(deviceMap[1]));
CUDA_CHECK(cudaFreeHost(score[0]));
CUDA_CHECK(cudaFreeHost(score[1]));
CUDA_CHECK(cudaFree(devResult));
}
void CUDAWorker::showPerformance(){
double elapsedTime = getTime(computeStartTime, computeStopTime);
double gcups = queryLen * TotalAminoAcidResidue / elapsedTime / 1000000000.0;
printf("CUDA computing time: %lf, AARes %ld, GCUPs %lf\n", elapsedTime,TotalAminoAcidResidue, gcups);
}
void CUDAWorker::setQuery(const char* query, const size_t qlen) {
CUDA_CHECK(cudaSetDevice(deviceId));
if (deviceQueryProfile != NULL)
CUDA_CHECK(cudaFree(deviceQueryProfile));
this->queryLen = qlen;
this->query = new char[qlen];
memcpy(this->query, query, sizeof(char) * qlen);
}
void CUDAWorker::makeQueryProfile(const char* query, const size_t qlen) {
int queryLenQuad = qlen >> 2;
int i, j;
int4* hostQueryPrf;
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<int4>();
CUDA_CHECK_RETURN(cudaMallocArray(&cu_array, &channelDesc, queryLenQuad, 32));
CUDA_CHECK_RETURN(cudaMallocHost((void **) &hostQueryPrf, sizeof(int4) * queryLenQuad * 32));
for (i = 0; i < 32; i++) {
int4* p = hostQueryPrf + i * queryLenQuad;
for (j = 0; j < qlen; j += 4) {
p->x = matrix[i][query[j]];
p->y = matrix[i][query[j + 1]];
p->w = matrix[i][query[j + 2]];
p->z = matrix[i][query[j + 3]];
p++;
}
}
//set attribute of query profile
CUDA_CHECK_RETURN(cudaMemcpy2DToArray(cu_array, 0, 0, hostQueryPrf, queryLenQuad * sizeof(int4), queryLenQuad * sizeof(int4), 32, cudaMemcpyHostToDevice));
bindQueryPrf(cu_array);
queryProfileFlag = true;
CUDA_CHECK_RETURN(cudaFreeHost(hostQueryPrf));
}
void CUDAWorker::packDB(){
//printf("CUDA packing\n");
Entry* pEntry = entry + !bufFlag;
fillBatchNum = 0;
#if 0
char* bufWorkp = dbSeqBuf;
int* pMap = fillMap[!bufFlag];
char* dstWorkp = buf[!bufFlag];
#else
char* bufWorkp = dbSeqBuf;
char* dstWorkp = pEntry->getBuf();
int* pMap = pEntry->getMap();
#endif
batchInfo* infoData = info;
int curPos = 0;
/*copy out the buffer and transfer symbol to code*/
this->TotalAminoAcidResidue += filledSize;
//Vectorization
for (int idx = 0; idx < infoSize; ++idx) {
int num = infoData[idx].numSeqs;
int len = infoData[idx].seqLen;
/*limited length for intel MIC and 32-bit version CUDA kernel*/
if(len >= 3072 * 2){
for(int i = 0; i != num; ++i){
recalcWorker->pushOverflowSeq((char*) bufWorkp, len, fillBaseIndex + fillBatchNum * 16 + i);
bufWorkp += len;
}
continue;
}
int pass = num / batchSize;
for (int i = 0; i < pass; ++i) {
for (int j = 0; j < len; j += 4) {
/*inner loop: every sequences are interleaved*/
for(int k = 0; k < 32; ++k){
*(dstWorkp++) = bufWorkp[j + k * len + 0];
*(dstWorkp++) = bufWorkp[j + k * len + 1];
*(dstWorkp++) = bufWorkp[j + k * len + 2];
*(dstWorkp++) = bufWorkp[j + k * len + 3];
//dstWorkp += packSize;
}
}
*(pMap++) = curPos;
*(pMap++) = len;
++fillBatchNum;
curPos += batchSize * len / 4;
bufWorkp += batchSize * len;
}
}
pEntry->setBatchNum(fillBatchNum);
pEntry->setBaseIdx(fillBaseIndex);
pEntry->setResultSize(fillBatchNum * batchSize);
}
void CUDAWorker::asyncCopy(){
unsigned* dbSeqSrc = dbSeqPacked[bufFlag];
int batchNum = compEntry->getBatchNum();
CUDA_CHECK_RETURN(
cudaMemcpyAsync(
deviceBuffer[bufFlag],
dbSeqSrc,
bufferSize,
cudaMemcpyHostToDevice,
streams[bufFlag]);
);
CUDA_CHECK_RETURN(
cudaMemcpyAsync(
deviceMap[bufFlag],
fillMap[bufFlag],
sizeof(unsigned) * batchNum * 2,
cudaMemcpyHostToDevice,
streams[bufFlag])
);
}
void CUDAWorker::search(){
uint32_t batchNum = compEntry->getBatchNum();
kernelLaunch(
numBlocks,
numThreads,
streams[bufFlag],
deviceBuffer[bufFlag],
deviceMap[bufFlag],
batchNum,
queryLen,
devResult,
globalArray,
globalPitch
);
CUDA_CHECK_RETURN(
cudaMemcpy(
compEntry->getScores(),
devResult,
sizeof(int) * batchNum * 32,
cudaMemcpyDeviceToHost
)
);
}
|
0123e98660d18c0c21d2e65521eabff7366d367d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "set_coords_2D.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *coords = NULL;
hipMalloc(&coords, XSIZE*YSIZE);
size_t y = 1;
size_t x = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((set_coords_2D), dim3(gridBlock), dim3(threadBlock), 0, 0, coords, y, x);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((set_coords_2D), dim3(gridBlock), dim3(threadBlock), 0, 0, coords, y, x);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((set_coords_2D), dim3(gridBlock), dim3(threadBlock), 0, 0, coords, y, x);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 0123e98660d18c0c21d2e65521eabff7366d367d.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "set_coords_2D.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *coords = NULL;
cudaMalloc(&coords, XSIZE*YSIZE);
size_t y = 1;
size_t x = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
set_coords_2D<<<gridBlock,threadBlock>>>(coords,y,x);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
set_coords_2D<<<gridBlock,threadBlock>>>(coords,y,x);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
set_coords_2D<<<gridBlock,threadBlock>>>(coords,y,x);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
99dac9f05a474c7b2e2a7729ed579ae8c06ab069.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* created by chenhao li
*/
#include "./binary-scale.h"
#include <vector>
#include <cstdlib>
#include <cmath>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
namespace mshadow {
namespace cuda {
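// Elementwise forward pass: out = data * (*wValue); separate row strides let the kernel
// handle padded 2D tensors for input and output.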
template <typename DType>
__global__ void BinaryScaleForwardKernel(DType *data, DType *wValue, DType *out, int Y, int data_stride, int out_stride, int N) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= N) return;
int idx1 = i / Y * data_stride + i % Y;
int idx2 = i / Y * out_stride + i % Y;
*(out + idx2) = (*(data + idx1)) * (*wValue);
}
template<typename DType>
void BinaryScaleForward(Tensor<gpu, 2, DType> &data, Tensor<gpu, 2, DType> &wValue, Tensor<gpu, 2, DType> &out) {
DType *data_ptr = data.dptr_;
DType *wValue_ptr = wValue.dptr_;
DType *out_ptr = out.dptr_;
hipStream_t stream = Stream<gpu>::GetStream(out.stream_);
int size = data.shape_[0] * data.shape_[1];
dim3 numBlocks((size + kMaxThreadsPerBlock - 1)/kMaxThreadsPerBlock);
dim3 threadsPerBlock(kMaxThreadsPerBlock);
hipLaunchKernelGGL(( BinaryScaleForwardKernel<DType>), dim3(numBlocks), dim3(threadsPerBlock), 0, stream, data_ptr, wValue_ptr, out_ptr,
data.shape_[1], data.stride_, out.stride_, size);
}
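// Backward pass: gdata = grad * (*wValue); the weight gradient sum(data * grad) is accumulated
// into the single *gwValue location with a plain += from every thread (no atomics).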
template <typename DType>
__global__ void BinaryScaleBackwardKernel(DType *data, DType *grad, DType *wValue, DType *gwValue, DType *gdata,
int Y, int data_stride, int grad_stride, int gdata_stride, int N) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= N) return;
int idx1 = i / Y * data_stride + i % Y;
int idx2 = i / Y * grad_stride + i % Y;
int idx3 = i / Y * gdata_stride + i % Y;
*gwValue += (*(data + idx1)) * (*(grad + idx2));
*(gdata + idx3) = (*(grad + idx2)) * (*wValue);
}
template<typename DType>
void BinaryScaleBackward(Tensor<gpu, 2, DType> &data, Tensor<gpu, 2, DType> &grad, Tensor<gpu, 2, DType> &wValue,
Tensor<gpu, 2, DType> &gwValue, Tensor<gpu, 2, DType> &gdata) {
DType *grad_ptr = grad.dptr_;
DType *gwValue_ptr = gwValue.dptr_;
DType *data_ptr = data.dptr_;
DType *wValue_ptr = wValue.dptr_;
DType *gdata_ptr = gdata.dptr_;
hipStream_t stream = Stream<gpu>::GetStream(gdata.stream_);
int size = grad.shape_[0] * grad.shape_[1];
dim3 numBlocks((size + kMaxThreadsPerBlock - 1)/kMaxThreadsPerBlock);
dim3 threadsPerBlock(kMaxThreadsPerBlock);
hipLaunchKernelGGL(( BinaryScaleBackwardKernel<DType>), dim3(numBlocks), dim3(threadsPerBlock), 0, stream, data_ptr, grad_ptr, wValue_ptr, gwValue_ptr, gdata_ptr,
data.shape_[1], data.stride_, grad.stride_, gdata.stride_, size);
}
}//cuda
template<typename DType>
void BinaryScaleForward(Tensor<gpu, 2, DType> &data, Tensor<gpu, 2, DType> &wValue, Tensor<gpu, 2, DType> &out) {
cuda::BinaryScaleForward(data, wValue, out);
}
template<typename DType>
void BinaryScaleBackward(Tensor<gpu, 2, DType> &data, Tensor<gpu, 2, DType> &grad, Tensor<gpu, 2, DType> &wValue,
Tensor<gpu, 2, DType> &gwValue, Tensor<gpu, 2, DType> &gdata) {
cuda::BinaryScaleBackward(data, grad, wValue, gwValue, gdata);
}
}
namespace mxnet {
namespace op {
template<>
Operator* CreateOp<gpu>(BinaryScaleParam param, int dtype) {
Operator *op = NULL;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new BinaryScaleOp<gpu, DType>(param);
})
return op;
}
} // namespace op
} // namespace mxnet
| 99dac9f05a474c7b2e2a7729ed579ae8c06ab069.cu | /*!
* created by chenhao li
*/
#include "./binary-scale.h"
#include <vector>
#include <cstdlib>
#include <cmath>
#include <curand.h>
#include <curand_kernel.h>
namespace mshadow {
namespace cuda {
template <typename DType>
__global__ void BinaryScaleForwardKernel(DType *data, DType *wValue, DType *out, int Y, int data_stride, int out_stride, int N) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= N) return;
int idx1 = i / Y * data_stride + i % Y;
int idx2 = i / Y * out_stride + i % Y;
*(out + idx2) = (*(data + idx1)) * (*wValue);
}
template<typename DType>
void BinaryScaleForward(Tensor<gpu, 2, DType> &data, Tensor<gpu, 2, DType> &wValue, Tensor<gpu, 2, DType> &out) {
DType *data_ptr = data.dptr_;
DType *wValue_ptr = wValue.dptr_;
DType *out_ptr = out.dptr_;
cudaStream_t stream = Stream<gpu>::GetStream(out.stream_);
int size = data.shape_[0] * data.shape_[1];
dim3 numBlocks((size + kMaxThreadsPerBlock - 1)/kMaxThreadsPerBlock);
dim3 threadsPerBlock(kMaxThreadsPerBlock);
BinaryScaleForwardKernel<DType><<<numBlocks, threadsPerBlock, 0, stream>>>(data_ptr, wValue_ptr, out_ptr,
data.shape_[1], data.stride_, out.stride_, size);
}
template <typename DType>
__global__ void BinaryScaleBackwardKernel(DType *data, DType *grad, DType *wValue, DType *gwValue, DType *gdata,
int Y, int data_stride, int grad_stride, int gdata_stride, int N) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= N) return;
int idx1 = i / Y * data_stride + i % Y;
int idx2 = i / Y * grad_stride + i % Y;
int idx3 = i / Y * gdata_stride + i % Y;
*gwValue += (*(data + idx1)) * (*(grad + idx2));
*(gdata + idx3) = (*(grad + idx2)) * (*wValue);
}
template<typename DType>
void BinaryScaleBackward(Tensor<gpu, 2, DType> &data, Tensor<gpu, 2, DType> &grad, Tensor<gpu, 2, DType> &wValue,
Tensor<gpu, 2, DType> &gwValue, Tensor<gpu, 2, DType> &gdata) {
DType *grad_ptr = grad.dptr_;
DType *gwValue_ptr = gwValue.dptr_;
DType *data_ptr = data.dptr_;
DType *wValue_ptr = wValue.dptr_;
DType *gdata_ptr = gdata.dptr_;
cudaStream_t stream = Stream<gpu>::GetStream(gdata.stream_);
int size = grad.shape_[0] * grad.shape_[1];
dim3 numBlocks((size + kMaxThreadsPerBlock - 1)/kMaxThreadsPerBlock);
dim3 threadsPerBlock(kMaxThreadsPerBlock);
BinaryScaleBackwardKernel<DType><<<numBlocks, threadsPerBlock, 0, stream>>>(data_ptr, grad_ptr, wValue_ptr, gwValue_ptr, gdata_ptr,
data.shape_[1], data.stride_, grad.stride_, gdata.stride_, size);
}
}//cuda
template<typename DType>
void BinaryScaleForward(Tensor<gpu, 2, DType> &data, Tensor<gpu, 2, DType> &wValue, Tensor<gpu, 2, DType> &out) {
cuda::BinaryScaleForward(data, wValue, out);
}
template<typename DType>
void BinaryScaleBackward(Tensor<gpu, 2, DType> &data, Tensor<gpu, 2, DType> &grad, Tensor<gpu, 2, DType> &wValue,
Tensor<gpu, 2, DType> &gwValue, Tensor<gpu, 2, DType> &gdata) {
cuda::BinaryScaleBackward(data, grad, wValue, gwValue, gdata);
}
}
namespace mxnet {
namespace op {
template<>
Operator* CreateOp<gpu>(BinaryScaleParam param, int dtype) {
Operator *op = NULL;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new BinaryScaleOp<gpu, DType>(param);
})
return op;
}
} // namespace op
} // namespace mxnet
|
0b6a858475972ad16073294a8f4c85f295071450.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <iostream>
#include <hip/hip_runtime_api.h>
//#include <cutil.h>
#include <hip/hip_runtime.h>
#define SHARED_MEM_ELEMENTS 1024
#define GLOBAL_MEM_ELEMENTS 4096
int num_blocks;
int num_threads_per_block;
int num_iterations;
int divergence;
float* h_A;
float* h_B;
float* h_C;
float* h_res;
float* d_A;
float* d_B;
float* d_C;
float* d_res;
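// A single thread builds, for each warp's slice of the array, a circular pointer chain with a
// stride of 16 elements, then copies those pointer values into the data array itself so the
// kernel can chase addresses stored in global memory.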
__global__ void init_memory (unsigned long long ** my_ptr_array, unsigned long long * my_array, int stride, int num_blocks_k, int num_threads_per_block_k) {
int block_id;
int warp_id;
int i;
int index;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
void **ptr_array = (void **)my_ptr_array;
unsigned long long *array = (unsigned long long *)my_array;
if (tid == 0) {
// int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
//int elements_per_warp = elements_per_block / num_warps_per_block;
int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
// for (block_id = 0; block_id < num_blocks_k; block_id++) {
for (warp_id = 0; warp_id < num_warps_per_block; warp_id++) {
for (i = 0; i < elements_per_warp; i++) {
//index = (block_id * elements_per_block) + (warp_id * elements_per_warp);
index = (warp_id * elements_per_warp);
ptr_array[index + i] = (void*)&array[(index + ((i + 16) % elements_per_warp))];
}
}
/* for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
ptr_array[i] = (void*)&array[(i + 32)%GLOBAL_MEM_ELEMENTS];
}
*/
for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
//array[i] = (unsigned long long)ptr_array[(i+stride)%GLOBAL_MEM_ELEMENTS];
array[i] = (unsigned long long)ptr_array[i];
}
}
__syncthreads();
}
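// Pointer-chasing latency kernel: only the first 'divergence' threads of each warp follow the
// chain through their warp's slice of global memory; the final write to duration[] keeps the
// chase from being optimized away.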
__global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k) {
// unsigned long long int start_time, end_time;
unsigned long long int sum_time = 0;
int i, k;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int block_id = blockIdx.x;
int warp_id = threadIdx.x / 32;
int warp_thread_id = threadIdx.x % 32;
// int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
// int elements_per_warp = elements_per_block / num_warps_per_block;
int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
//int index1 = (block_id * elements_per_block) + (warp_id * elements_per_warp) + warp_thread_id;
int index1 = (warp_id * elements_per_warp) + warp_thread_id;
void **ptr_array = (void **)my_ptr_array;
unsigned long long int *array = (unsigned long long int *)my_array;
void **tmp_ptr;
//tmp_ptr = (void *)sdata;
//tmp_ptr = (void **)(&(ptr_array[(threadIdx.x * stride)%GLOBAL_MEM_ELEMENTS]));
//tmp_ptr = (void **)(&(ptr_array[(tid * stride)%GLOBAL_MEM_ELEMENTS]));
tmp_ptr = (void **)(&(ptr_array[index1]));
double f1, f2, f3;
f1 = 1.1;
f2 = 2.5;
if (warp_thread_id < divergence) {
/* __asm volatile (
".reg .f32 %r14;\n\t"
"mov.f32 %r14, 2.2;\n\t"
);
*/
for (int l = 0; l < iterations; l++) {
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
}
}
// __syncthreads();
// if ((blockDim.x * blockIdx.x + threadIdx.x) == 0)
duration[tid] = (unsigned long long)(*tmp_ptr) + (f1 * tid);
// __syncthreads();
}
void usage() {
std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" "threads active per warp" << std::endl;
}
void parametric_measure_shared(int N, int iterations, int stride) {
hipProfilerStop();
int i;
unsigned long long int * h_a;
unsigned long long int * d_a;
unsigned long long ** h_ptr_a;
unsigned long long ** d_ptr_a;
unsigned long long * duration;
unsigned long long * latency;
hipError_t error_id;
/* allocate array on CPU */
h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N);
h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*)*N);
latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks);
/* initialize array elements on CPU */
for (i = 0; i < N; i++) {
h_ptr_a[i] = (unsigned long long *)&h_a[i];
}
for (i = 0; i < N; i++) {
h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N];
}
/* allocate arrays on GPU */
hipMalloc ((void **) &d_a, sizeof(unsigned long long int) * N );
hipMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N );
hipMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks);
hipDeviceSynchronize ();
error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error 1 is %s\n", hipGetErrorString(error_id));
}
/* copy array elements from CPU to GPU */
hipMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, hipMemcpyHostToDevice);
hipMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, hipMemcpyHostToDevice);
hipMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, hipMemcpyHostToDevice);
hipDeviceSynchronize ();
error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error 2 is %s\n", hipGetErrorString(error_id));
}
hipLaunchKernelGGL(( init_memory) , dim3(1), dim3(1), 0, 0, d_ptr_a, d_a, stride, num_blocks, num_threads_per_block);
hipDeviceSynchronize();
/* launch kernel*/
//dim3 Db = dim3(13);
//dim3 Dg = dim3(768,1,1);
//printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride);
// int sharedMemSize = sizeof(unsigned long long int) * N ;
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
hipProfilerStart();
hipFuncSetCacheConfig(shared_latency, hipFuncCachePreferL1);
//shared_latency <<<Dg, Db, sharedMemSize>>>(d_a, N, iterations, duration);
//shared_latency <<<num_blocks, num_threads_per_block, sharedMemSize>>>(d_a, N, num_iterations, duration, stride, divergence);
hipLaunchKernelGGL(( shared_latency) , dim3(num_blocks), dim3(num_threads_per_block), 0, 0, d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, num_blocks, num_threads_per_block);
hipDeviceSynchronize();
///hipDeviceSynchronize ();
hipProfilerStop();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error 3 is %s\n", hipGetErrorString(error_id));
}
/* copy results from GPU to CPU */
hipMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, hipMemcpyDeviceToHost);
hipMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, hipMemcpyDeviceToHost);
hipDeviceSynchronize ();
/* print results*/
unsigned long long max_dur = latency[0];
unsigned long long min_dur = latency[0];
unsigned long long avg_lat = latency[0];
for (int i = 1; i < num_threads_per_block * num_blocks; i++) {
avg_lat += latency[i];
if (latency[i] > max_dur) {
max_dur = latency[i];
} else if (latency[i] < min_dur) {
min_dur = latency[i];
}
}
// printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * num_blocks * 256.0 *num_iterations)), (double)(min_dur/(256.0 * num_iterations)), (double)(max_dur/(256.0 * num_iterations)), time);
printf("%f\n", time);
/* free memory on GPU */
hipFree(d_a);
hipFree(d_ptr_a);
hipFree(duration);
hipDeviceSynchronize ();
/*free memory on CPU */
free(h_a);
free(h_ptr_a);
free(latency);
}
int main(int argc, char **argv)
{
int N;
if (argc != 6) {
usage();
exit(1);
}
num_blocks = atoi(argv[1]);
num_threads_per_block = atoi(argv[2]);
num_iterations = atoi(argv[3]);
divergence = atoi(argv[4]);
int stride = atoi(argv[5]);
N = GLOBAL_MEM_ELEMENTS;
parametric_measure_shared(N, 10, stride);
return 0;
}
| 0b6a858475972ad16073294a8f4c85f295071450.cu | #include <stdio.h>
#include <iostream>
#include <cuda_profiler_api.h>
//#include <cutil.h>
#include <cuda_runtime.h>
#define SHARED_MEM_ELEMENTS 1024
#define GLOBAL_MEM_ELEMENTS 4096
int num_blocks;
int num_threads_per_block;
int num_iterations;
int divergence;
float* h_A;
float* h_B;
float* h_C;
float* h_res;
float* d_A;
float* d_B;
float* d_C;
float* d_res;
__global__ void init_memory (unsigned long long ** my_ptr_array, unsigned long long * my_array, int stride, int num_blocks_k, int num_threads_per_block_k) {
int block_id;
int warp_id;
int i;
int index;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
void **ptr_array = (void **)my_ptr_array;
unsigned long long *array = (unsigned long long *)my_array;
if (tid == 0) {
// int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
//int elements_per_warp = elements_per_block / num_warps_per_block;
int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
// for (block_id = 0; block_id < num_blocks_k; block_id++) {
for (warp_id = 0; warp_id < num_warps_per_block; warp_id++) {
for (i = 0; i < elements_per_warp; i++) {
//index = (block_id * elements_per_block) + (warp_id * elements_per_warp);
index = (warp_id * elements_per_warp);
ptr_array[index + i] = (void*)&array[(index + ((i + 16) % elements_per_warp))];
}
}
/* for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
ptr_array[i] = (void*)&array[(i + 32)%GLOBAL_MEM_ELEMENTS];
}
*/
for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
//array[i] = (unsigned long long)ptr_array[(i+stride)%GLOBAL_MEM_ELEMENTS];
array[i] = (unsigned long long)ptr_array[i];
}
}
__syncthreads();
}
__global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k) {
// unsigned long long int start_time, end_time;
unsigned long long int sum_time = 0;
int i, k;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int block_id = blockIdx.x;
int warp_id = threadIdx.x / 32;
int warp_thread_id = threadIdx.x % 32;
// int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
// int elements_per_warp = elements_per_block / num_warps_per_block;
int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
//int index1 = (block_id * elements_per_block) + (warp_id * elements_per_warp) + warp_thread_id;
int index1 = (warp_id * elements_per_warp) + warp_thread_id;
void **ptr_array = (void **)my_ptr_array;
unsigned long long int *array = (unsigned long long int *)my_array;
void **tmp_ptr;
//tmp_ptr = (void *)sdata;
//tmp_ptr = (void **)(&(ptr_array[(threadIdx.x * stride)%GLOBAL_MEM_ELEMENTS]));
//tmp_ptr = (void **)(&(ptr_array[(tid * stride)%GLOBAL_MEM_ELEMENTS]));
tmp_ptr = (void **)(&(ptr_array[index1]));
double f1, f2, f3;
f1 = 1.1;
f2 = 2.5;
if (warp_thread_id < divergence) {
/* __asm volatile (
".reg .f32 %r14;\n\t"
"mov.f32 %r14, 2.2;\n\t"
);
*/
for (int l = 0; l < iterations; l++) {
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
}
}
// __syncthreads();
// if ((blockDim.x * blockIdx.x + threadIdx.x) == 0)
duration[tid] = (unsigned long long)(*tmp_ptr) + (f1 * tid);
// __syncthreads();
}
void usage() {
std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" "threads active per warp" << std::endl;
}
void parametric_measure_shared(int N, int iterations, int stride) {
cudaProfilerStop();
int i;
unsigned long long int * h_a;
unsigned long long int * d_a;
unsigned long long ** h_ptr_a;
unsigned long long ** d_ptr_a;
unsigned long long * duration;
unsigned long long * latency;
cudaError_t error_id;
/* allocate array on CPU */
h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N);
h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*)*N);
latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks);
/* initialize array elements on CPU */
for (i = 0; i < N; i++) {
h_ptr_a[i] = (unsigned long long *)&h_a[i];
}
for (i = 0; i < N; i++) {
h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N];
}
/* allocate arrays on GPU */
cudaMalloc ((void **) &d_a, sizeof(unsigned long long int) * N );
cudaMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N );
cudaMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks);
cudaThreadSynchronize ();
error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error 1 is %s\n", cudaGetErrorString(error_id));
}
/* copy array elements from CPU to GPU */
cudaMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, cudaMemcpyHostToDevice);
cudaMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, cudaMemcpyHostToDevice);
cudaMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyHostToDevice);
cudaThreadSynchronize ();
error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error 2 is %s\n", cudaGetErrorString(error_id));
}
init_memory <<<1, 1>>>(d_ptr_a, d_a, stride, num_blocks, num_threads_per_block);
cudaDeviceSynchronize();
/* launch kernel*/
//dim3 Db = dim3(13);
//dim3 Dg = dim3(768,1,1);
//printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride);
// int sharedMemSize = sizeof(unsigned long long int) * N ;
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
cudaProfilerStart();
cudaFuncSetCacheConfig(shared_latency, cudaFuncCachePreferL1);
//shared_latency <<<Dg, Db, sharedMemSize>>>(d_a, N, iterations, duration);
//shared_latency <<<num_blocks, num_threads_per_block, sharedMemSize>>>(d_a, N, num_iterations, duration, stride, divergence);
shared_latency <<<num_blocks, num_threads_per_block>>>(d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, num_blocks, num_threads_per_block);
cudaDeviceSynchronize();
///cudaThreadSynchronize ();
cudaProfilerStop();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error 3 is %s\n", cudaGetErrorString(error_id));
}
/* copy results from GPU to CPU */
cudaMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, cudaMemcpyDeviceToHost);
cudaMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyDeviceToHost);
cudaThreadSynchronize ();
/* print results*/
unsigned long long max_dur = latency[0];
unsigned long long min_dur = latency[0];
unsigned long long avg_lat = latency[0];
for (int i = 1; i < num_threads_per_block * num_blocks; i++) {
avg_lat += latency[i];
if (latency[i] > max_dur) {
max_dur = latency[i];
} else if (latency[i] < min_dur) {
min_dur = latency[i];
}
}
// printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * num_blocks * 256.0 *num_iterations)), (double)(min_dur/(256.0 * num_iterations)), (double)(max_dur/(256.0 * num_iterations)), time);
printf("%f\n", time);
/* free memory on GPU */
cudaFree(d_a);
cudaFree(d_ptr_a);
cudaFree(duration);
cudaThreadSynchronize ();
/*free memory on CPU */
free(h_a);
free(h_ptr_a);
free(latency);
}
int main(int argc, char **argv)
{
int N;
if (argc != 6) {
usage();
exit(1);
}
num_blocks = atoi(argv[1]);
num_threads_per_block = atoi(argv[2]);
num_iterations = atoi(argv[3]);
divergence = atoi(argv[4]);
int stride = atoi(argv[5]);
N = GLOBAL_MEM_ELEMENTS;
parametric_measure_shared(N, 10, stride);
return 0;
}
|
95d8401932fbf06186ebe86a4296ddfb3f75395c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/kernel/util/cuda_blas_interface.h"
#include "oneflow/core/device/cuda_util.h"
#include "oneflow/core/register/blob.h"
#include "oneflow/core/kernel/util/cuda_half_util.h"
namespace oneflow {
namespace {
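// Map CBLAS transpose flags to the corresponding hipBLAS/cuBLAS operation.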
hipblasOperation_t CblasTrans2CublasTrans(CBLAS_TRANSPOSE trans) {
hipblasOperation_t cublas_trans;
if (trans == CBLAS_TRANSPOSE::CblasNoTrans) {
cublas_trans = hipblasOperation_t::HIPBLAS_OP_N;
} else if (trans == CBLAS_TRANSPOSE::CblasTrans) {
cublas_trans = hipblasOperation_t::HIPBLAS_OP_T;
} else if (trans == CBLAS_TRANSPOSE::CblasConjTrans) {
cublas_trans = hipblasOperation_t::HIPBLAS_OP_C;
} else {
// do nothing
}
return cublas_trans;
}
std::tuple<int, int, int, hipblasOperation_t, hipblasOperation_t> PrepareToCallCublasGemm(
enum CBLAS_TRANSPOSE trans_a, enum CBLAS_TRANSPOSE trans_b, const int m, const int n,
const int k) {
int lda = (trans_a == CblasNoTrans) ? k : m;
int ldb = (trans_b == CblasNoTrans) ? n : k;
int ldc = n;
hipblasOperation_t cublas_trans_a = CblasTrans2CublasTrans(trans_a);
hipblasOperation_t cublas_trans_b = CblasTrans2CublasTrans(trans_b);
return std::make_tuple(lda, ldb, ldc, cublas_trans_a, cublas_trans_b);
}
template<typename T>
void Gemm(DeviceCtx* ctx, const enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, const int m, const int n, const int k, const T* alpha,
const T* a, const T* b, const T* beta, T* c) {
int lda, ldb, ldc;
hipblasOperation_t cublas_trans_a, cublas_trans_b;
std::tie(lda, ldb, ldc, cublas_trans_a, cublas_trans_b) =
PrepareToCallCublasGemm(trans_a, trans_b, m, n, k);
hipblasHandle_t handle;
if (std::is_same<T, half>::value) {
handle = ctx->cublas_tensor_op_math_handle();
} else {
handle = ctx->cublas_pmh_handle();
}
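// cuBLAS/hipBLAS is column-major, so compute C^T = B^T * A^T by swapping the operands and
// the (m, n) dimensions; the result is the row-major product C = A * B.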
cublas_gemm<T>(handle, cublas_trans_b, cublas_trans_a, n, m, k, alpha, b, ldb, a, lda, beta, c,
ldc);
}
template<>
void Gemm(DeviceCtx* ctx, const enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, const int m, const int n, const int k, const half* alpha,
const half* a, const half* b, const half* beta, half* c) {
const float alpha_f = __half2float(*alpha);
const float beta_f = __half2float(*beta);
int lda, ldb, ldc;
hipblasOperation_t cublas_trans_a, cublas_trans_b;
std::tie(lda, ldb, ldc, cublas_trans_a, cublas_trans_b) =
PrepareToCallCublasGemm(trans_a, trans_b, m, n, k);
OF_CUBLAS_CHECK(hipblasGemmEx(ctx->cublas_tensor_op_math_handle(), cublas_trans_b, cublas_trans_a,
n, m, k, &alpha_f, b, HIP_R_16F, ldb, a, HIP_R_16F, lda, &beta_f,
c, HIP_R_16F, ldc, HIP_R_32F, CUBLAS_GEMM_DFALT_TENSOR_OP));
}
void HGemmWithFloat(DeviceCtx* ctx, const enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, const int m, const int n, const int k,
const float* alpha, const half* a, const half* b, const float* beta, half* c) {
int lda, ldb, ldc;
hipblasOperation_t cublas_trans_a, cublas_trans_b;
std::tie(lda, ldb, ldc, cublas_trans_a, cublas_trans_b) =
PrepareToCallCublasGemm(trans_a, trans_b, m, n, k);
hipDataType data_type = GetCudaDataType(DataType::kFloat16);
OF_CUBLAS_CHECK(cublasSgemmEx(ctx->cublas_tensor_op_math_handle(), cublas_trans_b, cublas_trans_a,
n, m, k, alpha, b, data_type, ldb, a, data_type, lda, beta, c,
data_type, ldc));
}
std::tuple<int, int, int> CalcMNKForGemm(enum CBLAS_TRANSPOSE trans_a, const Blob* a,
const Blob* c) {
const auto& a_shape = a->shape_view();
const auto& c_shape = c->shape_view();
int m = c_shape.At(0);
int n = c_shape.Count(1);
int k = (trans_a == CblasNoTrans) ? a_shape.Count(1) : a_shape.At(0);
return std::make_tuple(m, n, k);
}
template<typename T>
void BlobGemmImpl(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a, enum CBLAS_TRANSPOSE trans_b,
T alpha, T beta, const Blob* a, const Blob* b, Blob* c) {
int m, n, k;
std::tie(m, n, k) = CalcMNKForGemm(trans_a, a, c);
BlasIf<DeviceType::kGPU>::OFGemm(ctx, trans_a, trans_b, m, n, k, alpha, a->dptr<T>(),
b->dptr<T>(), beta, c->mut_dptr<T>());
}
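// Write per-batch base addresses (start_ptr + i * stride_len) into a device-side pointer array,
// as required by the batched GEMM APIs.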
template<typename T>
__global__ void AssignStridedAddrGpu(T** dev_ptrs, T* start_ptr, int32_t stride_len,
int32_t stride_num) {
CUDA_1D_KERNEL_LOOP(i, stride_num) { dev_ptrs[i] = start_ptr + i * stride_len; }
}
template<typename T>
void AssignStridedAddr(DeviceCtx* ctx, T** dev_ptrs, T* start_ptr, int stride_len, int stride_num) {
hipLaunchKernelGGL(( AssignStridedAddrGpu<T>)
, dim3(BlocksNum4ThreadsNum(stride_num)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
dev_ptrs, start_ptr, stride_len, stride_num);
}
template<typename T>
std::tuple<int, int, int, int, int, int, hipblasOperation_t, hipblasOperation_t, T**, T**, T**>
PrepareToCallBatchedGemm(DeviceCtx* ctx, const enum CBLAS_TRANSPOSE trans_a,
const enum CBLAS_TRANSPOSE trans_b, int batch_size, int m, int n, int k,
const T* a, const T* b, T* c, T** buf) {
const int a_stride = m * k;
const int b_stride = k * n;
const int c_stride = m * n;
const int lda = (trans_a == CblasNoTrans) ? k : m;
const int ldb = (trans_b == CblasNoTrans) ? n : k;
const int ldc = n;
hipblasOperation_t cublas_trans_a = CblasTrans2CublasTrans(trans_a);
hipblasOperation_t cublas_trans_b = CblasTrans2CublasTrans(trans_b);
T** dev_a_ptrs = buf;
T** dev_b_ptrs = buf + batch_size;
T** dev_c_ptrs = buf + 2 * batch_size;
AssignStridedAddr<T>(ctx, dev_a_ptrs, const_cast<T*>(a), a_stride, batch_size);
AssignStridedAddr<T>(ctx, dev_b_ptrs, const_cast<T*>(b), b_stride, batch_size);
AssignStridedAddr<T>(ctx, dev_c_ptrs, c, c_stride, batch_size);
return std::make_tuple(a_stride, b_stride, c_stride, lda, ldb, ldc, cublas_trans_a,
cublas_trans_b, dev_a_ptrs, dev_b_ptrs, dev_c_ptrs);
}
template<typename T>
hipDataType GetCudaDataType4BatchedGemm() {
return CudaDataType<T>::value;
}
template<>
hipDataType GetCudaDataType4BatchedGemm<half>() {
return HIP_R_16F;
}
template<typename T>
void BatchedGemmImpl(DeviceCtx* ctx, const enum CBLAS_ORDER order,
const enum CBLAS_TRANSPOSE trans_a, const enum CBLAS_TRANSPOSE trans_b,
int batch_size, int m, int n, int k, const T* alpha, const T* a, const T* b,
const T* beta, T* c, T** buf) {
int a_stride, b_stride, c_stride;
int lda, ldb, ldc;
hipblasOperation_t cublas_trans_a, cublas_trans_b;
T** dev_a_ptrs;
T** dev_b_ptrs;
T** dev_c_ptrs;
std::tie(a_stride, b_stride, c_stride, lda, ldb, ldc, cublas_trans_a, cublas_trans_b, dev_a_ptrs,
dev_b_ptrs, dev_c_ptrs) =
PrepareToCallBatchedGemm<T>(ctx, trans_a, trans_b, batch_size, m, n, k, a, b, c, buf);
#if TORCH_HIP_VERSION >= 9010
hipDataType data_type = GetCudaDataType4BatchedGemm<T>();
hipblasGemmBatchedEx(ctx->cublas_pmh_handle(), cublas_trans_b, cublas_trans_a, n, m, k,
reinterpret_cast<const void*>(alpha),
reinterpret_cast<const void**>(const_cast<const T**>(dev_b_ptrs)), data_type,
ldb, reinterpret_cast<const void**>(const_cast<const T**>(dev_a_ptrs)),
data_type, lda, reinterpret_cast<const void*>(beta),
reinterpret_cast<void**>(dev_c_ptrs), data_type, ldc, batch_size, data_type,
HIPBLAS_GEMM_DEFAULT);
#else
cublas_gemmBatched<T>(ctx->cublas_pmh_handle(), cublas_trans_b, cublas_trans_a, n, m, k, alpha,
const_cast<const T**>(dev_b_ptrs), ldb, const_cast<const T**>(dev_a_ptrs),
lda, beta, dev_c_ptrs, ldc, batch_size);
#endif
}
#if TORCH_HIP_VERSION >= 9010
template<>
void BatchedGemmImpl(DeviceCtx* ctx, const enum CBLAS_ORDER order,
const enum CBLAS_TRANSPOSE trans_a, const enum CBLAS_TRANSPOSE trans_b,
int batch_size, int m, int n, int k, const half* alpha, const half* a,
const half* b, const half* beta, half* c, half** buf) {
float alpha_f = __half2float(*alpha);
float beta_f = __half2float(*beta);
int a_stride, b_stride, c_stride;
int lda, ldb, ldc;
hipblasOperation_t cublas_trans_a, cublas_trans_b;
half** dev_a_ptrs;
half** dev_b_ptrs;
half** dev_c_ptrs;
std::tie(a_stride, b_stride, c_stride, lda, ldb, ldc, cublas_trans_a, cublas_trans_b, dev_a_ptrs,
dev_b_ptrs, dev_c_ptrs) =
PrepareToCallBatchedGemm<half>(ctx, trans_a, trans_b, batch_size, m, n, k, a, b, c, buf);
OF_CUBLAS_CHECK(hipblasGemmBatchedEx(
ctx->cublas_tensor_op_math_handle(), CblasTrans2CublasTrans(trans_b),
CblasTrans2CublasTrans(trans_a), n, m, k, &alpha_f,
reinterpret_cast<const void**>(const_cast<const half**>(dev_b_ptrs)), HIP_R_16F, ldb,
reinterpret_cast<const void**>(const_cast<const half**>(dev_a_ptrs)), HIP_R_16F, lda,
&beta_f, reinterpret_cast<void**>(dev_c_ptrs), HIP_R_16F, ldc, batch_size, HIP_R_32F,
CUBLAS_GEMM_DFALT_TENSOR_OP));
}
#endif
void BatchedHGemmWithFloatImpl(DeviceCtx* ctx, const enum CBLAS_ORDER order,
const enum CBLAS_TRANSPOSE trans_a,
const enum CBLAS_TRANSPOSE trans_b, int batch_size, int m, int n,
int k, const float* alpha, const half* a, const half* b,
const float* beta, half* c, half** buf) {
int a_stride, b_stride, c_stride;
int lda, ldb, ldc;
hipblasOperation_t cublas_trans_a, cublas_trans_b;
half** dev_a_ptrs;
half** dev_b_ptrs;
half** dev_c_ptrs;
std::tie(a_stride, b_stride, c_stride, lda, ldb, ldc, cublas_trans_a, cublas_trans_b, dev_a_ptrs,
dev_b_ptrs, dev_c_ptrs) =
PrepareToCallBatchedGemm<half>(ctx, trans_a, trans_b, batch_size, m, n, k, a, b, c, buf);
#if TORCH_HIP_VERSION >= 9010
hipblasGemmBatchedEx(
ctx->cublas_pmh_handle(), cublas_trans_b, cublas_trans_a, n, m, k,
reinterpret_cast<const void*>(alpha),
reinterpret_cast<const void**>(const_cast<const half**>(dev_b_ptrs)), HIP_R_16F, ldb,
reinterpret_cast<const void**>(const_cast<const half**>(dev_a_ptrs)), HIP_R_16F, lda,
reinterpret_cast<const void*>(beta), reinterpret_cast<void**>(dev_c_ptrs), HIP_R_16F, ldc,
batch_size, HIP_R_32F, HIPBLAS_GEMM_DEFAULT);
#else
LOG(FATAL) << "BatchedHGemmWithFloatImpl() does not support TORCH_HIP_VERSION below 9010";
#endif
}
__global__ void AxpyHalfGpu(const int n, const half alpha, const half* x, const int incx, half* y,
const int incy) {
#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
CUDA_1D_KERNEL_LOOP(i, n) { y[i * incy] = __hfma(alpha, x[i * incx], y[i * incy]); }
#else
HALF_CHECK_FAILED;
#endif // __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
}
} // namespace
void BlasIf<DeviceType::kGPU>::BlobGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, float alpha, float beta,
const Blob* a, const Blob* b, Blob* c) {
BlobGemmImpl<float>(ctx, trans_a, trans_b, alpha, beta, a, b, c);
}
void BlasIf<DeviceType::kGPU>::BlobGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, double alpha, double beta,
const Blob* a, const Blob* b, Blob* c) {
BlobGemmImpl<double>(ctx, trans_a, trans_b, alpha, beta, a, b, c);
}
void BlasIf<DeviceType::kGPU>::BlobGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, float16 alpha, float16 beta,
const Blob* a, const Blob* b, Blob* c) {
BlobGemmImpl<float16>(ctx, trans_a, trans_b, alpha, beta, a, b, c);
}
void BlasIf<DeviceType::kGPU>::BlobHGemmWithFloat(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, float alpha,
float beta, const Blob* a, const Blob* b,
Blob* c) {
int m, n, k;
std::tie(m, n, k) = CalcMNKForGemm(trans_a, a, c);
BlasIf<DeviceType::kGPU>::OFHGemmWithFloat(ctx, trans_a, trans_b, m, n, k, alpha,
a->dptr<float16>(), b->dptr<float16>(), beta,
c->mut_dptr<float16>());
}
void BlasIf<DeviceType::kGPU>::OFGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, const int m, const int n,
const int k, const float alpha, const float* a,
const float* b, const float beta, float* c) {
Gemm<float>(ctx, CblasRowMajor, trans_a, trans_b, m, n, k, &alpha, a, b, &beta, c);
}
void BlasIf<DeviceType::kGPU>::OFGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, const int m, const int n,
const int k, const double alpha, const double* a,
const double* b, const double beta, double* c) {
Gemm<double>(ctx, CblasRowMajor, trans_a, trans_b, m, n, k, &alpha, a, b, &beta, c);
}
void BlasIf<DeviceType::kGPU>::OFGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, const int m, const int n,
const int k, const float16 alpha, const float16* a,
const float16* b, const float16 beta, float16* c) {
Gemm<half>(ctx, CblasRowMajor, trans_a, trans_b, m, n, k, reinterpret_cast<const half*>(&alpha),
reinterpret_cast<const half*>(a), reinterpret_cast<const half*>(b),
reinterpret_cast<const half*>(&beta), reinterpret_cast<half*>(c));
}
void BlasIf<DeviceType::kGPU>::OFHGemmWithFloat(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, const int m,
const int n, const int k, const float alpha,
const float16* a, const float16* b,
const float beta, float16* c) {
HGemmWithFloat(ctx, CblasRowMajor, trans_a, trans_b, m, n, k, &alpha,
reinterpret_cast<const half*>(a), reinterpret_cast<const half*>(b), &beta,
reinterpret_cast<half*>(c));
}
void BlasIf<DeviceType::kGPU>::OFBatchedGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, const int batch_size,
const int m, const int n, const int k,
const float alpha, const float* a, const float* b,
const float beta, float* c, float** buf) {
BatchedGemmImpl<float>(ctx, CblasRowMajor, trans_a, trans_b, batch_size, m, n, k, &alpha, a, b,
&beta, c, buf);
}
void BlasIf<DeviceType::kGPU>::OFBatchedGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, const int batch_size,
const int m, const int n, const int k,
const double alpha, const double* a, const double* b,
const double beta, double* c, double** buf) {
BatchedGemmImpl<double>(ctx, CblasRowMajor, trans_a, trans_b, batch_size, m, n, k, &alpha, a, b,
&beta, c, buf);
}
void BlasIf<DeviceType::kGPU>::OFBatchedGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, const int batch_size,
const int m, const int n, const int k,
const float16 alpha, const float16* a,
const float16* b, const float16 beta, float16* c,
float16** buf) {
BatchedGemmImpl<half>(ctx, CblasRowMajor, trans_a, trans_b, batch_size, m, n, k,
reinterpret_cast<const half*>(&alpha), reinterpret_cast<const half*>(a),
reinterpret_cast<const half*>(b), reinterpret_cast<const half*>(&beta),
reinterpret_cast<half*>(c), reinterpret_cast<half**>(buf));
}
void BlasIf<DeviceType::kGPU>::OFBatchedHGemmWithFloat(
DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a, enum CBLAS_TRANSPOSE trans_b,
const int batch_size, const int m, const int n, const int k, const float alpha,
const float16* a, const float16* b, const float beta, float16* c, float16** buf) {
BatchedHGemmWithFloatImpl(ctx, CblasRowMajor, trans_a, trans_b, batch_size, m, n, k, &alpha,
reinterpret_cast<const half*>(a), reinterpret_cast<const half*>(b),
&beta, reinterpret_cast<half*>(c), reinterpret_cast<half**>(buf));
}
void BlasIf<DeviceType::kGPU>::Axpy(DeviceCtx* ctx, const int n, const float alpha, const float* x,
const int incx, float* y, const int incy) {
cublas_axpy<float>(ctx->cublas_pmh_handle(), n, &alpha, x, incx, y, incy);
}
void BlasIf<DeviceType::kGPU>::Axpy(DeviceCtx* ctx, const int n, const double alpha,
const double* x, const int incx, double* y, const int incy) {
cublas_axpy<double>(ctx->cublas_pmh_handle(), n, &alpha, x, incx, y, incy);
}
void BlasIf<DeviceType::kGPU>::Axpy(DeviceCtx* ctx, const int n, const float16 alpha,
const float16* x, const int incx, float16* y, const int incy) {
hipLaunchKernelGGL(( AxpyHalfGpu), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
n, float16_2half(alpha), reinterpret_cast<const half*>(x), incx, reinterpret_cast<half*>(y),
incy);
}
} // namespace oneflow
| 95d8401932fbf06186ebe86a4296ddfb3f75395c.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/kernel/util/cuda_blas_interface.h"
#include "oneflow/core/device/cuda_util.h"
#include "oneflow/core/register/blob.h"
#include "oneflow/core/kernel/util/cuda_half_util.h"
namespace oneflow {
namespace {
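// Map CBLAS transpose flags to the corresponding cuBLAS operation.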
cublasOperation_t CblasTrans2CublasTrans(CBLAS_TRANSPOSE trans) {
cublasOperation_t cublas_trans;
if (trans == CBLAS_TRANSPOSE::CblasNoTrans) {
cublas_trans = cublasOperation_t::CUBLAS_OP_N;
} else if (trans == CBLAS_TRANSPOSE::CblasTrans) {
cublas_trans = cublasOperation_t::CUBLAS_OP_T;
} else if (trans == CBLAS_TRANSPOSE::CblasConjTrans) {
cublas_trans = cublasOperation_t::CUBLAS_OP_C;
} else {
// do nothing
}
return cublas_trans;
}
std::tuple<int, int, int, cublasOperation_t, cublasOperation_t> PrepareToCallCublasGemm(
enum CBLAS_TRANSPOSE trans_a, enum CBLAS_TRANSPOSE trans_b, const int m, const int n,
const int k) {
int lda = (trans_a == CblasNoTrans) ? k : m;
int ldb = (trans_b == CblasNoTrans) ? n : k;
int ldc = n;
cublasOperation_t cublas_trans_a = CblasTrans2CublasTrans(trans_a);
cublasOperation_t cublas_trans_b = CblasTrans2CublasTrans(trans_b);
return std::make_tuple(lda, ldb, ldc, cublas_trans_a, cublas_trans_b);
}
template<typename T>
void Gemm(DeviceCtx* ctx, const enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, const int m, const int n, const int k, const T* alpha,
const T* a, const T* b, const T* beta, T* c) {
int lda, ldb, ldc;
cublasOperation_t cublas_trans_a, cublas_trans_b;
std::tie(lda, ldb, ldc, cublas_trans_a, cublas_trans_b) =
PrepareToCallCublasGemm(trans_a, trans_b, m, n, k);
cublasHandle_t handle;
if (std::is_same<T, half>::value) {
handle = ctx->cublas_tensor_op_math_handle();
} else {
handle = ctx->cublas_pmh_handle();
}
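// cuBLAS is column-major, so compute C^T = B^T * A^T by swapping the operands and
// the (m, n) dimensions; the result is the row-major product C = A * B.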
cublas_gemm<T>(handle, cublas_trans_b, cublas_trans_a, n, m, k, alpha, b, ldb, a, lda, beta, c,
ldc);
}
template<>
void Gemm(DeviceCtx* ctx, const enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, const int m, const int n, const int k, const half* alpha,
const half* a, const half* b, const half* beta, half* c) {
const float alpha_f = __half2float(*alpha);
const float beta_f = __half2float(*beta);
int lda, ldb, ldc;
cublasOperation_t cublas_trans_a, cublas_trans_b;
std::tie(lda, ldb, ldc, cublas_trans_a, cublas_trans_b) =
PrepareToCallCublasGemm(trans_a, trans_b, m, n, k);
OF_CUBLAS_CHECK(cublasGemmEx(ctx->cublas_tensor_op_math_handle(), cublas_trans_b, cublas_trans_a,
n, m, k, &alpha_f, b, CUDA_R_16F, ldb, a, CUDA_R_16F, lda, &beta_f,
c, CUDA_R_16F, ldc, CUDA_R_32F, CUBLAS_GEMM_DFALT_TENSOR_OP));
}
void HGemmWithFloat(DeviceCtx* ctx, const enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, const int m, const int n, const int k,
const float* alpha, const half* a, const half* b, const float* beta, half* c) {
int lda, ldb, ldc;
cublasOperation_t cublas_trans_a, cublas_trans_b;
std::tie(lda, ldb, ldc, cublas_trans_a, cublas_trans_b) =
PrepareToCallCublasGemm(trans_a, trans_b, m, n, k);
cudaDataType_t data_type = GetCudaDataType(DataType::kFloat16);
OF_CUBLAS_CHECK(cublasSgemmEx(ctx->cublas_tensor_op_math_handle(), cublas_trans_b, cublas_trans_a,
n, m, k, alpha, b, data_type, ldb, a, data_type, lda, beta, c,
data_type, ldc));
}
std::tuple<int, int, int> CalcMNKForGemm(enum CBLAS_TRANSPOSE trans_a, const Blob* a,
const Blob* c) {
const auto& a_shape = a->shape_view();
const auto& c_shape = c->shape_view();
int m = c_shape.At(0);
int n = c_shape.Count(1);
int k = (trans_a == CblasNoTrans) ? a_shape.Count(1) : a_shape.At(0);
return std::make_tuple(m, n, k);
}
template<typename T>
void BlobGemmImpl(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a, enum CBLAS_TRANSPOSE trans_b,
T alpha, T beta, const Blob* a, const Blob* b, Blob* c) {
int m, n, k;
std::tie(m, n, k) = CalcMNKForGemm(trans_a, a, c);
BlasIf<DeviceType::kGPU>::OFGemm(ctx, trans_a, trans_b, m, n, k, alpha, a->dptr<T>(),
b->dptr<T>(), beta, c->mut_dptr<T>());
}
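// Write per-batch base addresses (start_ptr + i * stride_len) into a device-side pointer array,
// as required by the batched GEMM APIs.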
template<typename T>
__global__ void AssignStridedAddrGpu(T** dev_ptrs, T* start_ptr, int32_t stride_len,
int32_t stride_num) {
CUDA_1D_KERNEL_LOOP(i, stride_num) { dev_ptrs[i] = start_ptr + i * stride_len; }
}
template<typename T>
void AssignStridedAddr(DeviceCtx* ctx, T** dev_ptrs, T* start_ptr, int stride_len, int stride_num) {
AssignStridedAddrGpu<T>
<<<BlocksNum4ThreadsNum(stride_num), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
dev_ptrs, start_ptr, stride_len, stride_num);
}
template<typename T>
std::tuple<int, int, int, int, int, int, cublasOperation_t, cublasOperation_t, T**, T**, T**>
PrepareToCallBatchedGemm(DeviceCtx* ctx, const enum CBLAS_TRANSPOSE trans_a,
const enum CBLAS_TRANSPOSE trans_b, int batch_size, int m, int n, int k,
const T* a, const T* b, T* c, T** buf) {
const int a_stride = m * k;
const int b_stride = k * n;
const int c_stride = m * n;
const int lda = (trans_a == CblasNoTrans) ? k : m;
const int ldb = (trans_b == CblasNoTrans) ? n : k;
const int ldc = n;
cublasOperation_t cublas_trans_a = CblasTrans2CublasTrans(trans_a);
cublasOperation_t cublas_trans_b = CblasTrans2CublasTrans(trans_b);
T** dev_a_ptrs = buf;
T** dev_b_ptrs = buf + batch_size;
T** dev_c_ptrs = buf + 2 * batch_size;
AssignStridedAddr<T>(ctx, dev_a_ptrs, const_cast<T*>(a), a_stride, batch_size);
AssignStridedAddr<T>(ctx, dev_b_ptrs, const_cast<T*>(b), b_stride, batch_size);
AssignStridedAddr<T>(ctx, dev_c_ptrs, c, c_stride, batch_size);
return std::make_tuple(a_stride, b_stride, c_stride, lda, ldb, ldc, cublas_trans_a,
cublas_trans_b, dev_a_ptrs, dev_b_ptrs, dev_c_ptrs);
}
template<typename T>
cudaDataType_t GetCudaDataType4BatchedGemm() {
return CudaDataType<T>::value;
}
template<>
cudaDataType_t GetCudaDataType4BatchedGemm<half>() {
return CUDA_R_16F;
}
template<typename T>
void BatchedGemmImpl(DeviceCtx* ctx, const enum CBLAS_ORDER order,
const enum CBLAS_TRANSPOSE trans_a, const enum CBLAS_TRANSPOSE trans_b,
int batch_size, int m, int n, int k, const T* alpha, const T* a, const T* b,
const T* beta, T* c, T** buf) {
int a_stride, b_stride, c_stride;
int lda, ldb, ldc;
cublasOperation_t cublas_trans_a, cublas_trans_b;
T** dev_a_ptrs;
T** dev_b_ptrs;
T** dev_c_ptrs;
std::tie(a_stride, b_stride, c_stride, lda, ldb, ldc, cublas_trans_a, cublas_trans_b, dev_a_ptrs,
dev_b_ptrs, dev_c_ptrs) =
PrepareToCallBatchedGemm<T>(ctx, trans_a, trans_b, batch_size, m, n, k, a, b, c, buf);
#if CUDA_VERSION >= 9010
cudaDataType_t data_type = GetCudaDataType4BatchedGemm<T>();
cublasGemmBatchedEx(ctx->cublas_pmh_handle(), cublas_trans_b, cublas_trans_a, n, m, k,
reinterpret_cast<const void*>(alpha),
reinterpret_cast<const void**>(const_cast<const T**>(dev_b_ptrs)), data_type,
ldb, reinterpret_cast<const void**>(const_cast<const T**>(dev_a_ptrs)),
data_type, lda, reinterpret_cast<const void*>(beta),
reinterpret_cast<void**>(dev_c_ptrs), data_type, ldc, batch_size, data_type,
CUBLAS_GEMM_DEFAULT);
#else
cublas_gemmBatched<T>(ctx->cublas_pmh_handle(), cublas_trans_b, cublas_trans_a, n, m, k, alpha,
const_cast<const T**>(dev_b_ptrs), ldb, const_cast<const T**>(dev_a_ptrs),
lda, beta, dev_c_ptrs, ldc, batch_size);
#endif
}
#if CUDA_VERSION >= 9010
template<>
void BatchedGemmImpl(DeviceCtx* ctx, const enum CBLAS_ORDER order,
const enum CBLAS_TRANSPOSE trans_a, const enum CBLAS_TRANSPOSE trans_b,
int batch_size, int m, int n, int k, const half* alpha, const half* a,
const half* b, const half* beta, half* c, half** buf) {
float alpha_f = __half2float(*alpha);
float beta_f = __half2float(*beta);
int a_stride, b_stride, c_stride;
int lda, ldb, ldc;
cublasOperation_t cublas_trans_a, cublas_trans_b;
half** dev_a_ptrs;
half** dev_b_ptrs;
half** dev_c_ptrs;
std::tie(a_stride, b_stride, c_stride, lda, ldb, ldc, cublas_trans_a, cublas_trans_b, dev_a_ptrs,
dev_b_ptrs, dev_c_ptrs) =
PrepareToCallBatchedGemm<half>(ctx, trans_a, trans_b, batch_size, m, n, k, a, b, c, buf);
OF_CUBLAS_CHECK(cublasGemmBatchedEx(
ctx->cublas_tensor_op_math_handle(), CblasTrans2CublasTrans(trans_b),
CblasTrans2CublasTrans(trans_a), n, m, k, &alpha_f,
reinterpret_cast<const void**>(const_cast<const half**>(dev_b_ptrs)), CUDA_R_16F, ldb,
reinterpret_cast<const void**>(const_cast<const half**>(dev_a_ptrs)), CUDA_R_16F, lda,
&beta_f, reinterpret_cast<void**>(dev_c_ptrs), CUDA_R_16F, ldc, batch_size, CUDA_R_32F,
CUBLAS_GEMM_DFALT_TENSOR_OP));
}
#endif
void BatchedHGemmWithFloatImpl(DeviceCtx* ctx, const enum CBLAS_ORDER order,
const enum CBLAS_TRANSPOSE trans_a,
const enum CBLAS_TRANSPOSE trans_b, int batch_size, int m, int n,
int k, const float* alpha, const half* a, const half* b,
const float* beta, half* c, half** buf) {
int a_stride, b_stride, c_stride;
int lda, ldb, ldc;
cublasOperation_t cublas_trans_a, cublas_trans_b;
half** dev_a_ptrs;
half** dev_b_ptrs;
half** dev_c_ptrs;
std::tie(a_stride, b_stride, c_stride, lda, ldb, ldc, cublas_trans_a, cublas_trans_b, dev_a_ptrs,
dev_b_ptrs, dev_c_ptrs) =
PrepareToCallBatchedGemm<half>(ctx, trans_a, trans_b, batch_size, m, n, k, a, b, c, buf);
#if CUDA_VERSION >= 9010
cublasGemmBatchedEx(
ctx->cublas_pmh_handle(), cublas_trans_b, cublas_trans_a, n, m, k,
reinterpret_cast<const void*>(alpha),
reinterpret_cast<const void**>(const_cast<const half**>(dev_b_ptrs)), CUDA_R_16F, ldb,
reinterpret_cast<const void**>(const_cast<const half**>(dev_a_ptrs)), CUDA_R_16F, lda,
reinterpret_cast<const void*>(beta), reinterpret_cast<void**>(dev_c_ptrs), CUDA_R_16F, ldc,
batch_size, CUDA_R_32F, CUBLAS_GEMM_DEFAULT);
#else
LOG(FATAL) << "BatchedHGemmWithFloatImpl() does not support CUDA_VERSION below 9010";
#endif
}
__global__ void AxpyHalfGpu(const int n, const half alpha, const half* x, const int incx, half* y,
const int incy) {
#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
CUDA_1D_KERNEL_LOOP(i, n) { y[i * incy] = __hfma(alpha, x[i * incx], y[i * incy]); }
#else
HALF_CHECK_FAILED;
#endif // __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
}
} // namespace
void BlasIf<DeviceType::kGPU>::BlobGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, float alpha, float beta,
const Blob* a, const Blob* b, Blob* c) {
BlobGemmImpl<float>(ctx, trans_a, trans_b, alpha, beta, a, b, c);
}
void BlasIf<DeviceType::kGPU>::BlobGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, double alpha, double beta,
const Blob* a, const Blob* b, Blob* c) {
BlobGemmImpl<double>(ctx, trans_a, trans_b, alpha, beta, a, b, c);
}
void BlasIf<DeviceType::kGPU>::BlobGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, float16 alpha, float16 beta,
const Blob* a, const Blob* b, Blob* c) {
BlobGemmImpl<float16>(ctx, trans_a, trans_b, alpha, beta, a, b, c);
}
void BlasIf<DeviceType::kGPU>::BlobHGemmWithFloat(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, float alpha,
float beta, const Blob* a, const Blob* b,
Blob* c) {
int m, n, k;
std::tie(m, n, k) = CalcMNKForGemm(trans_a, a, c);
BlasIf<DeviceType::kGPU>::OFHGemmWithFloat(ctx, trans_a, trans_b, m, n, k, alpha,
a->dptr<float16>(), b->dptr<float16>(), beta,
c->mut_dptr<float16>());
}
void BlasIf<DeviceType::kGPU>::OFGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, const int m, const int n,
const int k, const float alpha, const float* a,
const float* b, const float beta, float* c) {
Gemm<float>(ctx, CblasRowMajor, trans_a, trans_b, m, n, k, &alpha, a, b, &beta, c);
}
void BlasIf<DeviceType::kGPU>::OFGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, const int m, const int n,
const int k, const double alpha, const double* a,
const double* b, const double beta, double* c) {
Gemm<double>(ctx, CblasRowMajor, trans_a, trans_b, m, n, k, &alpha, a, b, &beta, c);
}
void BlasIf<DeviceType::kGPU>::OFGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, const int m, const int n,
const int k, const float16 alpha, const float16* a,
const float16* b, const float16 beta, float16* c) {
Gemm<half>(ctx, CblasRowMajor, trans_a, trans_b, m, n, k, reinterpret_cast<const half*>(&alpha),
reinterpret_cast<const half*>(a), reinterpret_cast<const half*>(b),
reinterpret_cast<const half*>(&beta), reinterpret_cast<half*>(c));
}
void BlasIf<DeviceType::kGPU>::OFHGemmWithFloat(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, const int m,
const int n, const int k, const float alpha,
const float16* a, const float16* b,
const float beta, float16* c) {
HGemmWithFloat(ctx, CblasRowMajor, trans_a, trans_b, m, n, k, &alpha,
reinterpret_cast<const half*>(a), reinterpret_cast<const half*>(b), &beta,
reinterpret_cast<half*>(c));
}
void BlasIf<DeviceType::kGPU>::OFBatchedGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, const int batch_size,
const int m, const int n, const int k,
const float alpha, const float* a, const float* b,
const float beta, float* c, float** buf) {
BatchedGemmImpl<float>(ctx, CblasRowMajor, trans_a, trans_b, batch_size, m, n, k, &alpha, a, b,
&beta, c, buf);
}
void BlasIf<DeviceType::kGPU>::OFBatchedGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, const int batch_size,
const int m, const int n, const int k,
const double alpha, const double* a, const double* b,
const double beta, double* c, double** buf) {
BatchedGemmImpl<double>(ctx, CblasRowMajor, trans_a, trans_b, batch_size, m, n, k, &alpha, a, b,
&beta, c, buf);
}
void BlasIf<DeviceType::kGPU>::OFBatchedGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, const int batch_size,
const int m, const int n, const int k,
const float16 alpha, const float16* a,
const float16* b, const float16 beta, float16* c,
float16** buf) {
BatchedGemmImpl<half>(ctx, CblasRowMajor, trans_a, trans_b, batch_size, m, n, k,
reinterpret_cast<const half*>(&alpha), reinterpret_cast<const half*>(a),
reinterpret_cast<const half*>(b), reinterpret_cast<const half*>(&beta),
reinterpret_cast<half*>(c), reinterpret_cast<half**>(buf));
}
void BlasIf<DeviceType::kGPU>::OFBatchedHGemmWithFloat(
DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a, enum CBLAS_TRANSPOSE trans_b,
const int batch_size, const int m, const int n, const int k, const float alpha,
const float16* a, const float16* b, const float beta, float16* c, float16** buf) {
BatchedHGemmWithFloatImpl(ctx, CblasRowMajor, trans_a, trans_b, batch_size, m, n, k, &alpha,
reinterpret_cast<const half*>(a), reinterpret_cast<const half*>(b),
&beta, reinterpret_cast<half*>(c), reinterpret_cast<half**>(buf));
}
void BlasIf<DeviceType::kGPU>::Axpy(DeviceCtx* ctx, const int n, const float alpha, const float* x,
const int incx, float* y, const int incy) {
cublas_axpy<float>(ctx->cublas_pmh_handle(), n, &alpha, x, incx, y, incy);
}
void BlasIf<DeviceType::kGPU>::Axpy(DeviceCtx* ctx, const int n, const double alpha,
const double* x, const int incx, double* y, const int incy) {
cublas_axpy<double>(ctx->cublas_pmh_handle(), n, &alpha, x, incx, y, incy);
}
void BlasIf<DeviceType::kGPU>::Axpy(DeviceCtx* ctx, const int n, const float16 alpha,
const float16* x, const int incx, float16* y, const int incy) {
AxpyHalfGpu<<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
n, float16_2half(alpha), reinterpret_cast<const half*>(x), incx, reinterpret_cast<half*>(y),
incy);
}
} // namespace oneflow
|
2ea8fd45d489d1ef14f4b38bee28d17b4d7a8dcc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define bidx (blockIdx.x)
#define bidy (blockIdx.y)
#define tidx (threadIdx.x)
#define tidy (threadIdx.y)
#define gridDimX (gridDim.x)
#define gridDimY (gridDim.y)
#define COALESCED_NUM 16
#define blockDimX 16
#define blockDimY 1
#define idx (bidx*blockDimX+tidx)
#define idy (bidy*blockDimY+tidy)
#define merger_y 1
#define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM)
#define B(y,x) B[(y)*WIDTH_B+(x)]
#define WIDTH_C 2048
#define WIDTH_B 2048
#define C(y,x) C[(y)*WIDTH_C+(x)]
#define WIDTH_A 2048
#define A(y,x) A[(y)*WIDTH_A+(x)]
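// Tiled matrix multiply: each block stages a 16-element slice of one row of A in shared
// memory and accumulates dot products against columns of B read from global memory.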
__global__ void matmul(float * A, float * B, float * C, int width, int height)
{
__shared__ float shared_0[16];
int i;
float sum;
sum=0;
for (i=0; i<width; i=(i+16))
{
int it_1;
shared_0[(tidx+0)]=A(idy, (i+tidx));
__syncthreads();
#pragma unroll
for (it_1=0; it_1<16; it_1=(it_1+1))
{
float a;
float b;
a=shared_0[it_1];
b=B((it_1+i), idx);
sum+=(a*b);
}
__syncthreads();
}
{
C(idy, idx)=sum;
}
}
| 2ea8fd45d489d1ef14f4b38bee28d17b4d7a8dcc.cu | #define bidx (blockIdx.x)
#define bidy (blockIdx.y)
#define tidx (threadIdx.x)
#define tidy (threadIdx.y)
#define gridDimX (gridDim.x)
#define gridDimY (gridDim.y)
#define COALESCED_NUM 16
#define blockDimX 16
#define blockDimY 1
#define idx (bidx*blockDimX+tidx)
#define idy (bidy*blockDimY+tidy)
#define merger_y 1
#define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM)
#define B(y,x) B[(y)*WIDTH_B+(x)]
#define WIDTH_C 2048
#define WIDTH_B 2048
#define C(y,x) C[(y)*WIDTH_C+(x)]
#define WIDTH_A 2048
#define A(y,x) A[(y)*WIDTH_A+(x)]
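// Tiled matrix multiply: each block stages a 16-element slice of one row of A in shared
// memory and accumulates dot products against columns of B read from global memory.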
__global__ void matmul(float * A, float * B, float * C, int width, int height)
{
__shared__ float shared_0[16];
int i;
float sum;
sum=0;
for (i=0; i<width; i=(i+16))
{
int it_1;
shared_0[(tidx+0)]=A(idy, (i+tidx));
__syncthreads();
#pragma unroll
for (it_1=0; it_1<16; it_1=(it_1+1))
{
float a;
float b;
a=shared_0[it_1];
b=B((it_1+i), idx);
sum+=(a*b);
}
__syncthreads();
}
{
C(idy, idx)=sum;
}
}
|
c1cd3820a9060a5a3e0861d3f27176596cd98ba1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define tamBloque 64
__constant__ int mascaraX[3][3];
__constant__ int mascaraY[3][3];
// Kernel that runs on the GPU grid
__global__ void sobelKernel(float *entrada, float *salida, int filas, int columnas){
int sumx = 0;
int sumy = 0;
int SUM = 0;
int x, y;
int i, j, pix, piy;
int R, G, B, NC, posR, posG, posB;
float newPixel;
y= blockIdx.y;
x= blockIdx.x * blockDim.x + threadIdx.x;
if (y == 0 || y == filas-1 || x==0 || x == columnas-1){
SUM = 0;
}
else{
for(i=-1; i<=1; i++) {
for(j=-1; j<=1; j++) {
pix = j + x;
piy = i + y;
posR= piy*columnas + pix; // position in the vector of the R component of the pixel we are working on
R = (int)entrada[posR]; // imagen(pix,piy,0,0);
posG= filas*columnas + piy*columnas + pix; // position in the vector of the G component of the pixel we are working on
G = (int)entrada[posG]; // imagen(pix,piy,0,1);
posB= 2*filas*columnas + piy*columnas + pix; // position in the vector of the B component of the pixel we are working on
B = (int)entrada[posB]; // imagen(pix,piy,0,2);
NC = (R+G+B)/3;
sumx = sumx + (NC) * mascaraX[j+1][i+1];
sumy = sumy + (NC) * mascaraY[j+1][i+1];
}
}
SUM = abs(sumx) + abs(sumy);
}
if(SUM>255){
SUM=255;
}
newPixel = 255 - (float)(SUM);
salida[y*columnas + x] = newPixel; // R component
salida[filas*columnas + y*columnas + x] = newPixel; // G component
salida[2*filas*columnas + y*columnas + x] = newPixel; // B component
}
// Function that launches the kernel execution on the GPU
void calcularSobelCuda (float *hEntrada, float *hSalida, int filas, int columnas){
float *dEntrada, *dSalida;
int tam;
dim3 DimGrid, DimBlock;
int Gx [3][3]; int Gy [3][3];
// Sobel Horizontal Mask
Gx[0][0] = 1; Gx[0][1] = 0; Gx[0][2] = -1;
Gx[1][0] = 2; Gx[1][1] = 0; Gx[1][2] = -2;
Gx[2][0] = 1; Gx[2][1] = 0; Gx[2][2] = -1;
// Sobel Vertical Mask
Gy[0][0] = 1; Gy[0][1] = 2; Gy[0][2] = 1;
Gy[1][0] = 0; Gy[1][1] = 0; Gy[1][2] = 0;
Gy[2][0] = -1; Gy[2][1] =-2; Gy[2][2] = -1;
// Copy the masks to GPU constant memory
hipMemcpyToSymbol(mascaraX, Gx, 3*3*sizeof(int));
hipMemcpyToSymbol(mascaraY, Gy, 3*3*sizeof(int));
// Memory footprint of the image
tam= filas * columnas * 3 * sizeof(float); // 3 colors (R, G, B)
// Allocate space on the GPU and copy the input image to it
hipMalloc((void **) &dEntrada, tam);
hipMemcpy(dEntrada,hEntrada,tam,hipMemcpyHostToDevice);
// Allocate GPU space for the output image
hipMalloc((void **) &dSalida, tam);
// Grid and thread-block dimensions
DimBlock= dim3(tamBloque, 1, 1); // blocks of tamBloque threads
DimGrid= dim3( ((columnas-1)/tamBloque)+1, filas, 1); // 2D grid, x = blocks needed to cover one image row, y = number of image rows
// Kernel launch
hipLaunchKernelGGL(( sobelKernel), dim3(DimGrid),dim3(DimBlock), 0, 0, dEntrada,dSalida,filas,columnas);
// Copy results from GPU to host
hipMemcpy(hSalida,dSalida,tam,hipMemcpyDeviceToHost);
// Free GPU memory
hipFree(dEntrada);
hipFree(dSalida);
}
| c1cd3820a9060a5a3e0861d3f27176596cd98ba1.cu | #define tamBloque 64
__constant__ int mascaraX[3][3];
__constant__ int mascaraY[3][3];
// Kernel that runs on the GPU grid
__global__ void sobelKernel(float *entrada, float *salida, int filas, int columnas){
int sumx = 0;
int sumy = 0;
int SUM = 0;
int x, y;
int i, j, pix, piy;
int R, G, B, NC, posR, posG, posB;
float newPixel;
y= blockIdx.y;
x= blockIdx.x * blockDim.x + threadIdx.x;
if (y == 0 || y == filas-1 || x==0 || x == columnas-1){
SUM = 0;
}
else{
for(i=-1; i<=1; i++) {
for(j=-1; j<=1; j++) {
pix = j + x;
piy = i + y;
posR= piy*columnas + pix; // position in the vector of the R component of the pixel we are working on
R = (int)entrada[posR]; // imagen(pix,piy,0,0);
posG= filas*columnas + piy*columnas + pix; // position in the vector of the G component of the pixel we are working on
G = (int)entrada[posG]; // imagen(pix,piy,0,1);
posB= 2*filas*columnas + piy*columnas + pix; // position in the vector of the B component of the pixel we are working on
B = (int)entrada[posB]; // imagen(pix,piy,0,2);
NC = (R+G+B)/3;
sumx = sumx + (NC) * mascaraX[j+1][i+1];
sumy = sumy + (NC) * mascaraY[j+1][i+1];
}
}
SUM = abs(sumx) + abs(sumy);
}
if(SUM>255){
SUM=255;
}
newPixel = 255 - (float)(SUM);
salida[y*columnas + x] = newPixel; // R component
salida[filas*columnas + y*columnas + x] = newPixel; // G component
salida[2*filas*columnas + y*columnas + x] = newPixel; // B component
}
// Function that launches the kernel execution on the GPU
void calcularSobelCuda (float *hEntrada, float *hSalida, int filas, int columnas){
float *dEntrada, *dSalida;
int tam;
dim3 DimGrid, DimBlock;
int Gx [3][3]; int Gy [3][3];
// Sobel Horizontal Mask
Gx[0][0] = 1; Gx[0][1] = 0; Gx[0][2] = -1;
Gx[1][0] = 2; Gx[1][1] = 0; Gx[1][2] = -2;
Gx[2][0] = 1; Gx[2][1] = 0; Gx[2][2] = -1;
// Sobel Vertical Mask
Gy[0][0] = 1; Gy[0][1] = 2; Gy[0][2] = 1;
Gy[1][0] = 0; Gy[1][1] = 0; Gy[1][2] = 0;
Gy[2][0] = -1; Gy[2][1] =-2; Gy[2][2] = -1;
// Copy the masks to GPU constant memory
cudaMemcpyToSymbol(mascaraX, Gx, 3*3*sizeof(int));
cudaMemcpyToSymbol(mascaraY, Gy, 3*3*sizeof(int));
// Memory footprint of the image
tam= filas * columnas * 3 * sizeof(float); // 3 colors (R, G, B)
// Allocate space on the GPU and copy the input image to it
cudaMalloc((void **) &dEntrada, tam);
cudaMemcpy(dEntrada,hEntrada,tam,cudaMemcpyHostToDevice);
// Allocate GPU space for the output image
cudaMalloc((void **) &dSalida, tam);
// Grid and thread-block dimensions
DimBlock= dim3(tamBloque, 1, 1); // blocks of tamBloque threads
DimGrid= dim3( ((columnas-1)/tamBloque)+1, filas, 1); // 2D grid, x = blocks needed to cover one image row, y = number of image rows
// Kernel launch
sobelKernel<<<DimGrid,DimBlock>>>(dEntrada,dSalida,filas,columnas);
// Copy results from GPU to host
cudaMemcpy(hSalida,dSalida,tam,cudaMemcpyDeviceToHost);
// Free GPU memory
cudaFree(dEntrada);
cudaFree(dSalida);
}
|
59136881d23560636c54753427f42bd0b6dd27bd.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
__global__ void hello_cuda(){
printf("Hello CUDA world\n");
}
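// 2x2 grid of 1x3 blocks: 4 blocks * 3 threads = 12 "Hello" lines in total.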
int main(int argc, char ** argv) {
dim3 grid(2,2);
dim3 block(1,3);
hipLaunchKernelGGL(( hello_cuda), dim3(grid), dim3(block), 0, 0);
hipDeviceSynchronize();
hipDeviceReset();
return 0;
}
| 59136881d23560636c54753427f42bd0b6dd27bd.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
__global__ void hello_cuda(){
printf("Hello CUDA world\n");
}
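// 2x2 grid of 1x3 blocks: 4 blocks * 3 threads = 12 "Hello" lines in total.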
int main(int argc, char ** argv) {
dim3 grid(2,2);
dim3 block(1,3);
hello_cuda<<<grid, block>>>();
cudaDeviceSynchronize();
cudaDeviceReset();
return 0;
}
|
e7be12e13076f857664ce739d573c90642ceb789.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../utils.hpp"
#include <string>
#include <iostream>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/reduce.h>
#include <thrust/extrema.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/sequence.h>
#include "../loadSaveImage.cpp"
#include <stdio.h>
//simple cross correlation kernel copied from Mike's IPython Notebook
__global__ void naive_normalized_cross_correlation(
float* d_response,
unsigned char* d_original,
unsigned char* d_template,
int num_pixels_y,
int num_pixels_x,
int template_half_height,
int template_height,
int template_half_width,
int template_width,
int template_size,
float template_mean
)
{
int ny = num_pixels_y;
int nx = num_pixels_x;
int knx = template_width;
int2 image_index_2d = make_int2( ( blockIdx.x * blockDim.x ) + threadIdx.x, ( blockIdx.y * blockDim.y ) + threadIdx.y );
int image_index_1d = ( nx * image_index_2d.y ) + image_index_2d.x;
if ( image_index_2d.x < nx && image_index_2d.y < ny )
{
//
// compute image mean
//
float image_sum = 0.0f;
for ( int y = -template_half_height; y <= template_half_height; y++ )
{
for ( int x = -template_half_width; x <= template_half_width; x++ )
{
int2 image_offset_index_2d = make_int2( image_index_2d.x + x, image_index_2d.y + y );
int2 image_offset_index_2d_clamped = make_int2( min( nx - 1, max( 0, image_offset_index_2d.x ) ), min( ny - 1, max( 0, image_offset_index_2d.y ) ) );
int image_offset_index_1d_clamped = ( nx * image_offset_index_2d_clamped.y ) + image_offset_index_2d_clamped.x;
unsigned char image_offset_value = d_original[ image_offset_index_1d_clamped ];
image_sum += (float)image_offset_value;
}
}
float image_mean = image_sum / (float)template_size;
//
// compute sums
//
float sum_of_image_template_diff_products = 0.0f;
float sum_of_squared_image_diffs = 0.0f;
float sum_of_squared_template_diffs = 0.0f;
for ( int y = -template_half_height; y <= template_half_height; y++ )
{
for ( int x = -template_half_width; x <= template_half_width; x++ )
{
int2 image_offset_index_2d = make_int2( image_index_2d.x + x, image_index_2d.y + y );
int2 image_offset_index_2d_clamped = make_int2( min( nx - 1, max( 0, image_offset_index_2d.x ) ), min( ny - 1, max( 0, image_offset_index_2d.y ) ) );
int image_offset_index_1d_clamped = ( nx * image_offset_index_2d_clamped.y ) + image_offset_index_2d_clamped.x;
unsigned char image_offset_value = d_original[ image_offset_index_1d_clamped ];
float image_diff = (float)image_offset_value - image_mean;
int2 template_index_2d = make_int2( x + template_half_width, y + template_half_height );
int template_index_1d = ( knx * template_index_2d.y ) + template_index_2d.x;
unsigned char template_value = d_template[ template_index_1d ];
float template_diff = template_value - template_mean;
float image_template_diff_product = image_offset_value * template_diff;
float squared_image_diff = image_diff * image_diff;
float squared_template_diff = template_diff * template_diff;
sum_of_image_template_diff_products += image_template_diff_product;
sum_of_squared_image_diffs += squared_image_diff;
sum_of_squared_template_diffs += squared_template_diff;
}
}
//
// compute final result
//
float result_value = 0.0f;
if ( sum_of_squared_image_diffs != 0 && sum_of_squared_template_diffs != 0 )
{
result_value = sum_of_image_template_diff_products / sqrt( sum_of_squared_image_diffs * sum_of_squared_template_diffs );
}
d_response[ image_index_1d ] = result_value;
}
}
__global__ void remove_redness_from_coordinates(
const unsigned int* d_coordinates,
unsigned char* d_r,
unsigned char* d_b,
unsigned char* d_g,
unsigned char* d_r_output,
int num_coordinates,
int num_pixels_y,
int num_pixels_x,
int template_half_height,
int template_half_width
)
{
int ny = num_pixels_y;
int nx = num_pixels_x;
int global_index_1d = ( blockIdx.x * blockDim.x ) + threadIdx.x;
int imgSize = num_pixels_x * num_pixels_y;
if ( global_index_1d < num_coordinates )
{
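// d_coordinates is expected to be sorted by ascending response, so the strongest
// matches are read from the end of the array.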
unsigned int image_index_1d = d_coordinates[ imgSize - global_index_1d - 1 ];
ushort2 image_index_2d = make_ushort2(image_index_1d % num_pixels_x, image_index_1d / num_pixels_x);
for ( int y = image_index_2d.y - template_half_height; y <= image_index_2d.y + template_half_height; y++ )
{
for ( int x = image_index_2d.x - template_half_width; x <= image_index_2d.x + template_half_width; x++ )
{
int2 image_offset_index_2d = make_int2( x, y );
int2 image_offset_index_2d_clamped = make_int2( min( nx - 1, max( 0, image_offset_index_2d.x ) ), min( ny - 1, max( 0, image_offset_index_2d.y ) ) );
int image_offset_index_1d_clamped = ( nx * image_offset_index_2d_clamped.y ) + image_offset_index_2d_clamped.x;
unsigned char g_value = d_g[ image_offset_index_1d_clamped ];
unsigned char b_value = d_b[ image_offset_index_1d_clamped ];
unsigned int gb_average = ( g_value + b_value ) / 2;
d_r_output[ image_offset_index_1d_clamped ] = (unsigned char)gb_average;
}
}
}
}
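// Thrust functors: split an RGBA pixel into three byte channels, recombine three channels
// into an RGBA pixel, and multiply the three per-channel responses into a single score.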
struct splitChannels : thrust::unary_function<uchar4, thrust::tuple<unsigned char, unsigned char, unsigned char> >{
__host__ __device__
thrust::tuple<unsigned char, unsigned char, unsigned char> operator()(uchar4 pixel) {
return thrust::make_tuple(pixel.x, pixel.y, pixel.z);
}
};
struct combineChannels : thrust::unary_function<thrust::tuple<unsigned char, unsigned char, unsigned char>, uchar4> {
__host__ __device__
uchar4 operator()(thrust::tuple<unsigned char, unsigned char, unsigned char> t) {
return make_uchar4(thrust::get<0>(t), thrust::get<1>(t), thrust::get<2>(t), 255);
}
};
struct combineResponses : thrust::unary_function<float, thrust::tuple<float, float, float> > {
__host__ __device__
float operator()(thrust::tuple<float, float, float> t) {
return thrust::get<0>(t) * thrust::get<1>(t) * thrust::get<2>(t);
}
};
//we need to save the input so we can remove the redeye for the output
static thrust::device_vector<unsigned char> d_red;
static thrust::device_vector<unsigned char> d_blue;
static thrust::device_vector<unsigned char> d_green;
static size_t numRowsImg;
static size_t numColsImg;
static size_t templateHalfWidth;
static size_t templateHalfHeight;
//return types are void since any internal error will be handled by quitting
//no point in returning error codes...
void preProcess(unsigned int **inputVals,
unsigned int **inputPos,
unsigned int **outputVals,
unsigned int **outputPos,
size_t &numElem,
const std::string& filename) {
//make sure the context initializes ok
checkCudaErrors(hipFree(0));
uchar4 *inImg;
uchar4 *eyeTemplate;
size_t numRowsTemplate, numColsTemplate;
std::string templateFilename("red_eye_effect_template_5.jpg");
loadImageRGBA(filename, &inImg, &numRowsImg, &numColsImg);
loadImageRGBA(templateFilename, &eyeTemplate, &numRowsTemplate, &numColsTemplate);
templateHalfWidth = (numColsTemplate - 1) / 2;
templateHalfHeight = (numRowsTemplate - 1) / 2;
//we need to split each image into its separate channels
//use thrust to demonstrate basic uses
numElem = numRowsImg * numColsImg;
size_t templateSize = numRowsTemplate * numColsTemplate;
thrust::device_vector<uchar4> d_Img(inImg, inImg + numRowsImg * numColsImg);
thrust::device_vector<uchar4> d_Template(eyeTemplate, eyeTemplate + numRowsTemplate * numColsTemplate);
d_red. resize(numElem);
d_blue. resize(numElem);
d_green.resize(numElem);
thrust::device_vector<unsigned char> d_red_template(templateSize);
thrust::device_vector<unsigned char> d_blue_template(templateSize);
thrust::device_vector<unsigned char> d_green_template(templateSize);
//split the image
thrust::transform(d_Img.begin(), d_Img.end(), thrust::make_zip_iterator(
thrust::make_tuple(d_red.begin(),
d_blue.begin(),
d_green.begin())),
splitChannels());
//split the template
thrust::transform(d_Template.begin(), d_Template.end(),
thrust::make_zip_iterator(thrust::make_tuple(d_red_template.begin(),
d_blue_template.begin(),
d_green_template.begin())),
splitChannels());
thrust::device_vector<float> d_red_response(numElem);
thrust::device_vector<float> d_blue_response(numElem);
thrust::device_vector<float> d_green_response(numElem);
//need to compute the mean for each template channel
unsigned int r_sum = thrust::reduce(d_red_template.begin(), d_red_template.end(), 0);
unsigned int b_sum = thrust::reduce(d_blue_template.begin(), d_blue_template.end(), 0);
unsigned int g_sum = thrust::reduce(d_green_template.begin(), d_green_template.end(), 0);
float r_mean = (double)r_sum / templateSize;
float b_mean = (double)b_sum / templateSize;
float g_mean = (double)g_sum / templateSize;
const dim3 blockSize(32, 8, 1);
const dim3 gridSize( (numColsImg + blockSize.x - 1) / blockSize.x, (numRowsImg + blockSize.y - 1) / blockSize.y, 1);
//now compute the cross-correlations for each channel
hipLaunchKernelGGL(( naive_normalized_cross_correlation), dim3(gridSize), dim3(blockSize), 0, 0, thrust::raw_pointer_cast(d_red_response.data()),
thrust::raw_pointer_cast(d_red.data()),
thrust::raw_pointer_cast(d_red_template.data()),
numRowsImg, numColsImg,
templateHalfHeight, numRowsTemplate,
templateHalfWidth, numColsTemplate,
numRowsTemplate * numColsTemplate, r_mean);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( naive_normalized_cross_correlation), dim3(gridSize), dim3(blockSize), 0, 0, thrust::raw_pointer_cast(d_blue_response.data()),
thrust::raw_pointer_cast(d_blue.data()),
thrust::raw_pointer_cast(d_blue_template.data()),
numRowsImg, numColsImg,
templateHalfHeight, numRowsTemplate,
templateHalfWidth, numColsTemplate,
numRowsTemplate * numColsTemplate, b_mean);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( naive_normalized_cross_correlation), dim3(gridSize), dim3(blockSize), 0, 0, thrust::raw_pointer_cast(d_green_response.data()),
thrust::raw_pointer_cast(d_green.data()),
thrust::raw_pointer_cast(d_green_template.data()),
numRowsImg, numColsImg,
templateHalfHeight, numRowsTemplate,
templateHalfWidth, numColsTemplate,
numRowsTemplate * numColsTemplate, g_mean);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
//generate combined response - multiply all channels together
thrust::device_vector<float> d_combined_response(numElem);
thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(
d_red_response.begin(),
d_blue_response.begin(),
d_green_response.begin())),
thrust::make_zip_iterator(thrust::make_tuple(
d_red_response.end(),
d_blue_response.end(),
d_green_response.end())),
d_combined_response.begin(),
combineResponses());
//find max/min of response
typedef thrust::device_vector<float>::iterator floatIt;
thrust::pair<floatIt, floatIt> minmax = thrust::minmax_element(d_combined_response.begin(), d_combined_response.end());
float bias = *minmax.first;
//we need to make all the numbers positive so that the students can sort them without any bit twiddling
thrust::transform(d_combined_response.begin(), d_combined_response.end(), thrust::make_constant_iterator(-bias),
d_combined_response.begin(), thrust::plus<float>());
//now we need to create the 1-D coordinates that will be attached to the keys
thrust::device_vector<unsigned int> coords(numElem);
thrust::sequence(coords.begin(), coords.end()); //[0, ..., numElem - 1]
//allocate memory for output and copy since our device vectors will go out of scope
//and be deleted
checkCudaErrors(hipMalloc(inputVals, sizeof(unsigned int) * numElem));
checkCudaErrors(hipMalloc(inputPos, sizeof(unsigned int) * numElem));
checkCudaErrors(hipMalloc(outputVals, sizeof(unsigned int) * numElem));
checkCudaErrors(hipMalloc(outputPos, sizeof(unsigned int) * numElem));
hipMemcpy(*inputVals, thrust::raw_pointer_cast(d_combined_response.data()), sizeof(unsigned int) * numElem, hipMemcpyDeviceToDevice);
hipMemcpy(*inputPos, thrust::raw_pointer_cast(coords.data()), sizeof(unsigned int) * numElem, hipMemcpyDeviceToDevice);
checkCudaErrors(hipMemset(*outputVals, 0, sizeof(unsigned int) * numElem));
checkCudaErrors(hipMemset(*outputPos, 0, sizeof(unsigned int) * numElem));
}
void postProcess(const unsigned int* const outputVals,
const unsigned int* const outputPos,
const size_t numElems,
const std::string& output_file){
thrust::device_vector<unsigned char> d_output_red = d_red;
const dim3 blockSize(256, 1, 1);
const dim3 gridSize( (40 + blockSize.x - 1) / blockSize.x, 1, 1);
hipLaunchKernelGGL(( remove_redness_from_coordinates), dim3(gridSize), dim3(blockSize), 0, 0, outputPos,
thrust::raw_pointer_cast(d_red.data()),
thrust::raw_pointer_cast(d_blue.data()),
thrust::raw_pointer_cast(d_green.data()),
thrust::raw_pointer_cast(d_output_red.data()),
40,
numRowsImg, numColsImg,
9, 9);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
//combine the new red channel with original blue and green for output
thrust::device_vector<uchar4> d_outputImg(numElems);
thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(
d_output_red.begin(),
d_blue.begin(),
d_green.begin())),
thrust::make_zip_iterator(thrust::make_tuple(
d_output_red.end(),
d_blue.end(),
d_green.end())),
d_outputImg.begin(),
combineChannels());
thrust::host_vector<uchar4> h_Img = d_outputImg;
saveImageRGBA(&h_Img[0], numRowsImg, numColsImg, output_file);
//Clear the global vectors otherwise something goes wrong trying to free them
d_red.clear(); d_red.shrink_to_fit();
d_blue.clear(); d_blue.shrink_to_fit();
d_green.clear(); d_green.shrink_to_fit();
} | e7be12e13076f857664ce739d573c90642ceb789.cu | #include "../utils.hpp"
#include <string>
#include <iostream>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/reduce.h>
#include <thrust/extrema.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/sequence.h>
#include "../loadSaveImage.cpp"
#include <stdio.h>
//simple cross correlation kernel copied from Mike's IPython Notebook
__global__ void naive_normalized_cross_correlation(
float* d_response,
unsigned char* d_original,
unsigned char* d_template,
int num_pixels_y,
int num_pixels_x,
int template_half_height,
int template_height,
int template_half_width,
int template_width,
int template_size,
float template_mean
)
{
int ny = num_pixels_y;
int nx = num_pixels_x;
int knx = template_width;
int2 image_index_2d = make_int2( ( blockIdx.x * blockDim.x ) + threadIdx.x, ( blockIdx.y * blockDim.y ) + threadIdx.y );
int image_index_1d = ( nx * image_index_2d.y ) + image_index_2d.x;
if ( image_index_2d.x < nx && image_index_2d.y < ny )
{
//
// compute image mean
//
float image_sum = 0.0f;
for ( int y = -template_half_height; y <= template_half_height; y++ )
{
for ( int x = -template_half_width; x <= template_half_width; x++ )
{
int2 image_offset_index_2d = make_int2( image_index_2d.x + x, image_index_2d.y + y );
int2 image_offset_index_2d_clamped = make_int2( min( nx - 1, max( 0, image_offset_index_2d.x ) ), min( ny - 1, max( 0, image_offset_index_2d.y ) ) );
int image_offset_index_1d_clamped = ( nx * image_offset_index_2d_clamped.y ) + image_offset_index_2d_clamped.x;
unsigned char image_offset_value = d_original[ image_offset_index_1d_clamped ];
image_sum += (float)image_offset_value;
}
}
float image_mean = image_sum / (float)template_size;
//
// compute sums
//
float sum_of_image_template_diff_products = 0.0f;
float sum_of_squared_image_diffs = 0.0f;
float sum_of_squared_template_diffs = 0.0f;
for ( int y = -template_half_height; y <= template_half_height; y++ )
{
for ( int x = -template_half_width; x <= template_half_width; x++ )
{
int2 image_offset_index_2d = make_int2( image_index_2d.x + x, image_index_2d.y + y );
int2 image_offset_index_2d_clamped = make_int2( min( nx - 1, max( 0, image_offset_index_2d.x ) ), min( ny - 1, max( 0, image_offset_index_2d.y ) ) );
int image_offset_index_1d_clamped = ( nx * image_offset_index_2d_clamped.y ) + image_offset_index_2d_clamped.x;
unsigned char image_offset_value = d_original[ image_offset_index_1d_clamped ];
float image_diff = (float)image_offset_value - image_mean;
int2 template_index_2d = make_int2( x + template_half_width, y + template_half_height );
int template_index_1d = ( knx * template_index_2d.y ) + template_index_2d.x;
unsigned char template_value = d_template[ template_index_1d ];
float template_diff = template_value - template_mean;
float image_template_diff_product = image_offset_value * template_diff;
float squared_image_diff = image_diff * image_diff;
float squared_template_diff = template_diff * template_diff;
sum_of_image_template_diff_products += image_template_diff_product;
sum_of_squared_image_diffs += squared_image_diff;
sum_of_squared_template_diffs += squared_template_diff;
}
}
//
// compute final result
//
float result_value = 0.0f;
if ( sum_of_squared_image_diffs != 0 && sum_of_squared_template_diffs != 0 )
{
result_value = sum_of_image_template_diff_products / sqrt( sum_of_squared_image_diffs * sum_of_squared_template_diffs );
}
d_response[ image_index_1d ] = result_value;
}
}
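//each thread handles one of the num_coordinates strongest responses (assumes the coordinate
//array is sorted by response so the best matches sit at the end) and overwrites the red
//channel of the surrounding template-sized neighborhood with the average of green and blue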
__global__ void remove_redness_from_coordinates(
const unsigned int* d_coordinates,
unsigned char* d_r,
unsigned char* d_b,
unsigned char* d_g,
unsigned char* d_r_output,
int num_coordinates,
int num_pixels_y,
int num_pixels_x,
int template_half_height,
int template_half_width
)
{
int ny = num_pixels_y;
int nx = num_pixels_x;
int global_index_1d = ( blockIdx.x * blockDim.x ) + threadIdx.x;
int imgSize = num_pixels_x * num_pixels_y;
if ( global_index_1d < num_coordinates )
{
unsigned int image_index_1d = d_coordinates[ imgSize - global_index_1d - 1 ];
ushort2 image_index_2d = make_ushort2(image_index_1d % num_pixels_x, image_index_1d / num_pixels_x);
for ( int y = image_index_2d.y - template_half_height; y <= image_index_2d.y + template_half_height; y++ )
{
for ( int x = image_index_2d.x - template_half_width; x <= image_index_2d.x + template_half_width; x++ )
{
int2 image_offset_index_2d = make_int2( x, y );
int2 image_offset_index_2d_clamped = make_int2( min( nx - 1, max( 0, image_offset_index_2d.x ) ), min( ny - 1, max( 0, image_offset_index_2d.y ) ) );
int image_offset_index_1d_clamped = ( nx * image_offset_index_2d_clamped.y ) + image_offset_index_2d_clamped.x;
unsigned char g_value = d_g[ image_offset_index_1d_clamped ];
unsigned char b_value = d_b[ image_offset_index_1d_clamped ];
unsigned int gb_average = ( g_value + b_value ) / 2;
d_r_output[ image_offset_index_1d_clamped ] = (unsigned char)gb_average;
}
}
}
}
struct splitChannels : thrust::unary_function<uchar4, thrust::tuple<unsigned char, unsigned char, unsigned char> >{
__host__ __device__
thrust::tuple<unsigned char, unsigned char, unsigned char> operator()(uchar4 pixel) {
return thrust::make_tuple(pixel.x, pixel.y, pixel.z);
}
};
struct combineChannels : thrust::unary_function<thrust::tuple<unsigned char, unsigned char, unsigned char>, uchar4> {
__host__ __device__
uchar4 operator()(thrust::tuple<unsigned char, unsigned char, unsigned char> t) {
return make_uchar4(thrust::get<0>(t), thrust::get<1>(t), thrust::get<2>(t), 255);
}
};
struct combineResponses : thrust::unary_function<float, thrust::tuple<float, float, float> > {
__host__ __device__
float operator()(thrust::tuple<float, float, float> t) {
return thrust::get<0>(t) * thrust::get<1>(t) * thrust::get<2>(t);
}
};
//we need to save the input so we can remove the redeye for the output
static thrust::device_vector<unsigned char> d_red;
static thrust::device_vector<unsigned char> d_blue;
static thrust::device_vector<unsigned char> d_green;
static size_t numRowsImg;
static size_t numColsImg;
static size_t templateHalfWidth;
static size_t templateHalfHeight;
//return types are void since any internal error will be handled by quitting
//no point in returning error codes...
void preProcess(unsigned int **inputVals,
unsigned int **inputPos,
unsigned int **outputVals,
unsigned int **outputPos,
size_t &numElem,
const std::string& filename) {
//make sure the context initializes ok
checkCudaErrors(cudaFree(0));
uchar4 *inImg;
uchar4 *eyeTemplate;
size_t numRowsTemplate, numColsTemplate;
std::string templateFilename("red_eye_effect_template_5.jpg");
loadImageRGBA(filename, &inImg, &numRowsImg, &numColsImg);
loadImageRGBA(templateFilename, &eyeTemplate, &numRowsTemplate, &numColsTemplate);
templateHalfWidth = (numColsTemplate - 1) / 2;
templateHalfHeight = (numRowsTemplate - 1) / 2;
//we need to split each image into its separate channels
//use thrust to demonstrate basic uses
numElem = numRowsImg * numColsImg;
size_t templateSize = numRowsTemplate * numColsTemplate;
thrust::device_vector<uchar4> d_Img(inImg, inImg + numRowsImg * numColsImg);
thrust::device_vector<uchar4> d_Template(eyeTemplate, eyeTemplate + numRowsTemplate * numColsTemplate);
d_red. resize(numElem);
d_blue. resize(numElem);
d_green.resize(numElem);
thrust::device_vector<unsigned char> d_red_template(templateSize);
thrust::device_vector<unsigned char> d_blue_template(templateSize);
thrust::device_vector<unsigned char> d_green_template(templateSize);
//split the image
thrust::transform(d_Img.begin(), d_Img.end(), thrust::make_zip_iterator(
thrust::make_tuple(d_red.begin(),
d_blue.begin(),
d_green.begin())),
splitChannels());
//split the template
thrust::transform(d_Template.begin(), d_Template.end(),
thrust::make_zip_iterator(thrust::make_tuple(d_red_template.begin(),
d_blue_template.begin(),
d_green_template.begin())),
splitChannels());
thrust::device_vector<float> d_red_response(numElem);
thrust::device_vector<float> d_blue_response(numElem);
thrust::device_vector<float> d_green_response(numElem);
//need to compute the mean for each template channel
unsigned int r_sum = thrust::reduce(d_red_template.begin(), d_red_template.end(), 0);
unsigned int b_sum = thrust::reduce(d_blue_template.begin(), d_blue_template.end(), 0);
unsigned int g_sum = thrust::reduce(d_green_template.begin(), d_green_template.end(), 0);
float r_mean = (double)r_sum / templateSize;
float b_mean = (double)b_sum / templateSize;
float g_mean = (double)g_sum / templateSize;
const dim3 blockSize(32, 8, 1);
const dim3 gridSize( (numColsImg + blockSize.x - 1) / blockSize.x, (numRowsImg + blockSize.y - 1) / blockSize.y, 1);
//now compute the cross-correlations for each channel
naive_normalized_cross_correlation<<<gridSize, blockSize>>>(thrust::raw_pointer_cast(d_red_response.data()),
thrust::raw_pointer_cast(d_red.data()),
thrust::raw_pointer_cast(d_red_template.data()),
numRowsImg, numColsImg,
templateHalfHeight, numRowsTemplate,
templateHalfWidth, numColsTemplate,
numRowsTemplate * numColsTemplate, r_mean);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
naive_normalized_cross_correlation<<<gridSize, blockSize>>>(thrust::raw_pointer_cast(d_blue_response.data()),
thrust::raw_pointer_cast(d_blue.data()),
thrust::raw_pointer_cast(d_blue_template.data()),
numRowsImg, numColsImg,
templateHalfHeight, numRowsTemplate,
templateHalfWidth, numColsTemplate,
numRowsTemplate * numColsTemplate, b_mean);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
naive_normalized_cross_correlation<<<gridSize, blockSize>>>(thrust::raw_pointer_cast(d_green_response.data()),
thrust::raw_pointer_cast(d_green.data()),
thrust::raw_pointer_cast(d_green_template.data()),
numRowsImg, numColsImg,
templateHalfHeight, numRowsTemplate,
templateHalfWidth, numColsTemplate,
numRowsTemplate * numColsTemplate, g_mean);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
//generate combined response - multiply all channels together
thrust::device_vector<float> d_combined_response(numElem);
thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(
d_red_response.begin(),
d_blue_response.begin(),
d_green_response.begin())),
thrust::make_zip_iterator(thrust::make_tuple(
d_red_response.end(),
d_blue_response.end(),
d_green_response.end())),
d_combined_response.begin(),
combineResponses());
//find max/min of response
typedef thrust::device_vector<float>::iterator floatIt;
thrust::pair<floatIt, floatIt> minmax = thrust::minmax_element(d_combined_response.begin(), d_combined_response.end());
float bias = *minmax.first;
//we need to make all the numbers positive so that the students can sort them without any bit twiddling
thrust::transform(d_combined_response.begin(), d_combined_response.end(), thrust::make_constant_iterator(-bias),
d_combined_response.begin(), thrust::plus<float>());
//now we need to create the 1-D coordinates that will be attached to the keys
thrust::device_vector<unsigned int> coords(numElem);
thrust::sequence(coords.begin(), coords.end()); //[0, ..., numElem - 1]
//allocate memory for output and copy since our device vectors will go out of scope
//and be deleted
checkCudaErrors(cudaMalloc(inputVals, sizeof(unsigned int) * numElem));
checkCudaErrors(cudaMalloc(inputPos, sizeof(unsigned int) * numElem));
checkCudaErrors(cudaMalloc(outputVals, sizeof(unsigned int) * numElem));
checkCudaErrors(cudaMalloc(outputPos, sizeof(unsigned int) * numElem));
cudaMemcpy(*inputVals, thrust::raw_pointer_cast(d_combined_response.data()), sizeof(unsigned int) * numElem, cudaMemcpyDeviceToDevice);
cudaMemcpy(*inputPos, thrust::raw_pointer_cast(coords.data()), sizeof(unsigned int) * numElem, cudaMemcpyDeviceToDevice);
checkCudaErrors(cudaMemset(*outputVals, 0, sizeof(unsigned int) * numElem));
checkCudaErrors(cudaMemset(*outputPos, 0, sizeof(unsigned int) * numElem));
}
void postProcess(const unsigned int* const outputVals,
const unsigned int* const outputPos,
const size_t numElems,
const std::string& output_file){
thrust::device_vector<unsigned char> d_output_red = d_red;
const dim3 blockSize(256, 1, 1);
const dim3 gridSize( (40 + blockSize.x - 1) / blockSize.x, 1, 1);
remove_redness_from_coordinates<<<gridSize, blockSize>>>(outputPos,
thrust::raw_pointer_cast(d_red.data()),
thrust::raw_pointer_cast(d_blue.data()),
thrust::raw_pointer_cast(d_green.data()),
thrust::raw_pointer_cast(d_output_red.data()),
40,
numRowsImg, numColsImg,
9, 9);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
//combine the new red channel with original blue and green for output
thrust::device_vector<uchar4> d_outputImg(numElems);
thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(
d_output_red.begin(),
d_blue.begin(),
d_green.begin())),
thrust::make_zip_iterator(thrust::make_tuple(
d_output_red.end(),
d_blue.end(),
d_green.end())),
d_outputImg.begin(),
combineChannels());
thrust::host_vector<uchar4> h_Img = d_outputImg;
saveImageRGBA(&h_Img[0], numRowsImg, numColsImg, output_file);
//Clear the global vectors otherwise something goes wrong trying to free them
d_red.clear(); d_red.shrink_to_fit();
d_blue.clear(); d_blue.shrink_to_fit();
d_green.clear(); d_green.shrink_to_fit();
} |
033db66a24178319ba05e8b18e62fcde9c609384.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
using namespace std;
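// Naive matrix-multiply kernel: thread (tx,ty) computes element Pd[ty][tx] of Pd = Md * Nd.
// Only threadIdx is used, so the launch must cover the matrix with a single Width x Width block.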
__global__ void MatrixMulKernel(float *Md,float *Nd,float *Pd,int Width){
int tx=threadIdx.x;
int ty=threadIdx.y;
float Pvalue=0;
for(int k=0;k<Width;k++){
float Mdelement=Md[ty*Width+k];
float Ndelement=Nd[k*Width+tx];
Pvalue+=Mdelement*Ndelement;
}
Pd[ty*Width+tx]=Pvalue;
}
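// Host wrapper: allocates device buffers, copies M and N to the device, launches the kernel
// with one Width x Width block, copies the result back into P and frees the device memory.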
void MatrixMultiplication(float *M,float *N,float *P,int Width){
int size=Width*Width*sizeof(float);
float *Md,*Nd,*Pd;
hipMalloc((void **)&Md,size);
hipMemcpy(Md,M,size,hipMemcpyHostToDevice);
hipMalloc((void **)&Nd,size);
hipMemcpy(Nd,N,size,hipMemcpyHostToDevice);
hipMalloc((void **)&Pd,size);
dim3 dimBlock(Width,Width);
dim3 dimGrid(1,1);
hipLaunchKernelGGL(( MatrixMulKernel), dim3(dimGrid),dim3(dimBlock), 0, 0, Md,Nd,Pd,Width);
hipMemcpy(P,Pd,size,hipMemcpyDeviceToHost);
hipFree(Md);
hipFree(Nd);
hipFree(Pd);
}
int main(){
float M[3][3]={1,2,3,4,5,6,7,8,9};
float N[3][3]={9,8,7,6,5,4,3,2,1};
float P[3][3]={0};
MatrixMultiplication(*M,*N,*P,3);
cout << "P[3][3] = " << endl;
for(int m=0;m<3;m++){
for(int n=0;n<3;n++){
cout << P[m][n] << " ";
}
cout << endl;
}
} | 033db66a24178319ba05e8b18e62fcde9c609384.cu | #include<iostream>
using namespace std;
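// Naive matrix-multiply kernel: thread (tx,ty) computes element Pd[ty][tx] of Pd = Md * Nd.
// Only threadIdx is used, so the launch must cover the matrix with a single Width x Width block.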
__global__ void MatrixMulKernel(float *Md,float *Nd,float *Pd,int Width){
int tx=threadIdx.x;
int ty=threadIdx.y;
float Pvalue=0;
for(int k=0;k<Width;k++){
float Mdelement=Md[ty*Width+k];
float Ndelement=Nd[k*Width+tx];
Pvalue+=Mdelement*Ndelement;
}
Pd[ty*Width+tx]=Pvalue;
}
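// Host wrapper: allocates device buffers, copies M and N to the device, launches the kernel
// with one Width x Width block, copies the result back into P and frees the device memory.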
void MatrixMultiplication(float *M,float *N,float *P,int Width){
int size=Width*Width*sizeof(float);
float *Md,*Nd,*Pd;
cudaMalloc((void **)&Md,size);
cudaMemcpy(Md,M,size,cudaMemcpyHostToDevice);
cudaMalloc((void **)&Nd,size);
cudaMemcpy(Nd,N,size,cudaMemcpyHostToDevice);
cudaMalloc((void **)&Pd,size);
dim3 dimBlock(Width,Width);
dim3 dimGrid(1,1);
MatrixMulKernel<<<dimGrid,dimBlock>>>(Md,Nd,Pd,Width);
cudaMemcpy(P,Pd,size,cudaMemcpyDeviceToHost);
cudaFree(Md);
cudaFree(Nd);
cudaFree(Pd);
}
int main(){
float M[3][3]={1,2,3,4,5,6,7,8,9};
float N[3][3]={9,8,7,6,5,4,3,2,1};
float P[3][3]={0};
MatrixMultiplication(*M,*N,*P,3);
cout << "P[3][3] = " << endl;
for(int m=0;m<3;m++){
for(int n=0;n<3;n++){
cout << P[m][n] << " ";
}
cout << endl;
}
} |
2ae5a1b128c3deda3bc94dae6b686a79eaaca547.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* \file src/ucx/kernel.cu
* MegRay is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
#include "communicator.h"
namespace MegRay {
template <typename T>
__global__ void reduce_sum_kernel(T* i0, T* i1, T* o, size_t len) {
size_t i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < len) {
o[i] = i0[i] + i1[i];
}
}
template <typename T>
__global__ void reduce_max_kernel(T* i0, T* i1, T* o, size_t len) {
size_t i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < len) {
o[i] = (i0[i] > i1[i]) ? i0[i] : i1[i];
}
}
template <typename T>
__global__ void reduce_min_kernel(T* i0, T* i1, T* o, size_t len) {
size_t i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < len) {
o[i] = (i0[i] < i1[i]) ? i0[i] : i1[i];
}
}
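// Dispatches to the kernel matching op, combining i0 and i1 element-wise into o
// (one thread per element).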
template <typename T>
void reduce_helper(T* i0, T* i1, T* o, size_t len, ReduceOp op,
hipStream_t stream) {
size_t block_dim = 512;
size_t grid_dim = (len + block_dim - 1) / block_dim;
switch (op) {
case MEGRAY_SUM:
hipLaunchKernelGGL(( reduce_sum_kernel<T>), dim3(grid_dim), dim3(block_dim), 0, stream, i0, i1, o, len);
break;
case MEGRAY_MAX:
hipLaunchKernelGGL(( reduce_max_kernel<T>), dim3(grid_dim), dim3(block_dim), 0, stream, i0, i1, o, len);
break;
case MEGRAY_MIN:
hipLaunchKernelGGL(( reduce_min_kernel<T>), dim3(grid_dim), dim3(block_dim), 0, stream, i0, i1, o, len);
break;
default:
MEGRAY_THROW("unknown reduce op");
}
}
void UcxCommunicator::_reduce(void* i0, void* i1, void* o, size_t len,
DType dtype, ReduceOp op, hipStream_t stream) {
switch (dtype) {
case MEGRAY_INT8:
reduce_helper<int8_t>((int8_t*)i0, (int8_t*)i1, (int8_t*)o,
len, op, stream);
break;
case MEGRAY_UINT8:
reduce_helper<uint8_t>((uint8_t*)i0, (uint8_t*)i1, (uint8_t*)o,
len, op, stream);
break;
case MEGRAY_INT32:
reduce_helper<int32_t>((int32_t*)i0, (int32_t*)i1, (int32_t*)o,
len, op, stream);
break;
case MEGRAY_UINT32:
reduce_helper<uint32_t>((uint32_t*)i0, (uint32_t*)i1, (uint32_t*)o,
len, op, stream);
break;
case MEGRAY_INT64:
reduce_helper<int64_t>((int64_t*)i0, (int64_t*)i1, (int64_t*)o,
len, op, stream);
break;
case MEGRAY_UINT64:
reduce_helper<uint64_t>((uint64_t*)i0, (uint64_t*)i1, (uint64_t*)o,
len, op, stream);
break;
case MEGRAY_FLOAT32:
reduce_helper<float>((float*)i0, (float*)i1, (float*)o,
len, op, stream);
break;
case MEGRAY_FLOAT64:
reduce_helper<double>((double*)i0, (double*)i1, (double*)o,
len, op, stream);
break;
default:
MEGRAY_THROW("unknown dtype");
}
}
} // namespace MegRay
| 2ae5a1b128c3deda3bc94dae6b686a79eaaca547.cu | /**
* \file src/ucx/kernel.cu
* MegRay is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
#include "communicator.h"
namespace MegRay {
template <typename T>
__global__ void reduce_sum_kernel(T* i0, T* i1, T* o, size_t len) {
size_t i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < len) {
o[i] = i0[i] + i1[i];
}
}
template <typename T>
__global__ void reduce_max_kernel(T* i0, T* i1, T* o, size_t len) {
size_t i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < len) {
o[i] = (i0[i] > i1[i]) ? i0[i] : i1[i];
}
}
template <typename T>
__global__ void reduce_min_kernel(T* i0, T* i1, T* o, size_t len) {
size_t i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < len) {
o[i] = (i0[i] < i1[i]) ? i0[i] : i1[i];
}
}
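// Dispatches to the kernel matching op, combining i0 and i1 element-wise into o
// (one thread per element).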
template <typename T>
void reduce_helper(T* i0, T* i1, T* o, size_t len, ReduceOp op,
cudaStream_t stream) {
size_t block_dim = 512;
size_t grid_dim = (len + block_dim - 1) / block_dim;
switch (op) {
case MEGRAY_SUM:
reduce_sum_kernel<T><<<grid_dim, block_dim, 0, stream>>>(i0, i1, o, len);
break;
case MEGRAY_MAX:
reduce_max_kernel<T><<<grid_dim, block_dim, 0, stream>>>(i0, i1, o, len);
break;
case MEGRAY_MIN:
reduce_min_kernel<T><<<grid_dim, block_dim, 0, stream>>>(i0, i1, o, len);
break;
default:
MEGRAY_THROW("unknown reduce op");
}
}
void UcxCommunicator::_reduce(void* i0, void* i1, void* o, size_t len,
DType dtype, ReduceOp op, cudaStream_t stream) {
switch (dtype) {
case MEGRAY_INT8:
reduce_helper<int8_t>((int8_t*)i0, (int8_t*)i1, (int8_t*)o,
len, op, stream);
break;
case MEGRAY_UINT8:
reduce_helper<uint8_t>((uint8_t*)i0, (uint8_t*)i1, (uint8_t*)o,
len, op, stream);
break;
case MEGRAY_INT32:
reduce_helper<int32_t>((int32_t*)i0, (int32_t*)i1, (int32_t*)o,
len, op, stream);
break;
case MEGRAY_UINT32:
reduce_helper<uint32_t>((uint32_t*)i0, (uint32_t*)i1, (uint32_t*)o,
len, op, stream);
break;
case MEGRAY_INT64:
reduce_helper<int64_t>((int64_t*)i0, (int64_t*)i1, (int64_t*)o,
len, op, stream);
break;
case MEGRAY_UINT64:
reduce_helper<uint64_t>((uint64_t*)i0, (uint64_t*)i1, (uint64_t*)o,
len, op, stream);
break;
case MEGRAY_FLOAT32:
reduce_helper<float>((float*)i0, (float*)i1, (float*)o,
len, op, stream);
break;
case MEGRAY_FLOAT64:
reduce_helper<double>((double*)i0, (double*)i1, (double*)o,
len, op, stream);
break;
default:
MEGRAY_THROW("unknown dtype");
}
}
} // namespace MegRay
|
b0da72b63902cba9a5def1b2673f1c32377315f9.hip | // !!! This is a file automatically generated by hipify!!!
/*
* File: needle_kernel_tile.cu
* Author: Da Li
* Email: [email protected]
* Organization: Networking and Parallel Systems Lab (http://nps.missouri.edu/)
*
 * Description: This file defines the kernel functions of the tiled scan approach.
*
*/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include "needle.h"
#if defined (__CUDA_ARCH__) && (__CUDA_ARCH__ < 200)
#define printf(f, ...) ((void)(f, __VA_ARGS__),0)
#endif
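// returns the largest of three values (the up, left and diagonal candidates of the DP recurrence)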
__device__ __host__ short maximumm( short a, short b, short c)
{
int k;
if( a <= b ) k = b;
else k = a;
if( k <=c ) return(c);
else return(k);
}
int tile_size = TILE_SIZE;
__device__ void print_s_tile( int s_tile[TILE_SIZE+1][TILE_SIZE+1], int row, int col)
{
printf("%3d %3d\n", s_tile[row-1][col-1], s_tile[row-1][col]);
printf("%3d %3d\n", s_tile[row][col-1], s_tile[row][col]);
printf("\n\n");
}
__device__ void dump_s_tile( int s_tile[TILE_SIZE+1][TILE_SIZE+1])
{
for (int i=0; i<=TILE_SIZE; ++i) {
for (int j=0; j<=TILE_SIZE; ++j )
printf("%3d\t", s_tile[i][j]);
printf("\n");
}
printf("\n");
}
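// each block initializes the score matrix of one sequence pair (chosen by blockIdx.x):
// the first row and first column are filled with multiples of the gap penalty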
__global__ void needleman_cuda_init(int *score_matrix, unsigned int *pos_matrix, unsigned int *dim_matrix, int penalty)
{
int pair_no = blockIdx.x;
int tid = threadIdx.x;
int *matrix = score_matrix+pos_matrix[pair_no];
unsigned int row_size = dim_matrix[pair_no]+1; // 1 element margin
unsigned int stride = blockDim.x;
int iteration = row_size / stride;
// init first row
for (int i=0; i<=iteration ; ++i)
{
int index = (tid+stride*i);
if ( index<row_size ){
matrix[ index ] = index * penalty;
}
}
// init first column
for (int i=0; i<=iteration ; ++i)
{
int index = row_size * (tid+stride*i);
if ( (tid+stride*i)<row_size) {
matrix[ index ] = (tid+stride*i)*penalty;
}
}
}
__global__ void needleman_cuda_tile_upleft( char *sequence_set1, char *sequence_set2,
unsigned int *pos1, unsigned int *pos2,
int *score_matrix, unsigned int *pos_matrix, unsigned int *dim_matrix,
int pair_num, int iter_no, int penalty)
{
// 4KB, seq1[], sqe2[], tile[][]
__shared__ char s_seq1[TILE_SIZE];
__shared__ char s_seq2[TILE_SIZE];
__shared__ int s_tile[(TILE_SIZE+1)][(TILE_SIZE+1)];
int tile_num = iter_no;
int total_tile = pair_num * tile_num;
int tile_per_block = total_tile / gridDim.x + 1;
for (int b = 0; b < tile_per_block; ++b ) {
int tile_index = b * gridDim.x + blockIdx.x ;
int pair_no = tile_index / iter_no;
int tile_no = tile_index % iter_no;
if ( tile_index>=total_tile ) continue;
int tid = threadIdx.x;
char *seq1 = sequence_set1 + pos1[pair_no];
char *seq2 = sequence_set2 + pos2[pair_no];
int seq1_len = pos1[pair_no+1] - pos1[pair_no];
int seq2_len = pos2[pair_no+1] - pos2[pair_no];
int *matrix = score_matrix+pos_matrix[pair_no];
unsigned int row_size = dim_matrix[pair_no] + 1;
// compute this tile's top-left coordinates (index_x, index_y); the 2-D score matrix starts at (1,1)
int index_x = TILE_SIZE*tile_no + 1; // 2-D matrix starts from (1,1)
int index_y = TILE_SIZE*(iter_no-1) - TILE_SIZE*tile_no + 1;
// load seq1
int seq_index = index_x - 1 + tid;
if ( tid<TILE_SIZE && seq_index<seq1_len )
s_seq1[tid] = seq1[seq_index];
// load seq2
seq_index = index_y - 1 + tid;
if ( tid<TILE_SIZE && seq_index<seq2_len )
s_seq2[tid] = seq2[seq_index];
// load boundary of tile
if ( tid<TILE_SIZE ){
int index = (index_y-1)*row_size + index_x + tid; // x-index in 1-D array
s_tile[0][tid+1] = matrix[index];
//printf("s_tile[0][%d] = %d\n", tid+1, matrix[index]);
index = (index_y+tid)*row_size + index_x - 1; // y-index in 1-D array
s_tile[tid+1][0] = matrix[index];
}
if ( tid==0 ) {
int index = (index_y-1)*row_size + index_x-1;
s_tile[tid][0] = matrix[index];
}
__syncthreads();
// compute the tile in anti-diagonal wavefronts: upper-left triangle first, then lower-right triangle
for( int i = 0 ; i < TILE_SIZE ; i++){
if ( tid <= i ){
index_x = tid + 1;
index_y = i - tid + 1;
s_tile[index_y][index_x] = maximumm(s_tile[index_y-1][index_x] + penalty, // up
s_tile[index_y][index_x-1] + penalty, // left
s_tile[index_y-1][index_x-1]+blosum62[s_seq2[index_y-1]][s_seq1[index_x-1]]);
}
__syncthreads();
}
for( int i = TILE_SIZE - 1 ; i >=0 ; i--){
if ( tid <= i){
index_x = tid + TILE_SIZE - i ;
index_y = TILE_SIZE - tid;
s_tile[index_y][index_x] = maximumm(s_tile[index_y-1][index_x] + penalty, // up
s_tile[index_y][index_x-1] + penalty, // left
s_tile[index_y-1][index_x-1]+blosum62[s_seq2[index_y-1]][s_seq1[index_x-1]]);
}
__syncthreads();
}
int stride = blockDim.x / TILE_SIZE ;
int row_iter = TILE_SIZE/stride+1;
if ( tid < stride*TILE_SIZE ) {
for ( int i=0; i<row_iter; ++i) {
int s_tile_idx = tid % TILE_SIZE;
int s_tile_idy = i * stride + tid / TILE_SIZE;
if ( s_tile_idx<TILE_SIZE && s_tile_idy<TILE_SIZE) {
index_x = TILE_SIZE*tile_no + 1 + s_tile_idx;// 2-D matrix starts from (1,1)
index_y = TILE_SIZE*(iter_no-1) - TILE_SIZE*tile_no + 1 + s_tile_idy;
matrix[index_x + index_y * row_size] = s_tile[s_tile_idy+1][s_tile_idx+1];
}
}
}
} // end for
}
__global__ void needleman_cuda_tile_bottomright(char *sequence_set1, char *sequence_set2,
unsigned int *pos1, unsigned int *pos2,
int *score_matrix, unsigned int *pos_matrix, unsigned int *dim_matrix,
int pair_num, int iter_no, int penalty)
{
// 4KB, seq1[], sqe2[], tile[][]
__shared__ char s_seq1[TILE_SIZE];
__shared__ char s_seq2[TILE_SIZE];
__shared__ int s_tile[(TILE_SIZE+1)][(TILE_SIZE+1)];
int tile_num = iter_no;
int total_tile = pair_num * tile_num;
int tile_per_block = total_tile / gridDim.x + 1;
for (int b = 0; b < tile_per_block; ++b ) {
int tile_index = b * gridDim.x + blockIdx.x ;
int pair_no = tile_index / iter_no;
int tile_no = tile_index % iter_no;
if ( tile_index>=total_tile ) continue;
int tid = threadIdx.x;
char *seq1 = sequence_set1 + pos1[pair_no];
char *seq2 = sequence_set2 + pos2[pair_no];
int seq1_len = pos1[pair_no+1] - pos1[pair_no];
int seq2_len = pos2[pair_no+1] - pos2[pair_no];
int *matrix = score_matrix+pos_matrix[pair_no];
unsigned int row_size = dim_matrix[pair_no] + 1;
// calculate index
int index_x = row_size - TILE_SIZE*iter_no + TILE_SIZE*tile_no; // 2-D matrix starts from (1,1)
int index_y = row_size - TILE_SIZE - TILE_SIZE*tile_no;
// load seq1
int seq_index = index_x -1 + tid;
if ( tid<TILE_SIZE && seq_index<seq1_len )
s_seq1[tid] = seq1[seq_index];
// load seq2
seq_index = index_y -1 + tid;
if ( tid<TILE_SIZE && seq_index<seq2_len )
s_seq2[tid] = seq2[seq_index];
// load boundary of tile
if ( tid<TILE_SIZE ) {
int index = (index_y-1)*row_size + index_x + tid; // x-index in 1-D array
s_tile[0][tid+1] = matrix[index];
index = (index_y+tid)*row_size + index_x - 1; // y-index in 1-D array
s_tile[tid+1][0] = matrix[index];
}
if ( tid==0 ) {
int index = (index_y-1)*row_size + index_x-1;
s_tile[tid][0] = matrix[index];
}
__syncthreads();
// compute the tile in anti-diagonal wavefronts: upper-left triangle first, then lower-right triangle
for( int i = 0 ; i < TILE_SIZE ; i++){
if ( tid <= i ){
index_x = tid + 1;
index_y = i - tid + 1;
s_tile[index_y][index_x] = maximumm(s_tile[index_y-1][index_x] + penalty,// up
s_tile[index_y][index_x-1] + penalty, // left
s_tile[index_y-1][index_x-1]+blosum62[s_seq2[index_y-1]][s_seq1[index_x-1]]);
}
__syncthreads();
}
for( int i = TILE_SIZE - 1 ; i >=0 ; i--){
if ( tid <= i){
index_x = tid + TILE_SIZE - i ;
index_y = TILE_SIZE - tid;
s_tile[index_y][index_x] = maximumm(s_tile[index_y-1][index_x] + penalty,// up
s_tile[index_y][index_x-1] + penalty, // left
s_tile[index_y-1][index_x-1]+blosum62[s_seq2[index_y-1]][s_seq1[index_x-1]]);
}
__syncthreads();
}
int stride = blockDim.x / TILE_SIZE ;
int row_iter = TILE_SIZE/stride+1;
if ( tid < stride*TILE_SIZE ) {
for ( int i=0; i<row_iter; ++i) {
int s_tile_idx = tid % TILE_SIZE;
int s_tile_idy = i * stride + tid / TILE_SIZE;
if ( s_tile_idx<TILE_SIZE && s_tile_idy<TILE_SIZE) {
index_x = row_size - TILE_SIZE*iter_no + TILE_SIZE*tile_no + s_tile_idx;
index_y = row_size - TILE_SIZE- TILE_SIZE*tile_no + s_tile_idy;
matrix[index_x + index_y * row_size] = s_tile[s_tile_idy+1][s_tile_idx+1];
}
}
}
} // end for
}
| b0da72b63902cba9a5def1b2673f1c32377315f9.cu | /*
* File: needle_kernel_tile.cu
* Author: Da Li
* Email: [email protected]
* Organization: Networking and Parallel Systems Lab (http://nps.missouri.edu/)
*
* Description: This file defines kernel functions of tiled scan approach.
*
*/
#include <stdio.h>
#include <cuda.h>
#include "needle.h"
#if defined (__CUDA_ARCH__) && (__CUDA_ARCH__ < 200)
#define printf(f, ...) ((void)(f, __VA_ARGS__),0)
#endif
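// returns the largest of three values (the up, left and diagonal candidates of the DP recurrence)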
__device__ __host__ short maximumm( short a, short b, short c)
{
int k;
if( a <= b ) k = b;
else k = a;
if( k <=c ) return(c);
else return(k);
}
int tile_size = TILE_SIZE;
__device__ void print_s_tile( int s_tile[TILE_SIZE+1][TILE_SIZE+1], int row, int col)
{
printf("%3d %3d\n", s_tile[row-1][col-1], s_tile[row-1][col]);
printf("%3d %3d\n", s_tile[row][col-1], s_tile[row][col]);
printf("\n\n");
}
__device__ void dump_s_tile( int s_tile[TILE_SIZE+1][TILE_SIZE+1])
{
for (int i=0; i<=TILE_SIZE; ++i) {
for (int j=0; j<=TILE_SIZE; ++j )
printf("%3d\t", s_tile[i][j]);
printf("\n");
}
printf("\n");
}
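// each block initializes the score matrix of one sequence pair (chosen by blockIdx.x):
// the first row and first column are filled with multiples of the gap penalty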
__global__ void needleman_cuda_init(int *score_matrix, unsigned int *pos_matrix, unsigned int *dim_matrix, int penalty)
{
int pair_no = blockIdx.x;
int tid = threadIdx.x;
int *matrix = score_matrix+pos_matrix[pair_no];
unsigned int row_size = dim_matrix[pair_no]+1; // 1 element margin
unsigned int stride = blockDim.x;
int iteration = row_size / stride;
// init first row
for (int i=0; i<=iteration ; ++i)
{
int index = (tid+stride*i);
if ( index<row_size ){
matrix[ index ] = index * penalty;
}
}
// init first column
for (int i=0; i<=iteration ; ++i)
{
int index = row_size * (tid+stride*i);
if ( (tid+stride*i)<row_size) {
matrix[ index ] = (tid+stride*i)*penalty;
}
}
}
__global__ void needleman_cuda_tile_upleft( char *sequence_set1, char *sequence_set2,
unsigned int *pos1, unsigned int *pos2,
int *score_matrix, unsigned int *pos_matrix, unsigned int *dim_matrix,
int pair_num, int iter_no, int penalty)
{
// 4KB, seq1[], sqe2[], tile[][]
__shared__ char s_seq1[TILE_SIZE];
__shared__ char s_seq2[TILE_SIZE];
__shared__ int s_tile[(TILE_SIZE+1)][(TILE_SIZE+1)];
int tile_num = iter_no;
int total_tile = pair_num * tile_num;
int tile_per_block = total_tile / gridDim.x + 1;
for (int b = 0; b < tile_per_block; ++b ) {
int tile_index = b * gridDim.x + blockIdx.x ;
int pair_no = tile_index / iter_no;
int tile_no = tile_index % iter_no;
if ( tile_index>=total_tile ) continue;
int tid = threadIdx.x;
char *seq1 = sequence_set1 + pos1[pair_no];
char *seq2 = sequence_set2 + pos2[pair_no];
int seq1_len = pos1[pair_no+1] - pos1[pair_no];
int seq2_len = pos2[pair_no+1] - pos2[pair_no];
int *matrix = score_matrix+pos_matrix[pair_no];
unsigned int row_size = dim_matrix[pair_no] + 1;
// compute this tile's top-left coordinates (index_x, index_y); the 2-D score matrix starts at (1,1)
int index_x = TILE_SIZE*tile_no + 1; // 2-D matrix starts from (1,1)
int index_y = TILE_SIZE*(iter_no-1) - TILE_SIZE*tile_no + 1;
// load seq1
int seq_index = index_x - 1 + tid;
if ( tid<TILE_SIZE && seq_index<seq1_len )
s_seq1[tid] = seq1[seq_index];
// load seq2
seq_index = index_y - 1 + tid;
if ( tid<TILE_SIZE && seq_index<seq2_len )
s_seq2[tid] = seq2[seq_index];
// load boundary of tile
if ( tid<TILE_SIZE ){
int index = (index_y-1)*row_size + index_x + tid; // x-index in 1-D array
s_tile[0][tid+1] = matrix[index];
//printf("s_tile[0][%d] = %d\n", tid+1, matrix[index]);
index = (index_y+tid)*row_size + index_x - 1; // y-index in 1-D array
s_tile[tid+1][0] = matrix[index];
}
if ( tid==0 ) {
int index = (index_y-1)*row_size + index_x-1;
s_tile[tid][0] = matrix[index];
}
__syncthreads();
// compute the tile in anti-diagonal wavefronts: upper-left triangle first, then lower-right triangle
for( int i = 0 ; i < TILE_SIZE ; i++){
if ( tid <= i ){
index_x = tid + 1;
index_y = i - tid + 1;
s_tile[index_y][index_x] = maximumm(s_tile[index_y-1][index_x] + penalty, // up
s_tile[index_y][index_x-1] + penalty, // left
s_tile[index_y-1][index_x-1]+blosum62[s_seq2[index_y-1]][s_seq1[index_x-1]]);
}
__syncthreads();
}
for( int i = TILE_SIZE - 1 ; i >=0 ; i--){
if ( tid <= i){
index_x = tid + TILE_SIZE - i ;
index_y = TILE_SIZE - tid;
s_tile[index_y][index_x] = maximumm(s_tile[index_y-1][index_x] + penalty, // up
s_tile[index_y][index_x-1] + penalty, // left
s_tile[index_y-1][index_x-1]+blosum62[s_seq2[index_y-1]][s_seq1[index_x-1]]);
}
__syncthreads();
}
int stride = blockDim.x / TILE_SIZE ;
int row_iter = TILE_SIZE/stride+1;
if ( tid < stride*TILE_SIZE ) {
for ( int i=0; i<row_iter; ++i) {
int s_tile_idx = tid % TILE_SIZE;
int s_tile_idy = i * stride + tid / TILE_SIZE;
if ( s_tile_idx<TILE_SIZE && s_tile_idy<TILE_SIZE) {
index_x = TILE_SIZE*tile_no + 1 + s_tile_idx;// 2-D matrix starts from (1,1)
index_y = TILE_SIZE*(iter_no-1) - TILE_SIZE*tile_no + 1 + s_tile_idy;
matrix[index_x + index_y * row_size] = s_tile[s_tile_idy+1][s_tile_idx+1];
}
}
}
} // end for
}
__global__ void needleman_cuda_tile_bottomright(char *sequence_set1, char *sequence_set2,
unsigned int *pos1, unsigned int *pos2,
int *score_matrix, unsigned int *pos_matrix, unsigned int *dim_matrix,
int pair_num, int iter_no, int penalty)
{
// 4KB, seq1[], sqe2[], tile[][]
__shared__ char s_seq1[TILE_SIZE];
__shared__ char s_seq2[TILE_SIZE];
__shared__ int s_tile[(TILE_SIZE+1)][(TILE_SIZE+1)];
int tile_num = iter_no;
int total_tile = pair_num * tile_num;
int tile_per_block = total_tile / gridDim.x + 1;
for (int b = 0; b < tile_per_block; ++b ) {
int tile_index = b * gridDim.x + blockIdx.x ;
int pair_no = tile_index / iter_no;
int tile_no = tile_index % iter_no;
if ( tile_index>=total_tile ) continue;
int tid = threadIdx.x;
char *seq1 = sequence_set1 + pos1[pair_no];
char *seq2 = sequence_set2 + pos2[pair_no];
int seq1_len = pos1[pair_no+1] - pos1[pair_no];
int seq2_len = pos2[pair_no+1] - pos2[pair_no];
int *matrix = score_matrix+pos_matrix[pair_no];
unsigned int row_size = dim_matrix[pair_no] + 1;
// calculate index
int index_x = row_size - TILE_SIZE*iter_no + TILE_SIZE*tile_no; // 2-D matrix starts from (1,1)
int index_y = row_size - TILE_SIZE - TILE_SIZE*tile_no;
// load seq1
int seq_index = index_x -1 + tid;
if ( tid<TILE_SIZE && seq_index<seq1_len )
s_seq1[tid] = seq1[seq_index];
// load seq2
seq_index = index_y -1 + tid;
if ( tid<TILE_SIZE && seq_index<seq2_len )
s_seq2[tid] = seq2[seq_index];
// load boundary of tile
if ( tid<TILE_SIZE ) {
int index = (index_y-1)*row_size + index_x + tid; // x-index in 1-D array
s_tile[0][tid+1] = matrix[index];
index = (index_y+tid)*row_size + index_x - 1; // y-index in 1-D array
s_tile[tid+1][0] = matrix[index];
}
if ( tid==0 ) {
int index = (index_y-1)*row_size + index_x-1;
s_tile[tid][0] = matrix[index];
}
__syncthreads();
// compute the tile in anti-diagonal wavefronts: upper-left triangle first, then lower-right triangle
for( int i = 0 ; i < TILE_SIZE ; i++){
if ( tid <= i ){
index_x = tid + 1;
index_y = i - tid + 1;
s_tile[index_y][index_x] = maximumm(s_tile[index_y-1][index_x] + penalty,// up
s_tile[index_y][index_x-1] + penalty, // left
s_tile[index_y-1][index_x-1]+blosum62[s_seq2[index_y-1]][s_seq1[index_x-1]]);
}
__syncthreads();
}
for( int i = TILE_SIZE - 1 ; i >=0 ; i--){
if ( tid <= i){
index_x = tid + TILE_SIZE - i ;
index_y = TILE_SIZE - tid;
s_tile[index_y][index_x] = maximumm(s_tile[index_y-1][index_x] + penalty,// up
s_tile[index_y][index_x-1] + penalty, // left
s_tile[index_y-1][index_x-1]+blosum62[s_seq2[index_y-1]][s_seq1[index_x-1]]);
}
__syncthreads();
}
int stride = blockDim.x / TILE_SIZE ;
int row_iter = TILE_SIZE/stride+1;
if ( tid < stride*TILE_SIZE ) {
for ( int i=0; i<row_iter; ++i) {
int s_tile_idx = tid % TILE_SIZE;
int s_tile_idy = i * stride + tid / TILE_SIZE;
if ( s_tile_idx<TILE_SIZE && s_tile_idy<TILE_SIZE) {
index_x = row_size - TILE_SIZE*iter_no + TILE_SIZE*tile_no + s_tile_idx;
index_y = row_size - TILE_SIZE- TILE_SIZE*tile_no + s_tile_idy;
matrix[index_x + index_y * row_size] = s_tile[s_tile_idy+1][s_tile_idx+1];
}
}
}
} // end for
}
|
5b7cbc1cd9ac2b14b6203c3d22a8a7b08ce83d9c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "bcnn_op_cuda_relu_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
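// benchmark driver: argv[1] selects how many of the predefined matrix sizes to test;
// for every (matrix size, block shape) pair the ReLU kernel is warmed up and then 1000
// consecutive launches are timed, printing [elapsed_us,(block),(matrix)]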
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
float *x = NULL;
hipMalloc(&x, XSIZE*YSIZE);
float *y = NULL;
hipMalloc(&y, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
bcnn_op_cuda_relu_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, n,x,y);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
bcnn_op_cuda_relu_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, n,x,y);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
bcnn_op_cuda_relu_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, n,x,y);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 5b7cbc1cd9ac2b14b6203c3d22a8a7b08ce83d9c.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "bcnn_op_cuda_relu_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
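// benchmark driver: argv[1] selects how many of the predefined matrix sizes to test;
// for every (matrix size, block shape) pair the ReLU kernel is warmed up and then 1000
// consecutive launches are timed, printing [elapsed_us,(block),(matrix)]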
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
float *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE);
float *y = NULL;
cudaMalloc(&y, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
bcnn_op_cuda_relu_kernel<<<gridBlock,threadBlock>>>(n,x,y);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
bcnn_op_cuda_relu_kernel<<<gridBlock,threadBlock>>>(n,x,y);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
bcnn_op_cuda_relu_kernel<<<gridBlock,threadBlock>>>(n,x,y);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
e4c887f5f2280d413d614c144b38645135b55273.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <cstdlib>
#include <cstdio>
#include <vector>
#include <hiprand/hiprand_kernel.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
#include <thrust/extrema.h>
#include <thrust/device_ptr.h>
#define d_maxColour 9
using namespace std;
__device__ int d_count = 0;
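// launched with a single block of two threads, one per endpoint of a newly inserted edge:
// each thread records the edge in its endpoint's adjacency list, and if both endpoints
// then share a colour the conflict is resolved by recolouring one of them with the
// smallest colour not used by its neighbours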
__global__ void incrementalColouring (int *vertexArray, int *neighbourArray, int n, int m, int *colouring, int start, int end){
int i = threadIdx.x;
int startStart, startStop;
int me, you;
if (i==0){
me = start;
you = end;
}
else{
me = end;
you = start;
}
startStart = vertexArray[me-1];
if (me==n){
startStop = 2*m;
}
else{
startStop = vertexArray[me];
}
for (int j=startStart; j<startStop; j++){
if (neighbourArray[j]==0){
neighbourArray[j]=you;
break;
}
}
__syncthreads();
if (colouring[start-1]!=colouring[end-1]){
return;
}
if (i==0)
printf("%d and %d Conflict\n", start, end);
__shared__ int colours[2];
colours[i]=0;
if (i==0)
printf("I am %d and %d and %d\n", i, colours[i], colours[1-i]);
bool bucket[d_maxColour];
for (int j=0; j<d_maxColour; j++){
bucket[j]=true;
}
if (i==0){
printf("%d %d", startStart, startStop);
for (int j=startStart; j<startStop; j++){
printf("clo %d\n", neighbourArray[j]);
if (neighbourArray[j]!=0){
printf("clocli %d\n", colouring[neighbourArray[j]-1]);
}
}
}
for (int j=startStart; j<startStop; j++){
if (neighbourArray[j]==0){
continue;
}
bucket[colouring[neighbourArray[j]-1]-1] = false;
if (i==0)
printf("buvket clo %d and %d and %d\n", neighbourArray[j]-1, colouring[neighbourArray[j]-1], bucket[colouring[neighbourArray[j]-1]-1]);
}
for (int j=0; j<d_maxColour; j++){
if(bucket[j]){
colours[i]=j+1;
printf("%d ashhas \t", j+1);
break;
}
}
if (i==0)
for (int j=0; j<d_maxColour; j++){
printf("%d \t",bucket[j]);
}
if (i==0){
printf("\n");
}
__syncthreads();
printf("%d and %d Conflict new colour min %d \n", start, end, colours[i]);
// Possible issue: There could be a number in between the smallest equal guess and the current colour.
if (colours[i]==colours[1-i]){
if (colours[i]<colouring[me-1]){
if(i==0){
colouring[me-1]=colours[i];
}
}
else{
if (i==1){
colouring[me-1]=colours[i];
}
}
}
else{
if (colours[i]<colouring[me-1]){
colouring[me-1]=colours[i];
}
else{
if (colours[i]<colours[1-i]){
colouring[me-1]=colours[i];
}
}
}
__syncthreads();
if (i==0){
for (int j=0; j<n; j++){
printf("%d ", colouring[j]);
}
printf("\n");
}
}
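// one thread per vertex: an uncoloured vertex whose random number is a local maximum among
// its uncoloured neighbours takes currentColour, a local minimum takes currentColour+1
// (ties broken by vertex index); every vertex coloured here increments d_count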
__global__ void colourMinMax (int *vertexArray, int *neighbourArray, int *numbers, int n, int m, int *colouring, int currentColour){
int i= blockDim.x * blockIdx.x + threadIdx.x;
if (i>=n){
return;
}
if (colouring[i]!=0){
return;
}
int myValue = numbers[i];
// printf("I am node %d with value %d\n", i+1, myMax);
int start = -1, stop = -1;
start = vertexArray[i];
if (i==n-1){
stop = 2*m;
}
else{
stop = vertexArray[i+1];
}
bool max = true, min = true;
for (int j=start; j<stop; j++){
// printf("My neighbour %d with value %d from %d \n", neighbourArray[j], numbers[neighbourArray[j]-1], i+1);
int neighbour = neighbourArray[j];
if (neighbour==0){
continue;
}
neighbour--;
if (max && colouring[neighbour]==0 && numbers[neighbour] >= myValue){
if (numbers[neighbour] == myValue){
if (i < neighbour){
continue;
}
}
max=false;
if (!min){
return;
}
}
if (min && colouring[neighbour]==0 && numbers[neighbour] <= myValue){
if (numbers[neighbour] == myValue){
if (i > neighbour){
continue;
}
}
min=false;
if (!max){
return;
}
}
}
if (max){
colouring[i] = currentColour;
}
else if (min){
colouring[i] = currentColour+1;
}
atomicAdd(&d_count, 1);
}
__global__ void setup_kernel (hiprandState_t * state, unsigned long seed ){
int i= blockDim.x * blockIdx.x + threadIdx.x;
hiprand_init (seed, i, 0, &state[i]);
}
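// assigns each vertex a pseudo-random integer in the range [1, limit]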
__global__ void randomNumbering (hiprandState_t* globalState, int *degreeCount, int n, int limit){
int i= blockDim.x * blockIdx.x + threadIdx.x;
hiprandState_t localState = globalState[i];
float RANDOM = hiprand_uniform( &localState );
globalState[i] = localState;
RANDOM *= (limit - 1 + 0.999999);
RANDOM += 1;
degreeCount[i] = (int) RANDOM;
}
__global__ void degreeCalc (int *vertexArray, int *neighbourArray, int *degreeCount, int n, int m){
int i= blockDim.x * blockIdx.x + threadIdx.x;
if (i>=n){
return;
}
int start = -1, stop = -1;
int diff=0;
start = vertexArray[i];
if (i==n-1){
stop = 2*m;
}
else{
stop = vertexArray[i+1];
}
diff = stop-start;
degreeCount[i]=diff;
}
void edgesPrint (int vertexArray[], int neighbourArray[], int n, int m){
for (int i=0; i<n-1; i++){
for (int j = vertexArray[i]; j < vertexArray[i+1]; ++j){
cout<<"e "<<i+1<<" "<<neighbourArray[j]<<endl;
/* code */
}
}
for (int j = vertexArray[n-1]; j < m; ++j)
{
cout<<"e "<<n<<" "<<neighbourArray[j]<<endl;
/* code */
}
}
int main(int argc, char const *argv[])
{
int n, m;
cin>>n>>m;
int *h_count = new int;
int *h_vertexArray = new int [n];
int *h_neighbourArray = new int [2*m];
int *h_degreeCount = new int [n];
int *h_colour = new int [n];
int *d_vertexArray = NULL;
hipMalloc((void **)&d_vertexArray, n*sizeof(int));
int *d_neighbourArray = NULL;
hipMalloc((void **)&d_neighbourArray, 2*m*sizeof(int));
int *d_colour = NULL;
hipMalloc((void **)&d_colour, (n)*sizeof(int));
hipMemset((void *)d_colour, 0, (n)*sizeof(int));
int *d_degreeCount = NULL;
hipMalloc((void **)&d_degreeCount, (n)*sizeof(int));
hipMemset((void *)d_degreeCount, 0, (n)*sizeof(int));
hiprandState_t* devStates;
hipMalloc ( &devStates, n*sizeof( hiprandState_t ) );
int offset = 0;
vector<int> startArray, stopArray;
for (int i = 0; i < n; ++i)
{
h_vertexArray[i]=offset;
int degree;
cin>>degree;
offset+=degree;
}
for (int i = 0; i < 2*m; ++i)
{
h_neighbourArray[i]=0;
}
for (int i = 0; i < m; ++i)
{
int start;
int end;
cin>>start>>end;
double r = ((double) rand() / (RAND_MAX));
if (r<=0.5){
int startStart, startStop, stopStart, stopStop;
startStart = h_vertexArray[start-1];
if (start==n){
startStop = 2*m;
}
else{
startStop = h_vertexArray[start];
}
stopStart = h_vertexArray[end-1];
if (end==n){
stopStop = 2*m;
}
else{
stopStop = h_vertexArray[end];
}
for (int j=startStart; j<startStop; j++){
if (h_neighbourArray[j]==0){
h_neighbourArray[j]=end;
break;
}
}
for (int j=stopStart; j<stopStop; j++){
if (h_neighbourArray[j]==0){
h_neighbourArray[j]=start;
break;
}
}
}
else{
startArray.push_back(start);
stopArray.push_back(end);
}
}
for (int i=0; i<n; i++){
cout<<h_vertexArray[i]<<" ";
}
cout<<endl;
for (int i=0; i<2*m; i++){
cout<<h_neighbourArray[i]<<" ";
}
cout<<endl;
hipMemcpy(d_vertexArray, h_vertexArray, n*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_neighbourArray, h_neighbourArray, 2*m*sizeof(int), hipMemcpyHostToDevice);
int threadsPerBlock = 512;
int blocksPerGrid = (n + threadsPerBlock -1)/threadsPerBlock;
hipLaunchKernelGGL(( setup_kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, devStates, time(NULL) );
hipLaunchKernelGGL(( randomNumbering), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, devStates, d_degreeCount, n, n);
hipMemcpy(h_degreeCount, d_degreeCount, n*sizeof(int), hipMemcpyDeviceToHost);
cout<<"Random numbers: "<<endl;
for (int i=0; i<n; i++){
cout<<h_degreeCount[i]<<endl;
}
int colourCount = 1;
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
cout<<"Worklist: "<<endl;
for (int i=0; i<startArray.size(); i++){
cout<<startArray[i]<<" "<<stopArray[i]<<endl;
}
while (1){
hipLaunchKernelGGL(( colourMinMax), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_vertexArray, d_neighbourArray, d_degreeCount, n, m, d_colour, colourCount);
hipMemcpyFromSymbol(h_count, d_count, sizeof(int), 0, hipMemcpyDeviceToHost);
cout<<"H Count = "<<*h_count<<"at colour: "<<colourCount<<endl;
if (*h_count == n){
break;
}
colourCount+=2;
}
colourCount++;
for (int i=0; i<startArray.size(); i++){
cout<<"New added edge: "<<startArray[i]<<" "<<stopArray[i]<<endl;
hipLaunchKernelGGL(( incrementalColouring), dim3(1), dim3(2), 0, 0, d_vertexArray, d_neighbourArray, n, m, d_colour, startArray[i], stopArray[i]);
hipDeviceSynchronize();
}
hipMemcpy(h_colour, d_colour, n*sizeof(int), hipMemcpyDeviceToHost);
thrust::device_ptr<int> c_ptr = thrust::device_pointer_cast(d_colour);
int maxColour = *(thrust::max_element(c_ptr, c_ptr + n));
cout<<"Max Colour = "<<maxColour<<endl;
cout<<"Colour numbers: "<<endl;
for (int i=0; i<n; i++){
cout<<h_colour[i]<<endl;
}
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
cout<<"Time for the kernel: "<<time<<" ms"<<endl;
hipMemcpy(h_vertexArray, d_vertexArray, n*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(h_neighbourArray, d_neighbourArray, 2*m*sizeof(int), hipMemcpyDeviceToHost);
for (int i=0; i<n; i++){
cout<<h_vertexArray[i]<<" ";
}
cout<<endl;
for (int i=0; i<2*m; i++){
cout<<h_neighbourArray[i]<<" ";
}
cout<<endl;
delete h_count;
delete[] h_vertexArray;
delete[] h_neighbourArray;
delete[] h_degreeCount;
delete[] h_colour;
hipFree(d_neighbourArray);
hipFree(d_vertexArray);
hipFree(d_degreeCount);
hipFree(d_colour);
hipDeviceReset();
return 0;
}
| e4c887f5f2280d413d614c144b38645135b55273.cu | #include <iostream>
#include <cstdlib>
#include <cstdio>
#include <vector>
#include <curand_kernel.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
#include <thrust/extrema.h>
#include <thrust/device_ptr.h>
#define d_maxColour 9
using namespace std;
__device__ int d_count = 0;
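// launched with a single block of two threads, one per endpoint of a newly inserted edge:
// each thread records the edge in its endpoint's adjacency list, and if both endpoints
// then share a colour the conflict is resolved by recolouring one of them with the
// smallest colour not used by its neighbours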
__global__ void incrementalColouring (int *vertexArray, int *neighbourArray, int n, int m, int *colouring, int start, int end){
int i = threadIdx.x;
int startStart, startStop;
int me, you;
if (i==0){
me = start;
you = end;
}
else{
me = end;
you = start;
}
startStart = vertexArray[me-1];
if (me==n){
startStop = 2*m;
}
else{
startStop = vertexArray[me];
}
for (int j=startStart; j<startStop; j++){
if (neighbourArray[j]==0){
neighbourArray[j]=you;
break;
}
}
__syncthreads();
if (colouring[start-1]!=colouring[end-1]){
return;
}
if (i==0)
printf("%d and %d Conflict\n", start, end);
__shared__ int colours[2];
colours[i]=0;
if (i==0)
printf("I am %d and %d and %d\n", i, colours[i], colours[1-i]);
bool bucket[d_maxColour];
for (int j=0; j<d_maxColour; j++){
bucket[j]=true;
}
if (i==0){
printf("%d %d", startStart, startStop);
for (int j=startStart; j<startStop; j++){
printf("clo %d\n", neighbourArray[j]);
if (neighbourArray[j]!=0){
printf("clocli %d\n", colouring[neighbourArray[j]-1]);
}
}
}
for (int j=startStart; j<startStop; j++){
if (neighbourArray[j]==0){
continue;
}
bucket[colouring[neighbourArray[j]-1]-1] = false;
if (i==0)
printf("buvket clo %d and %d and %d\n", neighbourArray[j]-1, colouring[neighbourArray[j]-1], bucket[colouring[neighbourArray[j]-1]-1]);
}
for (int j=0; j<d_maxColour; j++){
if(bucket[j]){
colours[i]=j+1;
printf("%d ashhas \t", j+1);
break;
}
}
if (i==0)
for (int j=0; j<d_maxColour; j++){
printf("%d \t",bucket[j]);
}
if (i==0){
printf("\n");
}
__syncthreads();
printf("%d and %d Conflict new colour min %d \n", start, end, colours[i]);
// Possible issue: There could be a number in between the smallest equal guess and the current colour.
if (colours[i]==colours[1-i]){
if (colours[i]<colouring[me-1]){
if(i==0){
colouring[me-1]=colours[i];
}
}
else{
if (i==1){
colouring[me-1]=colours[i];
}
}
}
else{
if (colours[i]<colouring[me-1]){
colouring[me-1]=colours[i];
}
else{
if (colours[i]<colours[1-i]){
colouring[me-1]=colours[i];
}
}
}
__syncthreads();
if (i==0){
for (int j=0; j<n; j++){
printf("%d ", colouring[j]);
}
printf("\n");
}
}
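// one thread per vertex: an uncoloured vertex whose random number is a local maximum among
// its uncoloured neighbours takes currentColour, a local minimum takes currentColour+1
// (ties broken by vertex index); every vertex coloured here increments d_count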
__global__ void colourMinMax (int *vertexArray, int *neighbourArray, int *numbers, int n, int m, int *colouring, int currentColour){
int i= blockDim.x * blockIdx.x + threadIdx.x;
if (i>=n){
return;
}
if (colouring[i]!=0){
return;
}
int myValue = numbers[i];
// printf("I am node %d with value %d\n", i+1, myMax);
int start = -1, stop = -1;
start = vertexArray[i];
if (i==n-1){
stop = 2*m;
}
else{
stop = vertexArray[i+1];
}
bool max = true, min = true;
for (int j=start; j<stop; j++){
// printf("My neighbour %d with value %d from %d \n", neighbourArray[j], numbers[neighbourArray[j]-1], i+1);
int neighbour = neighbourArray[j];
if (neighbour==0){
continue;
}
neighbour--;
if (max && colouring[neighbour]==0 && numbers[neighbour] >= myValue){
if (numbers[neighbour] == myValue){
if (i < neighbour){
continue;
}
}
max=false;
if (!min){
return;
}
}
if (min && colouring[neighbour]==0 && numbers[neighbour] <= myValue){
if (numbers[neighbour] == myValue){
if (i > neighbour){
continue;
}
}
min=false;
if (!max){
return;
}
}
}
if (max){
colouring[i] = currentColour;
}
else if (min){
colouring[i] = currentColour+1;
}
atomicAdd(&d_count, 1);
}
__global__ void setup_kernel (curandState * state, unsigned long seed ){
int i= blockDim.x * blockIdx.x + threadIdx.x;
curand_init (seed, i, 0, &state[i]);
}
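// assigns each vertex a pseudo-random integer in the range [1, limit]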
__global__ void randomNumbering (curandState* globalState, int *degreeCount, int n, int limit){
int i= blockDim.x * blockIdx.x + threadIdx.x;
curandState localState = globalState[i];
float RANDOM = curand_uniform( &localState );
globalState[i] = localState;
RANDOM *= (limit - 1 + 0.999999);
RANDOM += 1;
degreeCount[i] = (int) RANDOM;
}
__global__ void degreeCalc (int *vertexArray, int *neighbourArray, int *degreeCount, int n, int m){
int i= blockDim.x * blockIdx.x + threadIdx.x;
if (i>=n){
return;
}
int start = -1, stop = -1;
int diff=0;
start = vertexArray[i];
if (i==n-1){
stop = 2*m;
}
else{
stop = vertexArray[i+1];
}
diff = stop-start;
degreeCount[i]=diff;
}
void edgesPrint (int vertexArray[], int neighbourArray[], int n, int m){
for (int i=0; i<n-1; i++){
for (int j = vertexArray[i]; j < vertexArray[i+1]; ++j){
cout<<"e "<<i+1<<" "<<neighbourArray[j]<<endl;
/* code */
}
}
for (int j = vertexArray[n-1]; j < m; ++j)
{
cout<<"e "<<n<<" "<<neighbourArray[j]<<endl;
/* code */
}
}
int main(int argc, char const *argv[])
{
int n, m;
cin>>n>>m;
int *h_count = new int;
int *h_vertexArray = new int [n];
int *h_neighbourArray = new int [2*m];
int *h_degreeCount = new int [n];
int *h_colour = new int [n];
int *d_vertexArray = NULL;
cudaMalloc((void **)&d_vertexArray, n*sizeof(int));
int *d_neighbourArray = NULL;
cudaMalloc((void **)&d_neighbourArray, 2*m*sizeof(int));
int *d_colour = NULL;
cudaMalloc((void **)&d_colour, (n)*sizeof(int));
cudaMemset((void *)d_colour, 0, (n)*sizeof(int));
int *d_degreeCount = NULL;
cudaMalloc((void **)&d_degreeCount, (n)*sizeof(int));
cudaMemset((void *)d_degreeCount, 0, (n)*sizeof(int));
curandState* devStates;
cudaMalloc ( &devStates, n*sizeof( curandState ) );
int offset = 0;
vector<int> startArray, stopArray;
for (int i = 0; i < n; ++i)
{
h_vertexArray[i]=offset;
int degree;
cin>>degree;
offset+=degree;
}
for (int i = 0; i < 2*m; ++i)
{
h_neighbourArray[i]=0;
}
for (int i = 0; i < m; ++i)
{
int start;
int end;
cin>>start>>end;
double r = ((double) rand() / (RAND_MAX));
if (r<=0.5){
int startStart, startStop, stopStart, stopStop;
startStart = h_vertexArray[start-1];
if (start==n){
startStop = 2*m;
}
else{
startStop = h_vertexArray[start];
}
stopStart = h_vertexArray[end-1];
if (end==n){
stopStop = 2*m;
}
else{
stopStop = h_vertexArray[end];
}
for (int j=startStart; j<startStop; j++){
if (h_neighbourArray[j]==0){
h_neighbourArray[j]=end;
break;
}
}
for (int j=stopStart; j<stopStop; j++){
if (h_neighbourArray[j]==0){
h_neighbourArray[j]=start;
break;
}
}
}
else{
startArray.push_back(start);
stopArray.push_back(end);
}
}
for (int i=0; i<n; i++){
cout<<h_vertexArray[i]<<" ";
}
cout<<endl;
for (int i=0; i<2*m; i++){
cout<<h_neighbourArray[i]<<" ";
}
cout<<endl;
cudaMemcpy(d_vertexArray, h_vertexArray, n*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_neighbourArray, h_neighbourArray, 2*m*sizeof(int), cudaMemcpyHostToDevice);
int threadsPerBlock = 512;
int blocksPerGrid = (n + threadsPerBlock -1)/threadsPerBlock;
setup_kernel <<<blocksPerGrid, threadsPerBlock>>> ( devStates, time(NULL) );
randomNumbering<<<blocksPerGrid, threadsPerBlock>>>(devStates, d_degreeCount, n, n);
cudaMemcpy(h_degreeCount, d_degreeCount, n*sizeof(int), cudaMemcpyDeviceToHost);
cout<<"Random numbers: "<<endl;
for (int i=0; i<n; i++){
cout<<h_degreeCount[i]<<endl;
}
int colourCount = 1;
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
cout<<"Worklist: "<<endl;
for (int i=0; i<startArray.size(); i++){
cout<<startArray[i]<<" "<<stopArray[i]<<endl;
}
while (1){
colourMinMax<<<blocksPerGrid, threadsPerBlock>>>(d_vertexArray, d_neighbourArray, d_degreeCount, n, m, d_colour, colourCount);
cudaMemcpyFromSymbol(h_count, d_count, sizeof(int), 0, cudaMemcpyDeviceToHost);
cout<<"H Count = "<<*h_count<<"at colour: "<<colourCount<<endl;
if (*h_count == n){
break;
}
colourCount+=2;
}
colourCount++;
for (int i=0; i<startArray.size(); i++){
cout<<"New added edge: "<<startArray[i]<<" "<<stopArray[i]<<endl;
incrementalColouring<<<1, 2>>>(d_vertexArray, d_neighbourArray, n, m, d_colour, startArray[i], stopArray[i]);
cudaDeviceSynchronize();
}
cudaMemcpy(h_colour, d_colour, n*sizeof(int), cudaMemcpyDeviceToHost);
thrust::device_ptr<int> c_ptr = thrust::device_pointer_cast(d_colour);
int maxColour = *(thrust::max_element(c_ptr, c_ptr + n));
cout<<"Max Colour = "<<maxColour<<endl;
cout<<"Colour numbers: "<<endl;
for (int i=0; i<n; i++){
cout<<h_colour[i]<<endl;
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
cout<<"Time for the kernel: "<<time<<" ms"<<endl;
cudaMemcpy(h_vertexArray, d_vertexArray, n*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(h_neighbourArray, d_neighbourArray, 2*m*sizeof(int), cudaMemcpyDeviceToHost);
for (int i=0; i<n; i++){
cout<<h_vertexArray[i]<<" ";
}
cout<<endl;
for (int i=0; i<2*m; i++){
cout<<h_neighbourArray[i]<<" ";
}
cout<<endl;
delete h_count;
delete[] h_vertexArray;
delete[] h_neighbourArray;
delete[] h_degreeCount;
delete[] h_colour;
cudaFree(d_neighbourArray);
cudaFree(d_vertexArray);
cudaFree(d_degreeCount);
cudaFree(d_colour);
cudaDeviceReset();
return 0;
}
|
e4dc201912ea67890ceccfe2c5049d90d938ce7c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "copy_fill.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
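// benchmark driver: argv[1] selects how many of the predefined matrix sizes to test;
// for every (matrix size, block shape) pair the copy_fill kernel is warmed up and then 1000
// consecutive launches are timed, printing [elapsed_us,(block),(matrix)]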
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *in = NULL;
hipMalloc(&in, XSIZE*YSIZE*sizeof(float));
float *out = NULL;
hipMalloc(&out, XSIZE*YSIZE*sizeof(float));
int size = XSIZE*YSIZE;
int in_size2 = XSIZE*YSIZE;
int in_size3 = XSIZE*YSIZE;
int out_size2 = XSIZE*YSIZE;
int out_size3 = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
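// hipFree(0) forces context creation; one initial launch plus ten warm-up
// launches precede the 1000 timed launches below.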
hipFree(0);
hipLaunchKernelGGL((copy_fill), dim3(gridBlock), dim3(threadBlock), 0, 0, in, out, size, in_size2, in_size3, out_size2, out_size3);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((copy_fill), dim3(gridBlock), dim3(threadBlock), 0, 0, in, out, size, in_size2, in_size3, out_size2, out_size3);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((copy_fill), dim3(gridBlock), dim3(threadBlock), 0, 0, in, out, size, in_size2, in_size3, out_size2, out_size3);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | e4dc201912ea67890ceccfe2c5049d90d938ce7c.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "copy_fill.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *in = NULL;
cudaMalloc(&in, XSIZE*YSIZE*sizeof(float));
float *out = NULL;
cudaMalloc(&out, XSIZE*YSIZE*sizeof(float));
int size = XSIZE*YSIZE;
int in_size2 = XSIZE*YSIZE;
int in_size3 = XSIZE*YSIZE;
int out_size2 = XSIZE*YSIZE;
int out_size3 = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
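// Warm-up phase (cudaFree(0), one initial launch, ten extra launches) before
// the 1000 timed launches.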
cudaFree(0);
copy_fill<<<gridBlock,threadBlock>>>(in,out,size,in_size2,in_size3,out_size2,out_size3);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
copy_fill<<<gridBlock,threadBlock>>>(in,out,size,in_size2,in_size3,out_size2,out_size3);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
copy_fill<<<gridBlock,threadBlock>>>(in,out,size,in_size2,in_size3,out_size2,out_size3);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
325db11b9bad53bd393ddfbf5ff6cc26a7321667.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "caffe/layers/domain_transform_layer.hpp"
#include "caffe/layer.hpp"
#include "caffe/common.cuh"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void kernel_horizontal_filter_left_to_right_forward(
const int channels, const int height, const int width,
const int input_height, const int input_width,
const Dtype* weight, Dtype* intermediate_res, Dtype* output) {
// One thread per row.
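// Left-to-right recursive filter: out[w] = (1 - a) * out[w] + a * out[w-1],
// with a = weight at (h, w); the difference (out[w-1] - out[w]) is cached in
// intermediate_res for the backward pass.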
CUDA_KERNEL_LOOP(ind, channels * input_height) {
int h = ind % input_height;
int c = ind / input_height;
for (int w = 1; w < input_width; ++w) {
int ind_out = (c * height + h) * width + w;
int ind_wei = h * width + w;
intermediate_res[ind_out] = output[ind_out - 1] - output[ind_out];
output[ind_out] += weight[ind_wei] * intermediate_res[ind_out];
}
}
}
template <typename Dtype>
__global__ void kernel_horizontal_filter_left_to_right_backward(
const int channels, const int height, const int width,
const int input_height, const int input_width,
const Dtype* weight, const Dtype* intermediate_res,
Dtype* output, Dtype* weight_diff) {
// One thread per row.
CUDA_KERNEL_LOOP(ind, channels * input_height) {
int h = ind % input_height;
int c = ind / input_height;
for (int w = input_width - 1; w >= 1; --w) {
int ind_out = (c * height + h) * width + w;
int ind_wei = h * width + w;
atomicAdd(&weight_diff[ind_wei],
output[ind_out] * intermediate_res[ind_out]);
output[ind_out - 1] += weight[ind_wei] * output[ind_out];
output[ind_out] *= 1 - weight[ind_wei];
}
}
}
template <typename Dtype>
__global__ void kernel_horizontal_filter_right_to_left_forward(
const int channels, const int height, const int width,
const int input_height, const int input_width,
const Dtype* weight, Dtype* intermediate_res, Dtype* output) {
// One thread per row.
CUDA_KERNEL_LOOP(ind, channels * input_height) {
int h = ind % input_height;
int c = ind / input_height;
for (int w = input_width - 2; w >= 0; --w) {
int ind_out = (c * height + h) * width + w;
int ind_wei = h * width + w;
intermediate_res[ind_out] = output[ind_out + 1] - output[ind_out];
output[ind_out] += weight[ind_wei + 1] * intermediate_res[ind_out];
}
}
}
template <typename Dtype>
__global__ void kernel_horizontal_filter_right_to_left_backward(
const int channels, const int height, const int width,
const int input_height, const int input_width,
const Dtype* weight, const Dtype* intermediate_res,
Dtype* output, Dtype* weight_diff) {
// One thread per row.
CUDA_KERNEL_LOOP(ind, channels * input_height) {
int h = ind % input_height;
int c = ind / input_height;
for (int w = 0; w < input_width - 1; ++w) {
int ind_out = (c * height + h) * width + w;
int ind_wei = h * width + w;
atomicAdd(&weight_diff[ind_wei + 1],
output[ind_out] * intermediate_res[ind_out]);
output[ind_out + 1] += weight[ind_wei + 1] * output[ind_out];
output[ind_out] *= 1 - weight[ind_wei + 1];
}
}
}
template <typename Dtype>
__global__ void kernel_vertical_filter_top_to_bottom_forward(
const int channels, const int height, const int width,
const int input_height, const int input_width,
const Dtype* weight, Dtype* intermediate_res, Dtype* output) {
// One thread per column.
CUDA_KERNEL_LOOP(ind, channels * input_width) {
int w = ind % input_width;
int c = ind / input_width;
for (int h = 1; h < input_height; ++h) {
int ind_out = (c * height + h) * width + w;
int ind_wei = h * width + w;
intermediate_res[ind_out] = output[ind_out - width] - output[ind_out];
output[ind_out] += weight[ind_wei] * intermediate_res[ind_out];
}
}
}
template <typename Dtype>
__global__ void kernel_vertical_filter_top_to_bottom_backward(
const int channels, const int height, const int width,
const int input_height, const int input_width,
const Dtype* weight, const Dtype* intermediate_res,
Dtype* output, Dtype* weight_diff) {
// One thread per column.
CUDA_KERNEL_LOOP(ind, channels * input_width) {
int w = ind % input_width;
int c = ind / input_width;
for (int h = input_height - 1; h >= 1; --h) {
int ind_out = (c * height + h) * width + w;
int ind_wei = h * width + w;
atomicAdd(&weight_diff[ind_wei],
output[ind_out] * intermediate_res[ind_out]);
output[ind_out - width] += weight[ind_wei] * output[ind_out];
output[ind_out] = (1 - weight[ind_wei]) * output[ind_out];
}
}
}
template <typename Dtype>
__global__ void kernel_vertical_filter_bottom_to_top_forward(
const int channels, const int height, const int width,
const int input_height, const int input_width,
const Dtype* weight, Dtype* intermediate_res, Dtype* output) {
// One thread per column.
CUDA_KERNEL_LOOP(ind, channels * input_width) {
int w = ind % input_width;
int c = ind / input_width;
for (int h = input_height - 2; h >= 0; --h) {
int ind_out = (c * height + h) * width + w;
int ind_wei = h * width + w;
intermediate_res[ind_out] = output[ind_out + width] - output[ind_out];
output[ind_out] += weight[ind_wei + width] * intermediate_res[ind_out];
}
}
}
template <typename Dtype>
__global__ void kernel_vertical_filter_bottom_to_top_backward(
const int channels, const int height, const int width,
const int input_height, const int input_width,
const Dtype* weight, const Dtype* intermediate_res,
Dtype* output, Dtype* weight_diff) {
// One thread per column.
CUDA_KERNEL_LOOP(ind, channels * input_width) {
int w = ind % input_width;
int c = ind / input_width;
for (int h = 0; h < input_height - 1; ++h) {
int ind_out = (c * height + h) * width + w;
int ind_wei = h * width + w;
atomicAdd(&weight_diff[ind_wei + width],
output[ind_out] * intermediate_res[ind_out]);
output[ind_out + width] += weight[ind_wei + width] * output[ind_out];
output[ind_out] *= 1 - weight[ind_wei + width];
}
}
}
template <typename Dtype>
__global__ void kernel_setup_weight_image(
const int count, const int input_width, const int width,
const Dtype sigma_i, const Dtype spatial_sigma, const Dtype range_sigma,
const Dtype min_weight, const Dtype* data, Dtype* weight) {
// Division by zero has been checked in LayerSetUp.
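// weight = exp(-sqrt(2)/sigma_i * (1 + data * spatial_sigma/range_sigma)),
// clamped below by min_weight and above by 1.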
Dtype mult1 = -sqrt(2.) / sigma_i;
Dtype mult2 = spatial_sigma / range_sigma;
CUDA_KERNEL_LOOP(index, count) {
int h = index / input_width;
int w = index % input_width;
int pos = h * width + w;
// weight must be [min_weight_, 1]
weight[pos] = min(max(exp(mult1 * (1 + data[pos] * mult2)), min_weight), Dtype(1));
}
}
template <typename Dtype>
__global__ void kernel_compute_ref_grad_diff(
const int count, const int input_width, const int width,
const Dtype sigma_i, const Dtype spatial_sigma, const Dtype range_sigma,
const Dtype* weight, const Dtype* weight_diff, Dtype* ref_grad_diff) {
// Division by zero has been checked in LayerSetUp.
Dtype mult1 = -sqrt(2.) / sigma_i;
Dtype mult2 = spatial_sigma / range_sigma;
CUDA_KERNEL_LOOP(index, count) {
int h = index / input_width;
int w = index % input_width;
int pos = h * width + w;
ref_grad_diff[pos] += (mult1 * mult2 * weight_diff[pos] * weight[pos]);
}
}
template <typename Dtype>
void DomainTransformLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const int spatial_dim = height_ * width_;
const int sample_dim = channels_ * spatial_dim;
Dtype* weight = weight_image_.mutable_gpu_data();
for (int n = 0; n < num_; ++n) {
const Dtype* feat_data = bottom[0]->gpu_data_at(n);
Dtype* top_data = top[0]->mutable_gpu_data_at(n);
caffe_copy<Dtype>(sample_dim, feat_data, top_data);
const Dtype* ref_grad_data = bottom[1]->gpu_data_at(n);
const int input_height = static_cast<int>(bottom[2]->cpu_data_at(n)[0]);
const int input_width = static_cast<int>(bottom[2]->cpu_data_at(n)[1]);
const int input_spatial_dim = input_height * input_width;
CHECK_LE(input_height, height_) <<
"input_height should be less than or equal to height.";
CHECK_LE(input_width, width_) <<
"input_width should be less than or equal to width.";
for (int iter = 0; iter < num_iter_; ++iter) {
Dtype sigma_i = ComputeSigma(iter);
hipLaunchKernelGGL(( kernel_setup_weight_image<Dtype>), CAFFE_GET_BLOCKS(
input_spatial_dim), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
input_spatial_dim, input_width, width_,
sigma_i, spatial_sigma_, range_sigma_, min_weight_,
ref_grad_data, weight);
/* TODO(gpapan): This CUDA implementation is inefficient, because there
* are dependencies within each row or col, so you can only use height
* or width threads. You can improve this by doing all channels in
* parallel and also being more careful with your <<< . >>> arguments.
* You can further significantly improve speed by using BLAS *axpby()
* routines. Right now caffe_gpu_axpby is not sufficient because it
* assumes strides = 1, but you need to use the full BLAS interface
* that allows strides > 1.
* Overload caffe_gpu_axpby(), also supplying a version that accepts
* a stride parameter. Use this to significantly improve speed. Also
* adding this functionality to caffe_cpu_axpby() would further allow
* you to have almost identical cpu / gpu implementations.
*/
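// A rough sketch (an assumption, not an existing Caffe API) of the strided
// overload suggested above: cuBLAS level-1 routines already take increments,
// e.g. cublasSscal(handle, n, &beta, y, incy) followed by
// cublasSaxpy(handle, n, &alpha, x, incx, y, incy), so the wrapper would only
// need extra incx/incy parameters, e.g.
// caffe_gpu_axpby(const int N, const Dtype alpha, const Dtype* X, const int incx,
// const Dtype beta, Dtype* Y, const int incy);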
// Filter the input four times in the following (forward) orders:
// (0) left->right (1) right->left (2) top->bottom (3) bottom->top.
for (int pass = 0; pass < num_passes_; ++pass) {
int ind = iter * num_passes_ + pass;
Dtype* intermediate_res =
intermediate_results_[ind]->mutable_gpu_data_at(n);
switch (pass) {
case 0:
hipLaunchKernelGGL(( kernel_horizontal_filter_left_to_right_forward<Dtype>),
dim3(CAFFE_GET_BLOCKS(channels_ * input_height)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
channels_, height_, width_,
input_height, input_width,
weight, intermediate_res, top_data);
break;
case 1:
hipLaunchKernelGGL(( kernel_horizontal_filter_right_to_left_forward<Dtype>),
dim3(CAFFE_GET_BLOCKS(channels_ * input_height)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
channels_, height_, width_,
input_height, input_width,
weight, intermediate_res, top_data);
break;
case 2:
hipLaunchKernelGGL(( kernel_vertical_filter_top_to_bottom_forward<Dtype>),
dim3(CAFFE_GET_BLOCKS(channels_ * input_width)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
channels_, height_, width_,
input_height, input_width,
weight, intermediate_res, top_data);
break;
case 3:
hipLaunchKernelGGL(( kernel_vertical_filter_bottom_to_top_forward<Dtype>),
dim3(CAFFE_GET_BLOCKS(channels_ * input_width)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
channels_, height_, width_,
input_height, input_width,
weight, intermediate_res, top_data);
break;
}
}
}
}
}
template <typename Dtype>
void DomainTransformLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[2]) {
LOG(FATAL) << this->type()
<< " Layer cannot back-propagate to image dimension.";
}
if (propagate_down[0] || propagate_down[1]) {
const int spatial_dim = height_ * width_;
const int sample_dim = channels_ * spatial_dim;
// weight_diff is a temporary buffer shared for all samples.
Dtype* weight_diff = blob_weight_diff_.mutable_gpu_diff();
Dtype* weight = weight_image_.mutable_gpu_data();
for (int n = 0; n < num_; ++n) {
const Dtype* top_diff = top[0]->gpu_diff_at(n);
Dtype* bottom_input_diff = bottom[0]->mutable_gpu_diff_at(n);
Dtype* bottom_ref_grad_diff = bottom[1]->mutable_gpu_diff_at(n);
caffe_copy<Dtype>(sample_dim, top_diff, bottom_input_diff);
caffe_gpu_set<Dtype>(spatial_dim, Dtype(0), bottom_ref_grad_diff);
const Dtype* ref_grad_data = bottom[1]->gpu_data_at(n);
const int input_height = static_cast<int>(bottom[2]->cpu_data_at(n)[0]);
const int input_width = static_cast<int>(bottom[2]->cpu_data_at(n)[1]);
CHECK_LE(input_height, height_) <<
"input_height should be less than or equal to height.";
CHECK_LE(input_width, width_) <<
"input_width should be less than or equal to width.";
const int input_spatial_dim = input_height * input_width;
for (int iter = num_iter_ - 1; iter >= 0; --iter) {
Dtype sigma_i = ComputeSigma(iter);
hipLaunchKernelGGL(( kernel_setup_weight_image<Dtype>), CAFFE_GET_BLOCKS(
input_spatial_dim), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
input_spatial_dim, input_width, width_,
sigma_i, spatial_sigma_, range_sigma_, min_weight_,
ref_grad_data, weight);
caffe_gpu_set<Dtype>(spatial_dim, Dtype(0), weight_diff);
/* TODO(gpapan): This CUDA implementation is inefficient, because there
* are dependencies within each row or col, so you can only use height
* or width threads. You can improve this by doing all channels in
* parallel and also being more careful with your <<< . >>> arguments.
* You can further significantly improve speed by using BLAS *axpby()
* routines. Right now caffe_gpu_axpby is not sufficient because it
* assumes strides = 1, but you need to use the full BLAS interface
* that allows strides > 1.
* Overload caffe_gpu_axpby(), also supplying a version that accepts
* a stride parameter. Use this to significantly improve speed. Also
* adding this functionality to caffe_cpu_axpby() would further allow
* you to have almost identical cpu / gpu implementations.
*/
// Filter the input four times in the following (backward) orders:
// (3) bottom->top (2) top->bottom (1) right->left (0) left->right.
for (int pass = num_passes_ - 1; pass >= 0; --pass) {
int ind = iter * num_passes_ + pass;
Dtype* intermediate_res =
intermediate_results_[ind]->mutable_gpu_data_at(n);
switch (pass) {
case 0:
hipLaunchKernelGGL(( kernel_horizontal_filter_left_to_right_backward<Dtype>),
dim3(CAFFE_GET_BLOCKS(channels_ * input_height)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
channels_, height_, width_,
input_height, input_width,
weight, intermediate_res, bottom_input_diff, weight_diff);
break;
case 1:
hipLaunchKernelGGL(( kernel_horizontal_filter_right_to_left_backward<Dtype>),
dim3(CAFFE_GET_BLOCKS(channels_ * input_height)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
channels_, height_, width_,
input_height, input_width,
weight, intermediate_res, bottom_input_diff, weight_diff);
break;
case 2:
hipLaunchKernelGGL(( kernel_vertical_filter_top_to_bottom_backward<Dtype>),
dim3(CAFFE_GET_BLOCKS(channels_ * input_width)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
channels_, height_, width_,
input_height, input_width,
weight, intermediate_res, bottom_input_diff, weight_diff);
break;
case 3:
hipLaunchKernelGGL(( kernel_vertical_filter_bottom_to_top_backward<Dtype>),
dim3(CAFFE_GET_BLOCKS(channels_ * input_width)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
channels_, height_, width_,
input_height, input_width,
weight, intermediate_res, bottom_input_diff, weight_diff);
break;
}
}
hipLaunchKernelGGL(( kernel_compute_ref_grad_diff<Dtype>),
dim3(CAFFE_GET_BLOCKS(input_spatial_dim)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
input_spatial_dim, input_width, width_,
sigma_i, spatial_sigma_, range_sigma_,
weight, weight_diff, bottom_ref_grad_diff);
}
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(DomainTransformLayer);
} // namespace caffe
| 325db11b9bad53bd393ddfbf5ff6cc26a7321667.cu | #include <algorithm>
#include <vector>
#include "caffe/layers/domain_transform_layer.hpp"
#include "caffe/layer.hpp"
#include "caffe/common.cuh"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void kernel_horizontal_filter_left_to_right_forward(
const int channels, const int height, const int width,
const int input_height, const int input_width,
const Dtype* weight, Dtype* intermediate_res, Dtype* output) {
// One thread per row.
CUDA_KERNEL_LOOP(ind, channels * input_height) {
int h = ind % input_height;
int c = ind / input_height;
for (int w = 1; w < input_width; ++w) {
int ind_out = (c * height + h) * width + w;
int ind_wei = h * width + w;
intermediate_res[ind_out] = output[ind_out - 1] - output[ind_out];
output[ind_out] += weight[ind_wei] * intermediate_res[ind_out];
}
}
}
template <typename Dtype>
__global__ void kernel_horizontal_filter_left_to_right_backward(
const int channels, const int height, const int width,
const int input_height, const int input_width,
const Dtype* weight, const Dtype* intermediate_res,
Dtype* output, Dtype* weight_diff) {
// One thread per row.
CUDA_KERNEL_LOOP(ind, channels * input_height) {
int h = ind % input_height;
int c = ind / input_height;
for (int w = input_width - 1; w >= 1; --w) {
int ind_out = (c * height + h) * width + w;
int ind_wei = h * width + w;
atomicAdd(&weight_diff[ind_wei],
output[ind_out] * intermediate_res[ind_out]);
output[ind_out - 1] += weight[ind_wei] * output[ind_out];
output[ind_out] *= 1 - weight[ind_wei];
}
}
}
template <typename Dtype>
__global__ void kernel_horizontal_filter_right_to_left_forward(
const int channels, const int height, const int width,
const int input_height, const int input_width,
const Dtype* weight, Dtype* intermediate_res, Dtype* output) {
// One thread per row.
CUDA_KERNEL_LOOP(ind, channels * input_height) {
int h = ind % input_height;
int c = ind / input_height;
for (int w = input_width - 2; w >= 0; --w) {
int ind_out = (c * height + h) * width + w;
int ind_wei = h * width + w;
intermediate_res[ind_out] = output[ind_out + 1] - output[ind_out];
output[ind_out] += weight[ind_wei + 1] * intermediate_res[ind_out];
}
}
}
template <typename Dtype>
__global__ void kernel_horizontal_filter_right_to_left_backward(
const int channels, const int height, const int width,
const int input_height, const int input_width,
const Dtype* weight, const Dtype* intermediate_res,
Dtype* output, Dtype* weight_diff) {
// One thread per row.
CUDA_KERNEL_LOOP(ind, channels * input_height) {
int h = ind % input_height;
int c = ind / input_height;
for (int w = 0; w < input_width - 1; ++w) {
int ind_out = (c * height + h) * width + w;
int ind_wei = h * width + w;
atomicAdd(&weight_diff[ind_wei + 1],
output[ind_out] * intermediate_res[ind_out]);
output[ind_out + 1] += weight[ind_wei + 1] * output[ind_out];
output[ind_out] *= 1 - weight[ind_wei + 1];
}
}
}
template <typename Dtype>
__global__ void kernel_vertical_filter_top_to_bottom_forward(
const int channels, const int height, const int width,
const int input_height, const int input_width,
const Dtype* weight, Dtype* intermediate_res, Dtype* output) {
// One thread per column.
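// Top-to-bottom pass of the same recursive filter, applied down each column:
// out[h] = (1 - a) * out[h] + a * out[h-1], where out[h-1] is the pixel one row above.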
CUDA_KERNEL_LOOP(ind, channels * input_width) {
int w = ind % input_width;
int c = ind / input_width;
for (int h = 1; h < input_height; ++h) {
int ind_out = (c * height + h) * width + w;
int ind_wei = h * width + w;
intermediate_res[ind_out] = output[ind_out - width] - output[ind_out];
output[ind_out] += weight[ind_wei] * intermediate_res[ind_out];
}
}
}
template <typename Dtype>
__global__ void kernel_vertical_filter_top_to_bottom_backward(
const int channels, const int height, const int width,
const int input_height, const int input_width,
const Dtype* weight, const Dtype* intermediate_res,
Dtype* output, Dtype* weight_diff) {
// One thread per column.
CUDA_KERNEL_LOOP(ind, channels * input_width) {
int w = ind % input_width;
int c = ind / input_width;
for (int h = input_height - 1; h >= 1; --h) {
int ind_out = (c * height + h) * width + w;
int ind_wei = h * width + w;
atomicAdd(&weight_diff[ind_wei],
output[ind_out] * intermediate_res[ind_out]);
output[ind_out - width] += weight[ind_wei] * output[ind_out];
output[ind_out] = (1 - weight[ind_wei]) * output[ind_out];
}
}
}
template <typename Dtype>
__global__ void kernel_vertical_filter_bottom_to_top_forward(
const int channels, const int height, const int width,
const int input_height, const int input_width,
const Dtype* weight, Dtype* intermediate_res, Dtype* output) {
// One thread per column.
CUDA_KERNEL_LOOP(ind, channels * input_width) {
int w = ind % input_width;
int c = ind / input_width;
for (int h = input_height - 2; h >= 0; --h) {
int ind_out = (c * height + h) * width + w;
int ind_wei = h * width + w;
intermediate_res[ind_out] = output[ind_out + width] - output[ind_out];
output[ind_out] += weight[ind_wei + width] * intermediate_res[ind_out];
}
}
}
template <typename Dtype>
__global__ void kernel_vertical_filter_bottom_to_top_backward(
const int channels, const int height, const int width,
const int input_height, const int input_width,
const Dtype* weight, const Dtype* intermediate_res,
Dtype* output, Dtype* weight_diff) {
// One thread per column.
CUDA_KERNEL_LOOP(ind, channels * input_width) {
int w = ind % input_width;
int c = ind / input_width;
for (int h = 0; h < input_height - 1; ++h) {
int ind_out = (c * height + h) * width + w;
int ind_wei = h * width + w;
atomicAdd(&weight_diff[ind_wei + width],
output[ind_out] * intermediate_res[ind_out]);
output[ind_out + width] += weight[ind_wei + width] * output[ind_out];
output[ind_out] *= 1 - weight[ind_wei + width];
}
}
}
template <typename Dtype>
__global__ void kernel_setup_weight_image(
const int count, const int input_width, const int width,
const Dtype sigma_i, const Dtype spatial_sigma, const Dtype range_sigma,
const Dtype min_weight, const Dtype* data, Dtype* weight) {
// Division by zero has been checked in LayerSetUp.
Dtype mult1 = -sqrt(2.) / sigma_i;
Dtype mult2 = spatial_sigma / range_sigma;
CUDA_KERNEL_LOOP(index, count) {
int h = index / input_width;
int w = index % input_width;
int pos = h * width + w;
// weight must be [min_weight_, 1]
weight[pos] = min(max(exp(mult1 * (1 + data[pos] * mult2)), min_weight), Dtype(1));
}
}
template <typename Dtype>
__global__ void kernel_compute_ref_grad_diff(
const int count, const int input_width, const int width,
const Dtype sigma_i, const Dtype spatial_sigma, const Dtype range_sigma,
const Dtype* weight, const Dtype* weight_diff, Dtype* ref_grad_diff) {
// Division by zero has been checked in LayerSetUp.
Dtype mult1 = -sqrt(2.) / sigma_i;
Dtype mult2 = spatial_sigma / range_sigma;
CUDA_KERNEL_LOOP(index, count) {
int h = index / input_width;
int w = index % input_width;
int pos = h * width + w;
ref_grad_diff[pos] += (mult1 * mult2 * weight_diff[pos] * weight[pos]);
}
}
template <typename Dtype>
void DomainTransformLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const int spatial_dim = height_ * width_;
const int sample_dim = channels_ * spatial_dim;
Dtype* weight = weight_image_.mutable_gpu_data();
for (int n = 0; n < num_; ++n) {
const Dtype* feat_data = bottom[0]->gpu_data_at(n);
Dtype* top_data = top[0]->mutable_gpu_data_at(n);
caffe_copy<Dtype>(sample_dim, feat_data, top_data);
const Dtype* ref_grad_data = bottom[1]->gpu_data_at(n);
const int input_height = static_cast<int>(bottom[2]->cpu_data_at(n)[0]);
const int input_width = static_cast<int>(bottom[2]->cpu_data_at(n)[1]);
const int input_spatial_dim = input_height * input_width;
CHECK_LE(input_height, height_) <<
"input_height should be less than or equal to height.";
CHECK_LE(input_width, width_) <<
"input_width should be less than or equal to width.";
for (int iter = 0; iter < num_iter_; ++iter) {
Dtype sigma_i = ComputeSigma(iter);
kernel_setup_weight_image<Dtype><<<CAFFE_GET_BLOCKS(
input_spatial_dim), CAFFE_CUDA_NUM_THREADS>>>(
input_spatial_dim, input_width, width_,
sigma_i, spatial_sigma_, range_sigma_, min_weight_,
ref_grad_data, weight);
/* TODO(gpapan): This CUDA implementation is inefficient, because there
* are dependencies within each row or col, so you can only use height
* or width threads. You can improve this by doing all channels in
* parallel and also being more careful with your <<< . >>> arguments.
* You can further significantly improve speed by using BLAS *axpby()
* routines. Right now caffe_gpu_axpby is not sufficient because it
* assumes strides = 1, but you need to use the full BLAS interface
* that allows strides > 1.
* Overload caffe_gpu_axpby(), also supplying a version that accepts
* a stride parameter. Use this to significantly improve speed. Also
* adding this functionality to caffe_cpu_axpby() would further allow
* you to have almost identical cpu / gpu implementations.
*/
// Filter the input four times in the following (forward) orders:
// (0) left->right (1) right->left (2) top->bottom (3) bottom->top.
for (int pass = 0; pass < num_passes_; ++pass) {
int ind = iter * num_passes_ + pass;
Dtype* intermediate_res =
intermediate_results_[ind]->mutable_gpu_data_at(n);
switch (pass) {
case 0:
kernel_horizontal_filter_left_to_right_forward<Dtype><<<
CAFFE_GET_BLOCKS(channels_ * input_height),
CAFFE_CUDA_NUM_THREADS>>>(
channels_, height_, width_,
input_height, input_width,
weight, intermediate_res, top_data);
break;
case 1:
kernel_horizontal_filter_right_to_left_forward<Dtype><<<
CAFFE_GET_BLOCKS(channels_ * input_height),
CAFFE_CUDA_NUM_THREADS>>>(
channels_, height_, width_,
input_height, input_width,
weight, intermediate_res, top_data);
break;
case 2:
kernel_vertical_filter_top_to_bottom_forward<Dtype><<<
CAFFE_GET_BLOCKS(channels_ * input_width),
CAFFE_CUDA_NUM_THREADS>>>(
channels_, height_, width_,
input_height, input_width,
weight, intermediate_res, top_data);
break;
case 3:
kernel_vertical_filter_bottom_to_top_forward<Dtype><<<
CAFFE_GET_BLOCKS(channels_ * input_width),
CAFFE_CUDA_NUM_THREADS>>>(
channels_, height_, width_,
input_height, input_width,
weight, intermediate_res, top_data);
break;
}
}
}
}
}
template <typename Dtype>
void DomainTransformLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[2]) {
LOG(FATAL) << this->type()
<< " Layer cannot back-propagate to image dimension.";
}
if (propagate_down[0] || propagate_down[1]) {
const int spatial_dim = height_ * width_;
const int sample_dim = channels_ * spatial_dim;
// weight_diff is a temporary buffer shared for all samples.
Dtype* weight_diff = blob_weight_diff_.mutable_gpu_diff();
Dtype* weight = weight_image_.mutable_gpu_data();
for (int n = 0; n < num_; ++n) {
const Dtype* top_diff = top[0]->gpu_diff_at(n);
Dtype* bottom_input_diff = bottom[0]->mutable_gpu_diff_at(n);
Dtype* bottom_ref_grad_diff = bottom[1]->mutable_gpu_diff_at(n);
caffe_copy<Dtype>(sample_dim, top_diff, bottom_input_diff);
caffe_gpu_set<Dtype>(spatial_dim, Dtype(0), bottom_ref_grad_diff);
const Dtype* ref_grad_data = bottom[1]->gpu_data_at(n);
const int input_height = static_cast<int>(bottom[2]->cpu_data_at(n)[0]);
const int input_width = static_cast<int>(bottom[2]->cpu_data_at(n)[1]);
CHECK_LE(input_height, height_) <<
"input_height should be less than or equal to height.";
CHECK_LE(input_width, width_) <<
"input_width should be less than or equal to width.";
const int input_spatial_dim = input_height * input_width;
for (int iter = num_iter_ - 1; iter >= 0; --iter) {
Dtype sigma_i = ComputeSigma(iter);
kernel_setup_weight_image<Dtype><<<CAFFE_GET_BLOCKS(
input_spatial_dim), CAFFE_CUDA_NUM_THREADS>>>(
input_spatial_dim, input_width, width_,
sigma_i, spatial_sigma_, range_sigma_, min_weight_,
ref_grad_data, weight);
caffe_gpu_set<Dtype>(spatial_dim, Dtype(0), weight_diff);
/* TODO(gpapan): This CUDA implementation is inefficient, because there
* are dependencies within each row or col, so you can only use height
* or width threads. You can improve this by doing all channels in
* parallel and also being more careful with your <<< . >>> arguments.
* You can further significantly improve speed by using BLAS *axpby()
* routines. Right now caffe_gpu_axpby is not sufficient because it
* assumes strides = 1, but you need to use the full BLAS interface
* that allows strides > 1.
* Overload caffe_gpu_axpby(), also supplying a version that accepts
* a stride parameter. Use this to significantly improve speed. Also
* adding this functionality to caffe_cpu_axpby() would further allow
* you to have almost identical cpu / gpu implementations.
*/
// Filter the input four times in the following (backward) orders:
// (3) bottom->top (2) top->bottom (1) right->left (0) left->right.
for (int pass = num_passes_ - 1; pass >= 0; --pass) {
int ind = iter * num_passes_ + pass;
Dtype* intermediate_res =
intermediate_results_[ind]->mutable_gpu_data_at(n);
switch (pass) {
case 0:
kernel_horizontal_filter_left_to_right_backward<Dtype><<<
CAFFE_GET_BLOCKS(channels_ * input_height),
CAFFE_CUDA_NUM_THREADS>>>(
channels_, height_, width_,
input_height, input_width,
weight, intermediate_res, bottom_input_diff, weight_diff);
break;
case 1:
kernel_horizontal_filter_right_to_left_backward<Dtype><<<
CAFFE_GET_BLOCKS(channels_ * input_height),
CAFFE_CUDA_NUM_THREADS>>>(
channels_, height_, width_,
input_height, input_width,
weight, intermediate_res, bottom_input_diff, weight_diff);
break;
case 2:
kernel_vertical_filter_top_to_bottom_backward<Dtype><<<
CAFFE_GET_BLOCKS(channels_ * input_width),
CAFFE_CUDA_NUM_THREADS>>>(
channels_, height_, width_,
input_height, input_width,
weight, intermediate_res, bottom_input_diff, weight_diff);
break;
case 3:
kernel_vertical_filter_bottom_to_top_backward<Dtype><<<
CAFFE_GET_BLOCKS(channels_ * input_width),
CAFFE_CUDA_NUM_THREADS>>>(
channels_, height_, width_,
input_height, input_width,
weight, intermediate_res, bottom_input_diff, weight_diff);
break;
}
}
kernel_compute_ref_grad_diff<Dtype><<<
CAFFE_GET_BLOCKS(input_spatial_dim), CAFFE_CUDA_NUM_THREADS>>>(
input_spatial_dim, input_width, width_,
sigma_i, spatial_sigma_, range_sigma_,
weight, weight_diff, bottom_ref_grad_diff);
}
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(DomainTransformLayer);
} // namespace caffe
|
879842bbe6ee4d6eb16d2e0468d89bdf8a2a37c9.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <stdio.h>
using namespace std;
#include <sys/time.h>
#include <unistd.h>
#include <stdlib.h>
#include <cstdlib>
union FP32
{
unsigned int i;
float f;
};
union FP16
{
unsigned short int i;
__half f;
};
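// Single-warp WMMA test written in inline PTX: loads 16x16 half tiles of a
// (row-major) and b (column-major) plus a 16x16 float tile of c, performs one
// m16n16k16 multiply-accumulate, and stores the float result tile to dst.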
__global__ void test(float* dst, __half* a, __half* b, float* c){
asm volatile(
"ld.param.u64 %rd1, [_Z4testPfP6__halfS1_S__param_0];\n\t"
".reg .b32 a<8>, b<8>, c<8>,d<8>;\n\t"
"wmma.load.a.sync.aligned.m16n16k16.global.row.f16 {a0, a1, a2, a3, a4, a5, a6, a7}, [%1];\n\t"
"wmma.load.b.sync.aligned.m16n16k16.global.col.f16 {b0, b1, b2, b3, b4, b5, b6, b7}, [%2];\n\t"
"wmma.load.c.sync.aligned.m16n16k16.global.row.f32 {c0, c1, c2, c3, c4, c5, c6, c7}, [%3];\n\t"
"wmma.mma.sync.aligned.m16n16k16.row.col.f32.f32 {d0,d1,d2,d3,d4,d5,d6,d7}, {a0, a1, a2, a3, a4, a5, a6, a7}, {b0, b1, b2, b3, b4, b5, b6, b7}, {c0, c1, c2, c3, c4, c5, c6, c7};\n\t"
"wmma.store.d.sync.aligned.m16n16k16.global.row.f32 [%0], {d0,d1,d2,d3,d4,d5,d6,d7};" : "=l"(dst): "l"(a), "l"(b), "l"(c));
}
void InitOne(__half* a, const int n) {
for ( int i = 0; i < n; i++ ) {
a[i] = 1.0;
}
}
void InitZero(__half* a, const int n) {
for ( int i = 0; i < n; i++ ) {
a[i] = 0.0;
}
}
void InitZero_float(float* a, const int n) {
for ( int i = 0; i < n; i++ ) {
a[i] = 0.0;
}
}
void show(float * a, const int n) {
std::cout << std::endl;
for ( int i=0; i<n; i++){
std::cout<<a[i] << std::endl;
}
std::cout << std::endl;
}
int main(int argc, char** argv){
int size = 256;
__half* host_a=(__half*)malloc(sizeof(__half) * size);
__half* host_b=(__half*)malloc(sizeof(__half) * size);
float* host_c=(float*)malloc(sizeof(float) * size);
float* host_d=(float*)malloc(sizeof(float) * size);
__half* device_a=NULL;
__half* device_b=NULL;
float* device_c=NULL;
float* device_d=NULL;
hipMalloc((void**)(&device_a), sizeof(__half) * size);
hipMalloc((void**)(&device_b), sizeof(__half) * size);
hipMalloc((void**)(&device_c), sizeof(float) * size);
hipMalloc((void**)(&device_d), sizeof(float) * size);
InitOne(host_a, size);
InitOne(host_b, size);
InitZero_float(host_c, size);
InitZero_float(host_d, size);
//FP16 fp16;
//fp16.i = 0x3c00; host_a[0]=fp16.f;
//fp16.i = 0x3c00; host_a[1]=fp16.f;
//fp16.i = 0x3c00; host_a[2]=fp16.f;
//fp16.i = 0x3c00; host_a[3]=fp16.f;
//fp16.i = 0x3c00; host_a[4]=fp16.f;
//fp16.i = 0x3c00; host_a[5]=fp16.f;
//fp16.i = 0x3c00; host_a[6]=fp16.f;
//fp16.i = 0x3c00; host_a[7]=fp16.f;
FP32 fp32;
fp32.i = 0x4c000000; host_c[0]=fp32.f;
hipMemcpy((void*)device_a, (void*)host_a, sizeof(__half)* size, hipMemcpyHostToDevice);
hipMemcpy((void*)device_b, (void*)host_b, sizeof(__half)* size, hipMemcpyHostToDevice);
hipMemcpy((void*)device_c, (void*)host_c, sizeof(float)* size, hipMemcpyHostToDevice);
hipMemcpy((void*)device_d, (void*)host_d, sizeof(float)* size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( test), dim3(1),dim3(32), 0, 0, device_d, device_a, device_b, device_c);
hipDeviceSynchronize();
hipMemcpy((void*)host_d, (void*)device_d, sizeof(float) * size, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
//FP32 fp32;
fp32.f=host_d[0];
//std::cout<< host_d[0] << std::endl;
std::cout<< hex << fp32.i << std::endl;
//show(host_d, size);
}
| 879842bbe6ee4d6eb16d2e0468d89bdf8a2a37c9.cu | #include <iostream>
#include <cuda.h>
#include <cuda_fp16.h>
#include <stdio.h>
using namespace std;
#include <sys/time.h>
#include <unistd.h>
#include <stdlib.h>
#include <cstdlib>
union FP32
{
unsigned int i;
float f;
};
union FP16
{
unsigned short int i;
__half f;
};
__global__ void test(float* dst, __half* a, __half* b, float* c){
asm volatile(
"ld.param.u64 %rd1, [_Z4testPfP6__halfS1_S__param_0];\n\t"
".reg .b32 a<8>, b<8>, c<8>,d<8>;\n\t"
"wmma.load.a.sync.aligned.m16n16k16.global.row.f16 {a0, a1, a2, a3, a4, a5, a6, a7}, [%1];\n\t"
"wmma.load.b.sync.aligned.m16n16k16.global.col.f16 {b0, b1, b2, b3, b4, b5, b6, b7}, [%2];\n\t"
"wmma.load.c.sync.aligned.m16n16k16.global.row.f32 {c0, c1, c2, c3, c4, c5, c6, c7}, [%3];\n\t"
"wmma.mma.sync.aligned.m16n16k16.row.col.f32.f32 {d0,d1,d2,d3,d4,d5,d6,d7}, {a0, a1, a2, a3, a4, a5, a6, a7}, {b0, b1, b2, b3, b4, b5, b6, b7}, {c0, c1, c2, c3, c4, c5, c6, c7};\n\t"
"wmma.store.d.sync.aligned.m16n16k16.global.row.f32 [%0], {d0,d1,d2,d3,d4,d5,d6,d7};" : "=l"(dst): "l"(a), "l"(b), "l"(c));
}
void InitOne(__half* a, const int n) {
for ( int i = 0; i < n; i++ ) {
a[i] = 1.0;
}
}
void InitZero(__half* a, const int n) {
for ( int i = 0; i < n; i++ ) {
a[i] = 0.0;
}
}
void InitZero_float(float* a, const int n) {
for ( int i = 0; i < n; i++ ) {
a[i] = 0.0;
}
}
void show(float * a, const int n) {
std::cout << std::endl;
for ( int i=0; i<n; i++){
std::cout<<a[i] << std::endl;
}
std::cout << std::endl;
}
int main(int argc, char** argv){
int size = 256;
__half* host_a=(__half*)malloc(sizeof(__half) * size);
__half* host_b=(__half*)malloc(sizeof(__half) * size);
float* host_c=(float*)malloc(sizeof(float) * size);
float* host_d=(float*)malloc(sizeof(float) * size);
__half* device_a=NULL;
__half* device_b=NULL;
float* device_c=NULL;
float* device_d=NULL;
cudaMalloc((void**)(&device_a), sizeof(__half) * size);
cudaMalloc((void**)(&device_b), sizeof(__half) * size);
cudaMalloc((void**)(&device_c), sizeof(float) * size);
cudaMalloc((void**)(&device_d), sizeof(float) * size);
InitOne(host_a, size);
InitOne(host_b, size);
InitZero_float(host_c, size);
InitZero_float(host_d, size);
//FP16 fp16;
//fp16.i = 0x3c00; host_a[0]=fp16.f;
//fp16.i = 0x3c00; host_a[1]=fp16.f;
//fp16.i = 0x3c00; host_a[2]=fp16.f;
//fp16.i = 0x3c00; host_a[3]=fp16.f;
//fp16.i = 0x3c00; host_a[4]=fp16.f;
//fp16.i = 0x3c00; host_a[5]=fp16.f;
//fp16.i = 0x3c00; host_a[6]=fp16.f;
//fp16.i = 0x3c00; host_a[7]=fp16.f;
FP32 fp32;
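// 0x4c000000 is the IEEE-754 bit pattern of 2^25 (33554432.0f), seeded into c[0].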
fp32.i = 0x4c000000; host_c[0]=fp32.f;
cudaMemcpy((void*)device_a, (void*)host_a, sizeof(__half)* size, cudaMemcpyHostToDevice);
cudaMemcpy((void*)device_b, (void*)host_b, sizeof(__half)* size, cudaMemcpyHostToDevice);
cudaMemcpy((void*)device_c, (void*)host_c, sizeof(float)* size, cudaMemcpyHostToDevice);
cudaMemcpy((void*)device_d, (void*)host_d, sizeof(float)* size, cudaMemcpyHostToDevice);
test<<<1,32>>>(device_d, device_a, device_b, device_c);
cudaDeviceSynchronize();
cudaMemcpy((void*)host_d, (void*)device_d, sizeof(float) * size, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
//FP32 fp32;
fp32.f=host_d[0];
//std::cout<< host_d[0] << std::endl;
std::cout<< hex << fp32.i << std::endl;
//show(host_d, size);
}
|
2a065d39bc0295bc272e6f0283c7def849112a48.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "cuda/dcn_v2_im2col_cuda.h"
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THH.h>
#include <THH/THHAtomics.cuh>
#include <THH/THHDeviceUtils.cuh>
extern THCState *state;
// author: Charles Shang
// https://github.com/torch/cunn/blob/master/lib/THCUNN/generic/SpatialConvolutionMM.cu
// [batch gemm]
// https://github.com/pytorch/pytorch/blob/master/aten/src/THC/generic/THCTensorMathBlas.cu
__global__ void createBatchGemmBuffer(const float **input_b, float **output_b,
float **columns_b, const float **ones_b,
const float **weight_b, const float **bias_b,
float *input, float *output,
float *columns, float *ones,
float *weight, float *bias,
const int input_stride, const int output_stride,
const int columns_stride, const int ones_stride,
const int num_batches)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_batches)
{
input_b[idx] = input + idx * input_stride;
output_b[idx] = output + idx * output_stride;
columns_b[idx] = columns + idx * columns_stride;
ones_b[idx] = ones + idx * ones_stride;
// share weights and bias within a Mini-Batch
weight_b[idx] = weight;
bias_b[idx] = bias;
}
}
at::Tensor
dcn_v2_cuda_forward(const at::Tensor &input,
const at::Tensor &weight,
const at::Tensor &bias,
const at::Tensor &offset,
const at::Tensor &mask,
const int kernel_h,
const int kernel_w,
const int stride_h,
const int stride_w,
const int pad_h,
const int pad_w,
const int dilation_h,
const int dilation_w,
const int deformable_group)
{
using scalar_t = float;
// THCAssertSameGPU(THCudaTensor_checkGPU(state, 5, input, weight, bias, offset, mask));
AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor");
AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor");
AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor");
AT_ASSERTM(mask.type().is_cuda(), "mask must be a CUDA tensor");
const int batch = input.size(0);
const int channels = input.size(1);
const int height = input.size(2);
const int width = input.size(3);
const int channels_out = weight.size(0);
const int channels_kernel = weight.size(1);
const int kernel_h_ = weight.size(2);
const int kernel_w_ = weight.size(3);
// printf("Kernels: %d %d %d %d\n", kernel_h_, kernel_w_, kernel_w, kernel_h);
// printf("Channels: %d %d\n", channels, channels_kernel);
// printf("Channels: %d %d\n", channels_out, channels_kernel);
AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w,
"Input shape and kernel shape won't match: (%d x %d vs %d x %d).", kernel_h, kernel_w, kernel_h_, kernel_w_);
AT_ASSERTM(channels == channels_kernel,
"Input shape and kernel channels won't match: (%d vs %d).", channels, channels_kernel);
const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
auto ones = at::ones({batch, height_out, width_out}, input.options());
auto columns = at::empty({batch, channels * kernel_h * kernel_w, 1 * height_out * width_out}, input.options());
auto output = at::empty({batch, channels_out, height_out, width_out}, input.options());
// prepare for batch-wise computing, which is significantly faster than instance-wise computing
// when batch size is large.
// launch batch threads
int matrices_size = batch * sizeof(float *);
auto input_b = static_cast<const float **>(THCudaMalloc(state, matrices_size));
auto output_b = static_cast<float **>(THCudaMalloc(state, matrices_size));
auto columns_b = static_cast<float **>(THCudaMalloc(state, matrices_size));
auto ones_b = static_cast<const float **>(THCudaMalloc(state, matrices_size));
auto weight_b = static_cast<const float **>(THCudaMalloc(state, matrices_size));
auto bias_b = static_cast<const float **>(THCudaMalloc(state, matrices_size));
const int block = 128;
const int grid = (batch + block - 1) / block;
hipLaunchKernelGGL(( createBatchGemmBuffer), dim3(grid), dim3(block), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
input_b, output_b,
columns_b, ones_b,
weight_b, bias_b,
input.data<scalar_t>(),
output.data<scalar_t>(),
columns.data<scalar_t>(),
ones.data<scalar_t>(),
weight.data<scalar_t>(),
bias.data<scalar_t>(),
channels * width * height,
channels_out * width_out * height_out,
channels * kernel_h * kernel_w * height_out * width_out,
height_out * width_out,
batch);
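// Batched GEMM #1: broadcast the per-channel bias across all spatial
// positions of each output sample (output_b = ones_b * bias_b).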
long m_ = channels_out;
long n_ = height_out * width_out;
long k_ = 1;
THCudaBlas_SgemmBatched(state,
't',
'n',
n_,
m_,
k_,
1.0f,
ones_b, k_,
bias_b, k_,
0.0f,
output_b, n_,
batch);
modulated_deformable_im2col_cuda(c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
input.data<scalar_t>(),
offset.data<scalar_t>(),
mask.data<scalar_t>(),
batch, channels, height, width,
height_out, width_out, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
deformable_group,
columns.data<scalar_t>());
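// Batched GEMM #2: output_b += weight_b * columns_b, where columns_b holds the
// im2col'd, deformable-sampled input patches.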
long m = channels_out;
long n = height_out * width_out;
long k = channels * kernel_h * kernel_w;
THCudaBlas_SgemmBatched(state,
'n',
'n',
n,
m,
k,
1.0f,
(const float **)columns_b, n,
weight_b, k,
1.0f,
output_b, n,
batch);
THCudaFree(state, input_b);
THCudaFree(state, output_b);
THCudaFree(state, columns_b);
THCudaFree(state, ones_b);
THCudaFree(state, weight_b);
THCudaFree(state, bias_b);
return output;
}
__global__ void createBatchGemmBufferBackward(
float **grad_output_b,
float **columns_b,
float **ones_b,
float **weight_b,
float **grad_weight_b,
float **grad_bias_b,
float *grad_output,
float *columns,
float *ones,
float *weight,
float *grad_weight,
float *grad_bias,
const int grad_output_stride,
const int columns_stride,
const int ones_stride,
const int num_batches)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_batches)
{
grad_output_b[idx] = grad_output + idx * grad_output_stride;
columns_b[idx] = columns + idx * columns_stride;
ones_b[idx] = ones + idx * ones_stride;
// share weights and bias within a Mini-Batch
weight_b[idx] = weight;
grad_weight_b[idx] = grad_weight;
grad_bias_b[idx] = grad_bias;
}
}
std::vector<at::Tensor> dcn_v2_cuda_backward(const at::Tensor &input,
const at::Tensor &weight,
const at::Tensor &bias,
const at::Tensor &offset,
const at::Tensor &mask,
const at::Tensor &grad_output,
int kernel_h, int kernel_w,
int stride_h, int stride_w,
int pad_h, int pad_w,
int dilation_h, int dilation_w,
int deformable_group)
{
THArgCheck(input.is_contiguous(), 1, "input tensor has to be contiguous");
THArgCheck(weight.is_contiguous(), 2, "weight tensor has to be contiguous");
AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor");
AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor");
AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor");
AT_ASSERTM(mask.type().is_cuda(), "mask must be a CUDA tensor");
const int batch = input.size(0);
const int channels = input.size(1);
const int height = input.size(2);
const int width = input.size(3);
const int channels_out = weight.size(0);
const int channels_kernel = weight.size(1);
const int kernel_h_ = weight.size(2);
const int kernel_w_ = weight.size(3);
AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w,
"Input shape and kernel shape won't match: (%d x %d vs %d x %d).", kernel_h, kernel_w, kernel_h_, kernel_w_);
AT_ASSERTM(channels == channels_kernel,
"Input shape and kernel channels won't match: (%d vs %d).", channels, channels_kernel);
const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
auto ones = at::ones({height_out, width_out}, input.options());
auto columns = at::empty({channels * kernel_h * kernel_w, 1 * height_out * width_out}, input.options());
auto output = at::empty({batch, channels_out, height_out, width_out}, input.options());
auto grad_input = at::zeros_like(input);
auto grad_weight = at::zeros_like(weight);
auto grad_bias = at::zeros_like(bias);
auto grad_offset = at::zeros_like(offset);
auto grad_mask = at::zeros_like(mask);
using scalar_t = float;
for (int b = 0; b < batch; b++)
{
auto input_n = input.select(0, b);
auto offset_n = offset.select(0, b);
auto mask_n = mask.select(0, b);
auto grad_output_n = grad_output.select(0, b);
auto grad_input_n = grad_input.select(0, b);
auto grad_offset_n = grad_offset.select(0, b);
auto grad_mask_n = grad_mask.select(0, b);
long m = channels * kernel_h * kernel_w;
long n = height_out * width_out;
long k = channels_out;
THCudaBlas_Sgemm(state, 'n', 't', n, m, k, 1.0f,
grad_output_n.data<scalar_t>(), n,
weight.data<scalar_t>(), m, 0.0f,
columns.data<scalar_t>(), n);
// gradient w.r.t. input coordinate data
modulated_deformable_col2im_coord_cuda(c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
columns.data<scalar_t>(),
input_n.data<scalar_t>(),
offset_n.data<scalar_t>(),
mask_n.data<scalar_t>(),
1, channels, height, width,
height_out, width_out, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, deformable_group,
grad_offset_n.data<scalar_t>(),
grad_mask_n.data<scalar_t>());
// gradient w.r.t. input data
modulated_deformable_col2im_cuda(c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
columns.data<scalar_t>(),
offset_n.data<scalar_t>(),
mask_n.data<scalar_t>(),
1, channels, height, width,
height_out, width_out, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, deformable_group,
grad_input_n.data<scalar_t>());
// gradient w.r.t. weight, dWeight should accumulate across the batch and group
modulated_deformable_im2col_cuda(c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
input_n.data<scalar_t>(),
offset_n.data<scalar_t>(),
mask_n.data<scalar_t>(),
1, channels, height, width,
height_out, width_out, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, deformable_group,
columns.data<scalar_t>());
long m_ = channels_out;
long n_ = channels * kernel_h * kernel_w;
long k_ = height_out * width_out;
THCudaBlas_Sgemm(state, 't', 'n', n_, m_, k_, 1.0f,
columns.data<scalar_t>(), k_,
grad_output_n.data<scalar_t>(), k_, 1.0f,
grad_weight.data<scalar_t>(), n_);
// gradient w.r.t. bias
// long m_ = channels_out;
// long k__ = height_out * width_out;
THCudaBlas_Sgemv(state,
't',
k_, m_, 1.0f,
grad_output_n.data<scalar_t>(), k_,
ones.data<scalar_t>(), 1, 1.0f,
grad_bias.data<scalar_t>(), 1);
}
return {
grad_input, grad_offset, grad_mask, grad_weight, grad_bias
};
}
| 2a065d39bc0295bc272e6f0283c7def849112a48.cu | #include <vector>
#include "cuda/dcn_v2_im2col_cuda.h"
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include <THC/THCAtomics.cuh>
#include <THC/THCDeviceUtils.cuh>
extern THCState *state;
// author: Charles Shang
// https://github.com/torch/cunn/blob/master/lib/THCUNN/generic/SpatialConvolutionMM.cu
// [batch gemm]
// https://github.com/pytorch/pytorch/blob/master/aten/src/THC/generic/THCTensorMathBlas.cu
__global__ void createBatchGemmBuffer(const float **input_b, float **output_b,
float **columns_b, const float **ones_b,
const float **weight_b, const float **bias_b,
float *input, float *output,
float *columns, float *ones,
float *weight, float *bias,
const int input_stride, const int output_stride,
const int columns_stride, const int ones_stride,
const int num_batches)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_batches)
{
input_b[idx] = input + idx * input_stride;
output_b[idx] = output + idx * output_stride;
columns_b[idx] = columns + idx * columns_stride;
ones_b[idx] = ones + idx * ones_stride;
// share weights and bias within a Mini-Batch
weight_b[idx] = weight;
bias_b[idx] = bias;
}
}
at::Tensor
dcn_v2_cuda_forward(const at::Tensor &input,
const at::Tensor &weight,
const at::Tensor &bias,
const at::Tensor &offset,
const at::Tensor &mask,
const int kernel_h,
const int kernel_w,
const int stride_h,
const int stride_w,
const int pad_h,
const int pad_w,
const int dilation_h,
const int dilation_w,
const int deformable_group)
{
using scalar_t = float;
// THCAssertSameGPU(THCudaTensor_checkGPU(state, 5, input, weight, bias, offset, mask));
AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor");
AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor");
AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor");
AT_ASSERTM(mask.type().is_cuda(), "mask must be a CUDA tensor");
const int batch = input.size(0);
const int channels = input.size(1);
const int height = input.size(2);
const int width = input.size(3);
const int channels_out = weight.size(0);
const int channels_kernel = weight.size(1);
const int kernel_h_ = weight.size(2);
const int kernel_w_ = weight.size(3);
// printf("Kernels: %d %d %d %d\n", kernel_h_, kernel_w_, kernel_w, kernel_h);
// printf("Channels: %d %d\n", channels, channels_kernel);
// printf("Channels: %d %d\n", channels_out, channels_kernel);
AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w,
"Input shape and kernel shape won't match: (%d x %d vs %d x %d).", kernel_h, kernel_w, kernel_h_, kernel_w_);
AT_ASSERTM(channels == channels_kernel,
"Input shape and kernel channels won't match: (%d vs %d).", channels, channels_kernel);
const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
auto ones = at::ones({batch, height_out, width_out}, input.options());
auto columns = at::empty({batch, channels * kernel_h * kernel_w, 1 * height_out * width_out}, input.options());
auto output = at::empty({batch, channels_out, height_out, width_out}, input.options());
// prepare for batch-wise computing, which is significantly faster than instance-wise computing
// when batch size is large.
// launch batch threads
int matrices_size = batch * sizeof(float *);
auto input_b = static_cast<const float **>(THCudaMalloc(state, matrices_size));
auto output_b = static_cast<float **>(THCudaMalloc(state, matrices_size));
auto columns_b = static_cast<float **>(THCudaMalloc(state, matrices_size));
auto ones_b = static_cast<const float **>(THCudaMalloc(state, matrices_size));
auto weight_b = static_cast<const float **>(THCudaMalloc(state, matrices_size));
auto bias_b = static_cast<const float **>(THCudaMalloc(state, matrices_size));
const int block = 128;
const int grid = (batch + block - 1) / block;
createBatchGemmBuffer<<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>(
input_b, output_b,
columns_b, ones_b,
weight_b, bias_b,
input.data<scalar_t>(),
output.data<scalar_t>(),
columns.data<scalar_t>(),
ones.data<scalar_t>(),
weight.data<scalar_t>(),
bias.data<scalar_t>(),
channels * width * height,
channels_out * width_out * height_out,
channels * kernel_h * kernel_w * height_out * width_out,
height_out * width_out,
batch);
long m_ = channels_out;
long n_ = height_out * width_out;
long k_ = 1;
THCudaBlas_SgemmBatched(state,
't',
'n',
n_,
m_,
k_,
1.0f,
ones_b, k_,
bias_b, k_,
0.0f,
output_b, n_,
batch);
modulated_deformable_im2col_cuda(c10::cuda::getCurrentCUDAStream(),
input.data<scalar_t>(),
offset.data<scalar_t>(),
mask.data<scalar_t>(),
batch, channels, height, width,
height_out, width_out, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
deformable_group,
columns.data<scalar_t>());
long m = channels_out;
long n = height_out * width_out;
long k = channels * kernel_h * kernel_w;
THCudaBlas_SgemmBatched(state,
'n',
'n',
n,
m,
k,
1.0f,
(const float **)columns_b, n,
weight_b, k,
1.0f,
output_b, n,
batch);
THCudaFree(state, input_b);
THCudaFree(state, output_b);
THCudaFree(state, columns_b);
THCudaFree(state, ones_b);
THCudaFree(state, weight_b);
THCudaFree(state, bias_b);
return output;
}
__global__ void createBatchGemmBufferBackward(
float **grad_output_b,
float **columns_b,
float **ones_b,
float **weight_b,
float **grad_weight_b,
float **grad_bias_b,
float *grad_output,
float *columns,
float *ones,
float *weight,
float *grad_weight,
float *grad_bias,
const int grad_output_stride,
const int columns_stride,
const int ones_stride,
const int num_batches)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_batches)
{
grad_output_b[idx] = grad_output + idx * grad_output_stride;
columns_b[idx] = columns + idx * columns_stride;
ones_b[idx] = ones + idx * ones_stride;
// share weights and bias within a Mini-Batch
weight_b[idx] = weight;
grad_weight_b[idx] = grad_weight;
grad_bias_b[idx] = grad_bias;
}
}
std::vector<at::Tensor> dcn_v2_cuda_backward(const at::Tensor &input,
const at::Tensor &weight,
const at::Tensor &bias,
const at::Tensor &offset,
const at::Tensor &mask,
const at::Tensor &grad_output,
int kernel_h, int kernel_w,
int stride_h, int stride_w,
int pad_h, int pad_w,
int dilation_h, int dilation_w,
int deformable_group)
{
THArgCheck(input.is_contiguous(), 1, "input tensor has to be contiguous");
THArgCheck(weight.is_contiguous(), 2, "weight tensor has to be contiguous");
AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor");
AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor");
AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor");
AT_ASSERTM(mask.type().is_cuda(), "mask must be a CUDA tensor");
const int batch = input.size(0);
const int channels = input.size(1);
const int height = input.size(2);
const int width = input.size(3);
const int channels_out = weight.size(0);
const int channels_kernel = weight.size(1);
const int kernel_h_ = weight.size(2);
const int kernel_w_ = weight.size(3);
AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w,
"Input shape and kernel shape won't match: (%d x %d vs %d x %d).", kernel_h, kernel_w, kernel_h_, kernel_w_);
AT_ASSERTM(channels == channels_kernel,
"Input shape and kernel channels won't match: (%d vs %d).", channels, channels_kernel);
const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
auto ones = at::ones({height_out, width_out}, input.options());
auto columns = at::empty({channels * kernel_h * kernel_w, 1 * height_out * width_out}, input.options());
auto output = at::empty({batch, channels_out, height_out, width_out}, input.options());
auto grad_input = at::zeros_like(input);
auto grad_weight = at::zeros_like(weight);
auto grad_bias = at::zeros_like(bias);
auto grad_offset = at::zeros_like(offset);
auto grad_mask = at::zeros_like(mask);
using scalar_t = float;
for (int b = 0; b < batch; b++)
{
auto input_n = input.select(0, b);
auto offset_n = offset.select(0, b);
auto mask_n = mask.select(0, b);
auto grad_output_n = grad_output.select(0, b);
auto grad_input_n = grad_input.select(0, b);
auto grad_offset_n = grad_offset.select(0, b);
auto grad_mask_n = grad_mask.select(0, b);
long m = channels * kernel_h * kernel_w;
long n = height_out * width_out;
long k = channels_out;
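// Map the output gradient back to column (im2col) space:
// columns = weight^T * grad_output_n.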
THCudaBlas_Sgemm(state, 'n', 't', n, m, k, 1.0f,
grad_output_n.data<scalar_t>(), n,
weight.data<scalar_t>(), m, 0.0f,
columns.data<scalar_t>(), n);
// gradient w.r.t. input coordinate data
modulated_deformable_col2im_coord_cuda(c10::cuda::getCurrentCUDAStream(),
columns.data<scalar_t>(),
input_n.data<scalar_t>(),
offset_n.data<scalar_t>(),
mask_n.data<scalar_t>(),
1, channels, height, width,
height_out, width_out, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, deformable_group,
grad_offset_n.data<scalar_t>(),
grad_mask_n.data<scalar_t>());
// gradient w.r.t. input data
modulated_deformable_col2im_cuda(c10::cuda::getCurrentCUDAStream(),
columns.data<scalar_t>(),
offset_n.data<scalar_t>(),
mask_n.data<scalar_t>(),
1, channels, height, width,
height_out, width_out, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, deformable_group,
grad_input_n.data<scalar_t>());
// gradient w.r.t. weight, dWeight should accumulate across the batch and group
modulated_deformable_im2col_cuda(c10::cuda::getCurrentCUDAStream(),
input_n.data<scalar_t>(),
offset_n.data<scalar_t>(),
mask_n.data<scalar_t>(),
1, channels, height, width,
height_out, width_out, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, deformable_group,
columns.data<scalar_t>());
long m_ = channels_out;
long n_ = channels * kernel_h * kernel_w;
long k_ = height_out * width_out;
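// Accumulate the weight gradient over the batch (beta = 1):
// grad_weight += grad_output_n * columns^T.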
THCudaBlas_Sgemm(state, 't', 'n', n_, m_, k_, 1.0f,
columns.data<scalar_t>(), k_,
grad_output_n.data<scalar_t>(), k_, 1.0f,
grad_weight.data<scalar_t>(), n_);
// gradient w.r.t. bias
// long m_ = channels_out;
// long k__ = height_out * width_out;
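// Accumulate the bias gradient: grad_bias += grad_output_n * ones,
// i.e. the output gradient summed over all spatial positions.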
THCudaBlas_Sgemv(state,
't',
k_, m_, 1.0f,
grad_output_n.data<scalar_t>(), k_,
ones.data<scalar_t>(), 1, 1.0f,
grad_bias.data<scalar_t>(), 1);
}
return {
grad_input, grad_offset, grad_mask, grad_weight, grad_bias
};
}
|
c1b52054365822f712dfd9397443d1de5d1c9575.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void calculate_correlation(float *norm,float *corelation,float *miu_x,float *miu_y,float *stdx,float *stdy,int *ikj,float *dif_variance,int max,float sum,int size){
//printf("%d\n",max);
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int iy = threadIdx.y + blockIdx.y * blockDim.y;
unsigned int idx = iy * max + ix;
int tid=threadIdx.x;
int i;
for(i=0;i<max;i++){
if(idx>=i*max && idx<(i+1)*(max)){
miu_x[idx]=i*norm[idx];
//printf("%d,i %d %f %f \n",idx,i,miu_x[idx],norm[idx]);
}
//printf("xx %d %f\n",idx*i+idx,miu_x[idx]);
}
int blok=0;
for(i=0;i<max;i++){
if(blok==i && idx<max){
miu_y[blok*max+idx]=i*norm[idx*max+i];
//printf("%d %d,i %d %f %f %d \n",idx,idx,i,miu_y[idx],norm[idx*max+i],idx*max+i);
blok++;
}
//printf("xx %d %f\n",idx*i+idx,miu_x[idx]);
}
for(i=0;i<max;i++){
if(idx>=i*max && idx<(i+1)*(max)){
stdx[idx]=((i-miu_x[0])*(i-miu_x[0]))*norm[idx];
//printf("%d,i %d %f %f \n",idx,i,miu_x[idx],norm[idx]);
}
//printf("xx %d %f\n",idx*i+idx,miu_x[idx]);
}
int batas=0;
for(i=0;i<max;i++){
// printf("%d",batas);
if(batas==i && idx<max){
stdy[batas*max+idx]=((i-miu_y[0])*(i-miu_y[0]))*norm[idx*max+i];
//printf("%d %d,i %d %f %f %d \n",idx,idx,i,stdy[idx],norm[idx*max+i],idx*max+i);
batas++;
}
//printf("xx %d %f\n",idx*i+idx,miu_x[idx]);
}
if(idx==0){
for(i=0;i<max;i++){
for(int j=0;j<max;j++){
ikj[max*i+j]=i*j;
//printf("tid %d %d\n",max*i+j,ikj[max*i+j]);
}
}
}
if(idx<size){
corelation[idx]=((ikj[idx]*norm[idx]));
//printf("%d %d,i %d %f %f \n",idx,idx,i,corelation[idx],norm[idx]);
}
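// Naive interleaved tree reduction: sums corelation[] into corelation[0].
// Note the stride test keys on the block-local tid, so this assumes the
// elements fit in a single thread block and that size is a power of two.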
for (int stride = 1; stride < size; stride *= 2)
{
if ((tid % (2 * stride)) == 0)
{
corelation[idx] += corelation[idx+ stride];
//printf("%d %f\n",idx,corelation[idx]);
}
// synchronize within threadblock
__syncthreads();
}
for (int stride = 1; stride < size; stride *= 2)
{
if ((tid % (2 * stride)) == 0)
{
miu_x[idx] += miu_x[idx+ stride];
stdy[idx] += stdy[idx+ stride];
miu_y[idx] += miu_y[idx+ stride];
stdx[idx] += stdx[idx+ stride];
// corelation[idx] += corelation[idx+ stride];
//printf("%d %f\n",idx,miu_x[idx]);
}
// synchronize within threadblock
__syncthreads();
}
int k=0;
if(idx==0){
for(i=0;i<max;i++){
for(int j=0;j<max;j++){
k=abs(i-j);
dif_variance[k]=((k-((miu_x[0]+miu_y[0])/2))*(k-((miu_x[0]+miu_y[0])/2)))*norm[k];
if(k==i){
dif_variance[k]+=dif_variance[i];
//printf("%d %f %f %f \n",k,dif_variance[k],(k-((miu_x[0]+miu_y[0])/2)),norm[k]);
}
}
}
}
for (int stride = 1; stride < size; stride *= 2)
{
if ((tid % (2 * stride)) == 0)
{
dif_variance[idx] +=dif_variance[idx+stride];
}
// synchronize within threadblock
__syncthreads();
}
if (idx == 0){
printf("correlation %f\n",abs(corelation[0]-miu_x[0]*miu_y[0])/stdx[0]*stdy[0]);
printf("variance %f\n",stdx[0]);
printf("difference variance %f\n",dif_variance[0]);
}
} | c1b52054365822f712dfd9397443d1de5d1c9575.cu | #include "includes.h"
__global__ void calculate_correlation(float *norm,float *corelation,float *miu_x,float *miu_y,float *stdx,float *stdy,int *ikj,float *dif_variance,int max,float sum,int size){
//printf("%d\n",max);
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int iy = threadIdx.y + blockIdx.y * blockDim.y;
unsigned int idx = iy * max + ix;
int tid=threadIdx.x;
int i;
for(i=0;i<max;i++){
if(idx>=i*max && idx<(i+1)*(max)){
miu_x[idx]=i*norm[idx];
//printf("%d,i %d %f %f \n",idx,i,miu_x[idx],norm[idx]);
}
//printf("xx %d %f\n",idx*i+idx,miu_x[idx]);
}
int blok=0;
for(i=0;i<max;i++){
if(blok==i && idx<max){
miu_y[blok*max+idx]=i*norm[idx*max+i];
//printf("%d %d,i %d %f %f %d \n",idx,idx,i,miu_y[idx],norm[idx*max+i],idx*max+i);
blok++;
}
//printf("xx %d %f\n",idx*i+idx,miu_x[idx]);
}
for(i=0;i<max;i++){
if(idx>=i*max && idx<(i+1)*(max)){
stdx[idx]=((i-miu_x[0])*(i-miu_x[0]))*norm[idx];
//printf("%d,i %d %f %f \n",idx,i,miu_x[idx],norm[idx]);
}
//printf("xx %d %f\n",idx*i+idx,miu_x[idx]);
}
int batas=0;
for(i=0;i<max;i++){
// printf("%d",batas);
if(batas==i && idx<max){
stdy[batas*max+idx]=((i-miu_y[0])*(i-miu_y[0]))*norm[idx*max+i];
//printf("%d %d,i %d %f %f %d \n",idx,idx,i,stdy[idx],norm[idx*max+i],idx*max+i);
batas++;
}
//printf("xx %d %f\n",idx*i+idx,miu_x[idx]);
}
if(idx==0){
for(i=0;i<max;i++){
for(int j=0;j<max;j++){
ikj[max*i+j]=i*j;
//printf("tid %d %d\n",max*i+j,ikj[max*i+j]);
}
}
}
if(idx<size){
corelation[idx]=((ikj[idx]*norm[idx]));
//printf("%d %d,i %d %f %f \n",idx,idx,i,corelation[idx],norm[idx]);
}
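// Naive interleaved tree reduction: sums corelation[] into corelation[0].
// Note the stride test keys on the block-local tid, so this assumes the
// elements fit in a single thread block and that size is a power of two.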
for (int stride = 1; stride < size; stride *= 2)
{
if ((tid % (2 * stride)) == 0)
{
corelation[idx] += corelation[idx+ stride];
//printf("%d %f\n",idx,corelation[idx]);
}
// synchronize within threadblock
__syncthreads();
}
for (int stride = 1; stride < size; stride *= 2)
{
if ((tid % (2 * stride)) == 0)
{
miu_x[idx] += miu_x[idx+ stride];
stdy[idx] += stdy[idx+ stride];
miu_y[idx] += miu_y[idx+ stride];
stdx[idx] += stdx[idx+ stride];
// corelation[idx] += corelation[idx+ stride];
//printf("%d %f\n",idx,miu_x[idx]);
}
// synchronize within threadblock
__syncthreads();
}
int k=0;
if(idx==0){
for(i=0;i<max;i++){
for(int j=0;j<max;j++){
k=abs(i-j);
dif_variance[k]=((k-((miu_x[0]+miu_y[0])/2))*(k-((miu_x[0]+miu_y[0])/2)))*norm[k];
if(k==i){
dif_variance[k]+=dif_variance[i];
//printf("%d %f %f %f \n",k,dif_variance[k],(k-((miu_x[0]+miu_y[0])/2)),norm[k]);
}
}
}
}
for (int stride = 1; stride < size; stride *= 2)
{
if ((tid % (2 * stride)) == 0)
{
dif_variance[idx] +=dif_variance[idx+stride];
}
// synchronize within threadblock
__syncthreads();
}
if (idx == 0){
printf("correlation %f\n",abs(corelation[0]-miu_x[0]*miu_y[0])/stdx[0]*stdy[0]);
printf("variance %f\n",stdx[0]);
printf("difference variance %f\n",dif_variance[0]);
}
} |
bdf89dd140a2e09a6d305199afea3c7702cad0e6.hip | // !!! This is a file automatically generated by hipify!!!
//pass
//--blockDim=512 --gridDim=1 --no-inline
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <assert.h>
#define N 2 //512
__global__ void curand_test(hiprandState_t *state, float *A) {
A[threadIdx.x] = hiprand(&state[threadIdx.x]); // the pseudo random number returned by 'hiprand' is an unsigned int
}
| bdf89dd140a2e09a6d305199afea3c7702cad0e6.cu | //pass
//--blockDim=512 --gridDim=1 --no-inline
#include <cuda.h>
#include <stdio.h>
#include <assert.h>
#define N 2 //512
__global__ void curand_test(curandState *state, float *A) {
A[threadIdx.x] = curand(&state[threadIdx.x]); // the pseudo random number returned by 'curand' is an unsigned int
}
|
2f1d7b95f9d4e8e21e47aa6f3ba1b447b0c50e52.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void add(int n, float *x, float *y)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n)
y[i] = x[i] + y[i];
} | 2f1d7b95f9d4e8e21e47aa6f3ba1b447b0c50e52.cu | #include "includes.h"
__global__ void add(int n, float *x, float *y)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n)
y[i] = x[i] + y[i];
} |
ac6142e8b4cb4a69556db36dc5784745ffa81385.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@generated from zmgecsrmv.cu normal z -> c, Fri Sep 11 18:29:42 2015
*/
#include "common_magma.h"
#define BLOCK_SIZE 512
__global__ void
cmgecsrmv_kernel(
int num_rows,
int num_cols,
int num_vecs,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
magmaFloatComplex * dx,
magmaFloatComplex beta,
magmaFloatComplex * dy)
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
extern __shared__ magmaFloatComplex dot[];
if( row<num_rows ){
for( int i=0; i<num_vecs; i++ )
dot[ threadIdx.x+ i*blockDim.x ] = MAGMA_C_MAKE(0.0, 0.0);
int start = drowptr[ row ];
int end = drowptr[ row+1 ];
for( j=start; j<end; j++ ){
int col = dcolind [ j ];
magmaFloatComplex val = dval[ j ];
for( int i=0; i<num_vecs; i++ )
dot[ threadIdx.x + i*blockDim.x ] +=
val * dx[ col + i*num_cols ];
}
for( int i=0; i<num_vecs; i++ )
dy[ row +i*num_cols ] = alpha * dot[ threadIdx.x + i*blockDim.x ]
+ beta * dy[ row + i*num_cols ];
}
}
/**
Purpose
-------
This routine computes Y = alpha * A * X + beta * Y for X and Y sets of
num_vec vectors on the GPU. Input format is CSR.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
num_vecs    magma_int_t
number of vectors
@param[in]
alpha magmaFloatComplex
scalar multiplier
@param[in]
dval magmaFloatComplex_ptr
array containing values of A in CSR
@param[in]
drowptr magmaIndex_ptr
rowpointer of A in CSR
@param[in]
dcolind magmaIndex_ptr
columnindices of A in CSR
@param[in]
dx magmaFloatComplex_ptr
input vector x
@param[in]
beta magmaFloatComplex
scalar multiplier
@param[out]
dy magmaFloatComplex_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cblas
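Example (illustrative sketch only, not part of the original source; it
assumes the CSR matrix and the num_vecs input/output vectors already
live on the device):
magma_cmgecsrmv( MagmaNoTrans, m, n, num_vecs,
alpha, dval, drowptr, dcolind,
dx, beta, dy, queue );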
********************************************************************/
extern "C" magma_int_t
magma_cmgecsrmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t num_vecs,
magmaFloatComplex alpha,
magmaFloatComplex_ptr dval,
magmaIndex_ptr drowptr,
magmaIndex_ptr dcolind,
magmaFloatComplex_ptr dx,
magmaFloatComplex beta,
magmaFloatComplex_ptr dy,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ), 1, 1);
magma_int_t threads = BLOCK_SIZE;
unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE
* sizeof( magmaFloatComplex ); // num_vecs vectors
hipLaunchKernelGGL(( cmgecsrmv_kernel), dim3(grid), dim3(threads), MEM_SIZE , 0,
m, n, num_vecs, alpha, dval, drowptr, dcolind, dx, beta, dy);
return MAGMA_SUCCESS;
}
| ac6142e8b4cb4a69556db36dc5784745ffa81385.cu | /*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@generated from zmgecsrmv.cu normal z -> c, Fri Sep 11 18:29:42 2015
*/
#include "common_magma.h"
#define BLOCK_SIZE 512
__global__ void
cmgecsrmv_kernel(
int num_rows,
int num_cols,
int num_vecs,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
magmaFloatComplex * dx,
magmaFloatComplex beta,
magmaFloatComplex * dy)
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
extern __shared__ magmaFloatComplex dot[];
if( row<num_rows ){
for( int i=0; i<num_vecs; i++ )
dot[ threadIdx.x+ i*blockDim.x ] = MAGMA_C_MAKE(0.0, 0.0);
int start = drowptr[ row ];
int end = drowptr[ row+1 ];
for( j=start; j<end; j++ ){
int col = dcolind [ j ];
magmaFloatComplex val = dval[ j ];
for( int i=0; i<num_vecs; i++ )
dot[ threadIdx.x + i*blockDim.x ] +=
val * dx[ col + i*num_cols ];
}
for( int i=0; i<num_vecs; i++ )
dy[ row +i*num_cols ] = alpha * dot[ threadIdx.x + i*blockDim.x ]
+ beta * dy[ row + i*num_cols ];
}
}
/**
Purpose
-------
This routine computes Y = alpha * A * X + beta * Y for X and Y sets of
num_vec vectors on the GPU. Input format is CSR.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
num_vecs    magma_int_t
number of vectors
@param[in]
alpha magmaFloatComplex
scalar multiplier
@param[in]
dval magmaFloatComplex_ptr
array containing values of A in CSR
@param[in]
drowptr magmaIndex_ptr
rowpointer of A in CSR
@param[in]
dcolind magmaIndex_ptr
columnindices of A in CSR
@param[in]
dx magmaFloatComplex_ptr
input vector x
@param[in]
beta magmaFloatComplex
scalar multiplier
@param[out]
dy magmaFloatComplex_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cblas
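Example (illustrative sketch only, not part of the original source; it
assumes the CSR matrix and the num_vecs input/output vectors already
live on the device):
magma_cmgecsrmv( MagmaNoTrans, m, n, num_vecs,
alpha, dval, drowptr, dcolind,
dx, beta, dy, queue );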
********************************************************************/
extern "C" magma_int_t
magma_cmgecsrmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t num_vecs,
magmaFloatComplex alpha,
magmaFloatComplex_ptr dval,
magmaIndex_ptr drowptr,
magmaIndex_ptr dcolind,
magmaFloatComplex_ptr dx,
magmaFloatComplex beta,
magmaFloatComplex_ptr dy,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ), 1, 1);
magma_int_t threads = BLOCK_SIZE;
unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE
* sizeof( magmaFloatComplex ); // num_vecs vectors
cmgecsrmv_kernel<<< grid, threads, MEM_SIZE >>>
(m, n, num_vecs, alpha, dval, drowptr, dcolind, dx, beta, dy);
return MAGMA_SUCCESS;
}
|
9bd0157b261b68b65b5af3c1c8122ec5f7b8d7cf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "WrapWorldCUDA.cuh"
extern "C"
{
__global__ void WrapWorldKernel( float4 * pdPosition,
float3 const worldSize,
uint const numAgents
);
}
using namespace OpenSteer;
WrapWorldCUDA::WrapWorldCUDA( AgentGroup * pAgentGroup, float3 const& worldSize )
: AbstractCUDAKernel( pAgentGroup, 0.f, 0 ),
m_worldSize( worldSize )
{
// Nothing to do.
}
void WrapWorldCUDA::init( void )
{
// Nothing to do.
}
void WrapWorldCUDA::run( void )
{
dim3 grid = gridDim();
dim3 block = blockDim();
float4 * pdPosition = m_pAgentGroupData->pdPosition();
uint const numAgents = m_pAgentGroupData->size();
hipLaunchKernelGGL(( WrapWorldKernel), dim3(grid), dim3(block) , 0, 0, pdPosition,
m_worldSize,
numAgents
);
cutilCheckMsg( "WrapWorldKernel failed." );
//CUDA_SAFE_CALL( hipDeviceSynchronize() );
}
void WrapWorldCUDA::close( void )
{
// Agent data has possibly changed.
m_pAgentGroup->SetSyncHost();
}
| 9bd0157b261b68b65b5af3c1c8122ec5f7b8d7cf.cu | #include "WrapWorldCUDA.cuh"
extern "C"
{
__global__ void WrapWorldKernel( float4 * pdPosition,
float3 const worldSize,
uint const numAgents
);
}
using namespace OpenSteer;
WrapWorldCUDA::WrapWorldCUDA( AgentGroup * pAgentGroup, float3 const& worldSize )
: AbstractCUDAKernel( pAgentGroup, 0.f, 0 ),
m_worldSize( worldSize )
{
// Nothing to do.
}
void WrapWorldCUDA::init( void )
{
// Nothing to do.
}
void WrapWorldCUDA::run( void )
{
dim3 grid = gridDim();
dim3 block = blockDim();
float4 * pdPosition = m_pAgentGroupData->pdPosition();
uint const numAgents = m_pAgentGroupData->size();
WrapWorldKernel<<< grid, block >>>( pdPosition,
m_worldSize,
numAgents
);
cutilCheckMsg( "WrapWorldKernel failed." );
//CUDA_SAFE_CALL( cudaThreadSynchronize() );
}
void WrapWorldCUDA::close( void )
{
// Agent data has possibly changed.
m_pAgentGroup->SetSyncHost();
}
|
fc33b5d8e2dad855c2d1818574d20b649131ec00.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
//#include <cutil.h>
#define TILE_WIDTH 64
#define WIDTH_PER_THREAD 2
#define N 4096
void err_handling(hipError_t *err, const char *str)
{
if (*err != hipSuccess) {
printf("%s\n", str);
exit(EXIT_FAILURE);
}
}
__global__ void matMul(const float *A, const float *B, float *C, int m, int k, int n)
{
__shared__ float sh_A[TILE_WIDTH][TILE_WIDTH];
__shared__ float sh_B00[TILE_WIDTH/WIDTH_PER_THREAD][TILE_WIDTH/WIDTH_PER_THREAD];
__shared__ float sh_B01[TILE_WIDTH/WIDTH_PER_THREAD][TILE_WIDTH/WIDTH_PER_THREAD];
__shared__ float sh_B10[TILE_WIDTH/WIDTH_PER_THREAD][TILE_WIDTH/WIDTH_PER_THREAD];
__shared__ float sh_B11[TILE_WIDTH/WIDTH_PER_THREAD][TILE_WIDTH/WIDTH_PER_THREAD];
int x = threadIdx.x; int y = threadIdx.y;
int tx = threadIdx.x*WIDTH_PER_THREAD; int ty = threadIdx.y*WIDTH_PER_THREAD;
int r = blockIdx.y*TILE_WIDTH + y;
//int c = blockIdx.x*TILE_WIDTH + x;
int row = blockIdx.y*TILE_WIDTH + ty;
int col = blockIdx.x*TILE_WIDTH + tx;
float c00 = 0.0;
float c01 = 0.0;
float c10 = 0.0;
float c11 = 0.0;
float a00 = 0.0;
float a01 = 0.0;
float a10 = 0.0;
float a11 = 0.0;
float b00 = 0.0;
float b01 = 0.0;
float b10 = 0.0;
float b11 = 0.0;
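// March over the K dimension one 64x64 tile at a time: the 32x32 thread block
// stages the A tile and four B sub-tiles in shared memory, and each thread
// accumulates a 2x2 sub-block of C in registers (c00, c01, c10, c11).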
for (int t = 0; t < k/TILE_WIDTH; ++t) {
sh_A[y][x] = A[r*k + t*TILE_WIDTH + x];
sh_A[y][x+32] = A[r*k + t*TILE_WIDTH + x + 32];
sh_A[y+32][x] = A[(r+32)*k + t*TILE_WIDTH + x];
sh_A[y+32][x+32] = A[(r+32)*k + t*TILE_WIDTH + x + 32];
sh_B00[ty/WIDTH_PER_THREAD][tx/WIDTH_PER_THREAD] = B[(t*TILE_WIDTH + ty)*k + col];
sh_B01[ty/WIDTH_PER_THREAD][tx/WIDTH_PER_THREAD] = B[(t*TILE_WIDTH + ty)*k + col+1];
sh_B10[ty/WIDTH_PER_THREAD][tx/WIDTH_PER_THREAD] = B[(t*TILE_WIDTH + ty+1)*k + col];
sh_B11[ty/WIDTH_PER_THREAD][tx/WIDTH_PER_THREAD] = B[(t*TILE_WIDTH + ty+1)*k + col+1];
__syncthreads();
for (int i = 0; i < TILE_WIDTH; i += WIDTH_PER_THREAD) {
a00 = sh_A[ty][i];
a01 = sh_A[ty][i+1];
a10 = sh_A[ty+1][i];
a11 = sh_A[ty+1][i+1];
b00 = sh_B00[i/WIDTH_PER_THREAD][x];
b01 = sh_B01[i/WIDTH_PER_THREAD][x];
b10 = sh_B10[i/WIDTH_PER_THREAD][x];
b11 = sh_B11[i/WIDTH_PER_THREAD][x];
c00 += a00*b00 + a01*b10;
c01 += a00*b01 + a01*b11;
c10 += a10*b00 + a11*b10;
c11 += a10*b01 + a11*b11;
}
__syncthreads();
}
C[row*n + col] = c00;
C[row*n + col+1] = c01;
C[(row+1)*n + col] = c10;
C[(row+1)*n + col+1] = c11;
}
int main(void)
{
hipError_t err = hipSuccess;
int m = N;
int n = N;
int k = N;
float *A = (float*)malloc(m*k*sizeof(float));
float *B = (float*)malloc(k*n*sizeof(float));
float *C = (float*)malloc(m*n*sizeof(float));
if (A == NULL || B == NULL || C == NULL) {
printf("allocate host error!\n");
return 1;
}
for (int i = 0; i < m*k; ++i) {
A[i] = rand()/(float)RAND_MAX;
}
for (int i = 0; i < k*n; ++i) {
B[i] = rand()/(float)RAND_MAX;
}
for (int i = 0; i < m*n; ++i) {
C[i] = rand()/(float)RAND_MAX;
}
float *dev_A = NULL;
float *dev_B = NULL;
float *dev_C = NULL;
err = hipMalloc((void**)&dev_A, m*k*sizeof(float));
err_handling(&err, "allocate device error A!");
err = hipMalloc((void**)&dev_B, k*n*sizeof(float));
err_handling(&err, "allocate device error B!");
err = hipMalloc((void**)&dev_C, m*n*sizeof(float));
err_handling(&err, "allocate device error C!");
err = hipMemcpy(dev_A, A, m*k*sizeof(float), hipMemcpyHostToDevice);
err_handling(&err, "memcpy to A error!");
err = hipMemcpy(dev_B, B, k*n*sizeof(float), hipMemcpyHostToDevice);
err_handling(&err, "memcpy to B error!");
dim3 dimGrid((m-1)/TILE_WIDTH+1, (n-1)/TILE_WIDTH+1, 1);
dim3 dimBlock(TILE_WIDTH/WIDTH_PER_THREAD, TILE_WIDTH/WIDTH_PER_THREAD, 1);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
hipLaunchKernelGGL(( matMul), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_A, dev_B, dev_C, m, k, n);
hipEventRecord(stop, 0);
hipEventSynchronize(start);
hipEventSynchronize(stop);
float time_elapsed = 0;
hipEventElapsedTime(&time_elapsed, start, stop);
printf("%fms\n", time_elapsed);
err = hipMemcpy(C, dev_C, m*n*sizeof(float), hipMemcpyDeviceToHost);
err_handling(&err, "memcpy to host C error!");
printf("%f %f\n", C[100*N+100], C[234*N+234]);
err = hipFree(dev_A);
err_handling(&err, "mem free A error!");
err = hipFree(dev_B);
err_handling(&err, "mem free B error!");
err = hipFree(dev_C);
err_handling(&err, "mem free C error!");
err = hipDeviceReset();
err_handling(&err, "device reset error!");
return 0;
}
| fc33b5d8e2dad855c2d1818574d20b649131ec00.cu | #include <stdio.h>
#include <cuda_runtime.h>
//#include <cutil.h>
#define TILE_WIDTH 64
#define WIDTH_PER_THREAD 2
#define N 4096
void err_handling(cudaError_t *err, const char *str)
{
if (*err != cudaSuccess) {
printf("%s\n", str);
exit(EXIT_FAILURE);
}
}
__global__ void matMul(const float *A, const float *B, float *C, int m, int k, int n)
{
__shared__ float sh_A[TILE_WIDTH][TILE_WIDTH];
__shared__ float sh_B00[TILE_WIDTH/WIDTH_PER_THREAD][TILE_WIDTH/WIDTH_PER_THREAD];
__shared__ float sh_B01[TILE_WIDTH/WIDTH_PER_THREAD][TILE_WIDTH/WIDTH_PER_THREAD];
__shared__ float sh_B10[TILE_WIDTH/WIDTH_PER_THREAD][TILE_WIDTH/WIDTH_PER_THREAD];
__shared__ float sh_B11[TILE_WIDTH/WIDTH_PER_THREAD][TILE_WIDTH/WIDTH_PER_THREAD];
int x = threadIdx.x; int y = threadIdx.y;
int tx = threadIdx.x*WIDTH_PER_THREAD; int ty = threadIdx.y*WIDTH_PER_THREAD;
int r = blockIdx.y*TILE_WIDTH + y;
//int c = blockIdx.x*TILE_WIDTH + x;
int row = blockIdx.y*TILE_WIDTH + ty;
int col = blockIdx.x*TILE_WIDTH + tx;
float c00 = 0.0;
float c01 = 0.0;
float c10 = 0.0;
float c11 = 0.0;
float a00 = 0.0;
float a01 = 0.0;
float a10 = 0.0;
float a11 = 0.0;
float b00 = 0.0;
float b01 = 0.0;
float b10 = 0.0;
float b11 = 0.0;
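// March over the K dimension one 64x64 tile at a time: the 32x32 thread block
// stages the A tile and four B sub-tiles in shared memory, and each thread
// accumulates a 2x2 sub-block of C in registers (c00, c01, c10, c11).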
for (int t = 0; t < k/TILE_WIDTH; ++t) {
sh_A[y][x] = A[r*k + t*TILE_WIDTH + x];
sh_A[y][x+32] = A[r*k + t*TILE_WIDTH + x + 32];
sh_A[y+32][x] = A[(r+32)*k + t*TILE_WIDTH + x];
sh_A[y+32][x+32] = A[(r+32)*k + t*TILE_WIDTH + x + 32];
sh_B00[ty/WIDTH_PER_THREAD][tx/WIDTH_PER_THREAD] = B[(t*TILE_WIDTH + ty)*k + col];
sh_B01[ty/WIDTH_PER_THREAD][tx/WIDTH_PER_THREAD] = B[(t*TILE_WIDTH + ty)*k + col+1];
sh_B10[ty/WIDTH_PER_THREAD][tx/WIDTH_PER_THREAD] = B[(t*TILE_WIDTH + ty+1)*k + col];
sh_B11[ty/WIDTH_PER_THREAD][tx/WIDTH_PER_THREAD] = B[(t*TILE_WIDTH + ty+1)*k + col+1];
__syncthreads();
for (int i = 0; i < TILE_WIDTH; i += WIDTH_PER_THREAD) {
a00 = sh_A[ty][i];
a01 = sh_A[ty][i+1];
a10 = sh_A[ty+1][i];
a11 = sh_A[ty+1][i+1];
b00 = sh_B00[i/WIDTH_PER_THREAD][x];
b01 = sh_B01[i/WIDTH_PER_THREAD][x];
b10 = sh_B10[i/WIDTH_PER_THREAD][x];
b11 = sh_B11[i/WIDTH_PER_THREAD][x];
c00 += a00*b00 + a01*b10;
c01 += a00*b01 + a01*b11;
c10 += a10*b00 + a11*b10;
c11 += a10*b01 + a11*b11;
}
__syncthreads();
}
C[row*n + col] = c00;
C[row*n + col+1] = c01;
C[(row+1)*n + col] = c10;
C[(row+1)*n + col+1] = c11;
}
int main(void)
{
cudaError_t err = cudaSuccess;
int m = N;
int n = N;
int k = N;
float *A = (float*)malloc(m*k*sizeof(float));
float *B = (float*)malloc(k*n*sizeof(float));
float *C = (float*)malloc(m*n*sizeof(float));
if (A == NULL || B == NULL || C == NULL) {
printf("allocate host error!\n");
return 1;
}
for (int i = 0; i < m*k; ++i) {
A[i] = rand()/(float)RAND_MAX;
}
for (int i = 0; i < k*n; ++i) {
B[i] = rand()/(float)RAND_MAX;
}
for (int i = 0; i < m*n; ++i) {
C[i] = rand()/(float)RAND_MAX;
}
float *dev_A = NULL;
float *dev_B = NULL;
float *dev_C = NULL;
err = cudaMalloc((void**)&dev_A, m*k*sizeof(float));
err_handling(&err, "allocate device error A!");
err = cudaMalloc((void**)&dev_B, k*n*sizeof(float));
err_handling(&err, "allocate device error B!");
err = cudaMalloc((void**)&dev_C, m*n*sizeof(float));
err_handling(&err, "allocate device error C!");
err = cudaMemcpy(dev_A, A, m*k*sizeof(float), cudaMemcpyHostToDevice);
err_handling(&err, "memcpy to A error!");
err = cudaMemcpy(dev_B, B, k*n*sizeof(float), cudaMemcpyHostToDevice);
err_handling(&err, "memcpy to B error!");
dim3 dimGrid((m-1)/TILE_WIDTH+1, (n-1)/TILE_WIDTH+1, 1);
dim3 dimBlock(TILE_WIDTH/WIDTH_PER_THREAD, TILE_WIDTH/WIDTH_PER_THREAD, 1);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
matMul<<<dimGrid, dimBlock>>>(dev_A, dev_B, dev_C, m, k, n);
cudaEventRecord(stop, 0);
cudaEventSynchronize(start);
cudaEventSynchronize(stop);
float time_elapsed = 0;
cudaEventElapsedTime(&time_elapsed, start, stop);
printf("%fms\n", time_elapsed);
err = cudaMemcpy(C, dev_C, m*n*sizeof(float), cudaMemcpyDeviceToHost);
err_handling(&err, "memcpy to host C error!");
printf("%f %f\n", C[100*N+100], C[234*N+234]);
err = cudaFree(dev_A);
err_handling(&err, "mem free A error!");
err = cudaFree(dev_B);
err_handling(&err, "mem free B error!");
err = cudaFree(dev_C);
err_handling(&err, "mem free C error!");
err = cudaDeviceReset();
err_handling(&err, "device reset error!");
return 0;
}
|
17882a68ec953ed71472582085ad4cbb83f361be.hip | // !!! This is a file automatically generated by hipify!!!
#include <chrono>
#include <sstream>
#include <roctracer/roctx.h>
#include "argparse/argparse.hpp"
#include "stencil/stencil.hpp"
void bench(size_t *rBytes, double *rPackTime, double *rUnpackTime,
const Dim3 sz, const Dim3 dir, const int nIters) {
std::stringstream ss;
float ms;
const Dim3 origin(0,0,0);
LocalDomain ld(sz, origin, 0);
ld.add_data<float>();
ld.set_radius(3);
ld.realize();
std::vector<Message> msgs;
msgs.push_back(Message(dir, 0, 0));
RcStream stream(0);
DevicePacker packer(stream);
DeviceUnpacker unpacker(stream);
packer.prepare(&ld, msgs);
unpacker.prepare(&ld, msgs);
if (rBytes)
*rBytes = packer.size();
hipEvent_t startEvent, stopEvent;
CUDA_RUNTIME(hipEventCreate(&startEvent));
CUDA_RUNTIME(hipEventCreate(&stopEvent));
ss << dir << " pack";
roctxRangePush(ss.str().c_str());
CUDA_RUNTIME(hipEventRecord(startEvent, stream));
for (int n = 0; n < nIters; ++n) {
packer.pack();
}
CUDA_RUNTIME(hipEventRecord(stopEvent, stream));
CUDA_RUNTIME(hipStreamSynchronize(stream));
roctxRangePop();
CUDA_RUNTIME(hipEventElapsedTime(&ms, startEvent, stopEvent));
if (rPackTime)
*rPackTime = double(ms) / 1000 / nIters;
ss << dir << " unpack";
roctxRangePush(ss.str().c_str());
CUDA_RUNTIME(hipEventRecord(startEvent, stream));
for (int n = 0; n < nIters; ++n) {
unpacker.unpack();
}
CUDA_RUNTIME(hipEventRecord(stopEvent, stream));
CUDA_RUNTIME(hipStreamSynchronize(stream));
roctxRangePop();
CUDA_RUNTIME(hipEventElapsedTime(&ms, startEvent, stopEvent));
if (rUnpackTime)
*rUnpackTime = double(ms) / 1000 / nIters;
CUDA_RUNTIME(hipEventDestroy(startEvent));
CUDA_RUNTIME(hipEventDestroy(stopEvent));
}
int main(int argc, char **argv) {
(void)argc;
(void)argv;
int nIters = 30;
argparse::Parser p;
p.add_option(nIters, "--iters");
if (!p.parse(argc, argv)) {
std::cout << p.help();
exit(EXIT_FAILURE);
}
Dim3 ext, dir;
double packTime, unpackTime;
size_t bytes;
ext = Dim3(512, 512, 512);
dir = Dim3(1, 0, 0);
bench(&bytes, &packTime, &unpackTime, ext, dir, nIters);
std::cout << ext << " " << dir << " " << bytes << " " << packTime << " "
<< unpackTime << "\n";
ext = Dim3(512, 512, 512);
dir = Dim3(0, 1, 0);
bench(&bytes, &packTime, &unpackTime, ext, dir, nIters);
std::cout << ext << " " << dir << " " << bytes << " " << packTime << " "
<< unpackTime << "\n";
ext = Dim3(512, 512, 512);
dir = Dim3(0, 0, 1);
bench(&bytes, &packTime, &unpackTime, ext, dir, nIters);
std::cout << ext << " " << dir << " " << bytes << " " << packTime << " "
<< unpackTime << "\n";
return 0;
}
| 17882a68ec953ed71472582085ad4cbb83f361be.cu | #include <chrono>
#include <sstream>
#include <nvToolsExt.h>
#include "argparse/argparse.hpp"
#include "stencil/stencil.hpp"
void bench(size_t *rBytes, double *rPackTime, double *rUnpackTime,
const Dim3 sz, const Dim3 dir, const int nIters) {
std::stringstream ss;
float ms;
const Dim3 origin(0,0,0);
LocalDomain ld(sz, origin, 0);
ld.add_data<float>();
ld.set_radius(3);
ld.realize();
std::vector<Message> msgs;
msgs.push_back(Message(dir, 0, 0));
RcStream stream(0);
DevicePacker packer(stream);
DeviceUnpacker unpacker(stream);
packer.prepare(&ld, msgs);
unpacker.prepare(&ld, msgs);
if (rBytes)
*rBytes = packer.size();
cudaEvent_t startEvent, stopEvent;
CUDA_RUNTIME(cudaEventCreate(&startEvent));
CUDA_RUNTIME(cudaEventCreate(&stopEvent));
ss << dir << " pack";
nvtxRangePush(ss.str().c_str());
CUDA_RUNTIME(cudaEventRecord(startEvent, stream));
for (int n = 0; n < nIters; ++n) {
packer.pack();
}
CUDA_RUNTIME(cudaEventRecord(stopEvent, stream));
CUDA_RUNTIME(cudaStreamSynchronize(stream));
nvtxRangePop();
CUDA_RUNTIME(cudaEventElapsedTime(&ms, startEvent, stopEvent));
if (rPackTime)
*rPackTime = double(ms) / 1000 / nIters;
ss << dir << " unpack";
nvtxRangePush(ss.str().c_str());
CUDA_RUNTIME(cudaEventRecord(startEvent, stream));
for (int n = 0; n < nIters; ++n) {
unpacker.unpack();
}
CUDA_RUNTIME(cudaEventRecord(stopEvent, stream));
CUDA_RUNTIME(cudaStreamSynchronize(stream));
nvtxRangePop();
CUDA_RUNTIME(cudaEventElapsedTime(&ms, startEvent, stopEvent));
if (rUnpackTime)
*rUnpackTime = double(ms) / 1000 / nIters;
CUDA_RUNTIME(cudaEventDestroy(startEvent));
CUDA_RUNTIME(cudaEventDestroy(stopEvent));
}
int main(int argc, char **argv) {
(void)argc;
(void)argv;
int nIters = 30;
argparse::Parser p;
p.add_option(nIters, "--iters");
if (!p.parse(argc, argv)) {
std::cout << p.help();
exit(EXIT_FAILURE);
}
Dim3 ext, dir;
double packTime, unpackTime;
size_t bytes;
ext = Dim3(512, 512, 512);
dir = Dim3(1, 0, 0);
bench(&bytes, &packTime, &unpackTime, ext, dir, nIters);
std::cout << ext << " " << dir << " " << bytes << " " << packTime << " "
<< unpackTime << "\n";
ext = Dim3(512, 512, 512);
dir = Dim3(0, 1, 0);
bench(&bytes, &packTime, &unpackTime, ext, dir, nIters);
std::cout << ext << " " << dir << " " << bytes << " " << packTime << " "
<< unpackTime << "\n";
ext = Dim3(512, 512, 512);
dir = Dim3(0, 0, 1);
bench(&bytes, &packTime, &unpackTime, ext, dir, nIters);
std::cout << ext << " " << dir << " " << bytes << " " << packTime << " "
<< unpackTime << "\n";
return 0;
}
|
32157feb0266bc017b9e6e03bfc3f09613e5713e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__device__ int f_gpu(int k, int n)
{
int i, M;
M = 0;
for (i=0; i<k; i++)
M = M + (n-i-1);
return M;
}
__device__ int compute_delta_gpu(int* a, int* b, int* p, int i, int j, int n)
{
int d; int k;
d = ( a[i*n+i] - a[j*n+j] ) * ( b[ p[j]*n + p[j] ] - b[ p[i]*n + p[i] ] ) +
( a[i*n+j] - a[j*n+i] ) * ( b[ p[j]*n + p[i] ] - b[ p[i]*n + p[j] ] );
for (k=0; k<n; k++)
if (k != i && k != j)
d = d +( a[k*n+i] - a[k*n+j] ) * ( b[ p[k]*n + p[j] ] - b[ p[k]*n + p[i] ] ) +
( a[i*n+k] - a[j*n+k] ) * ( b[ p[j]*n + p[k] ] - b[ p[i]*n + p[k] ] );
return d;
}
__global__ void main_gpu(int *voisin_device, int *a_device, int *b_device, int *solution_device, int n)
{
int id = blockIdx.x * blockDim.x + threadIdx.x; // I = id
int k;
int i,j;
if (id < n*(n-1)/2)
{
// derive i and j from the linear index I (id)
k = 0;
while ( id >= f_gpu(k,n) )
k++;
k--;
i = k;
j = id - f_gpu(k,n) + k + 1;
// compute the delta for one neighbour and store it in the voisin_device array
voisin_device[id] = compute_delta_gpu(a_device, b_device, solution_device, i, j, n);
}
}
| 32157feb0266bc017b9e6e03bfc3f09613e5713e.cu | __device__ int f_gpu(int k, int n)
{
int i, M;
M = 0;
for (i=0; i<k; i++)
M = M + (n-i-1);
return M;
}
__device__ int compute_delta_gpu(int* a, int* b, int* p, int i, int j, int n)
{
int d; int k;
d = ( a[i*n+i] - a[j*n+j] ) * ( b[ p[j]*n + p[j] ] - b[ p[i]*n + p[i] ] ) +
( a[i*n+j] - a[j*n+i] ) * ( b[ p[j]*n + p[i] ] - b[ p[i]*n + p[j] ] );
for (k=0; k<n; k++)
if (k != i && k != j)
d = d +( a[k*n+i] - a[k*n+j] ) * ( b[ p[k]*n + p[j] ] - b[ p[k]*n + p[i] ] ) +
( a[i*n+k] - a[j*n+k] ) * ( b[ p[j]*n + p[k] ] - b[ p[i]*n + p[k] ] );
return d;
}
__global__ void main_gpu(int *voisin_device, int *a_device, int *b_device, int *solution_device, int n)
{
int id = blockIdx.x * blockDim.x + threadIdx.x; // I = id
int k;
int i,j;
if (id < n*(n-1)/2)
{
// derive i and j from the linear index I (id)
k = 0;
while ( id >= f_gpu(k,n) )
k++;
k--;
i = k;
j = id - f_gpu(k,n) + k + 1;
// compute the delta for one neighbour and store it in the voisin_device array
voisin_device[id] = compute_delta_gpu(a_device, b_device, solution_device, i, j, n);
}
}
|
7fcebb694a984462d3a261f7ef13f2b5051a8fe8.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <cstdlib>
#include <hip/hip_runtime_api.h>
#include <device_launch_parameters.h>
#include <hip/driver_types.h>
#include "hip/hip_runtime.h"
#include "vector_add.h"
#include "util.h"
/// <summary>
/// Vector Add Kernel that executes on device
/// </summary>
__global__ void vector_add_kernel(const float *a, const float *b, float *c, unsigned int N)
{
// Index calculation
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// Safe-check for any extra launched threads
if(idx < N)
{
c[idx] = a[idx] + b[idx];
}
}
/// <summary>
/// Launches the test case
/// </summary>
int vector_add::runner()
{
util::clear_screen();
printf("\n\n\n[VECTOR_ADD]: STARTING vector_add example.\n");
// -------------------------------------
// Definitions
printf("Initializing definitions\n");
const unsigned int no_of_elements = 128000000; // amount of total elements in vectors
const size_t size = no_of_elements * sizeof(float); // required size
const int threads_per_block = 512; // threads per block
const int blocks = (int)ceil((float)no_of_elements / threads_per_block); // calculate required blocks
float ms = 0, total_ms = 0;
hipEvent_t kernel_start, kernel_end, memcpy_to_start, memcpy_to_end, memcpy_from_start, memcpy_from_end; // Timing variables
hipEventCreate(&kernel_start);
hipEventCreate(&kernel_end);
hipEventCreate(&memcpy_to_start);
hipEventCreate(&memcpy_to_end);
hipEventCreate(&memcpy_from_start);
hipEventCreate(&memcpy_from_end);
printf("[Settings] Elements#: %u | required size: %zu bytes | threads per block: %d | calculated blocks: %d\n\n", no_of_elements, size, threads_per_block, blocks);
// -------------------------------------
// -------------------------------------
// Declare and allocate memory on HOST
printf("Allocating memory on HOST\n");
float* h_a = static_cast<float*>(malloc(size));
float* h_b = static_cast<float*>(malloc(size));
float* h_c = static_cast<float*>(malloc(size));
if (h_a == nullptr || h_b == nullptr || h_c == nullptr) return -1;
printf("DONE\n\n");
// -------------------------------------
// -------------------------------------
// Declare and allocate memory on DEVICE
printf("Allocating memory on DEVICE\n");
float* d_a, * d_b, * d_c;
hipMalloc(reinterpret_cast<void**>(&d_a), size);
CUDA_CHECK_ERROR(-2);
hipMalloc(reinterpret_cast<void**>(&d_b), size);
CUDA_CHECK_ERROR(-2);
hipMalloc(reinterpret_cast<void**>(&d_c), size);
CUDA_CHECK_ERROR(-2);
printf("DONE\n\n");
// -------------------------------------
// -------------------------------------
// Init HOST input vector data
printf("Initializing HOST input vectors (all set to 1.0f)\n");
for (int i = 0; i < no_of_elements; i++)
{
h_a[i] = 1.0f;
h_b[i] = 1.0f;
}
printf("DONE\n\n");
// -------------------------------------
// -------------------------------------
// Copy HOST Input vectors to device
printf("COPYING input data from HOST to DEVICE\n");
hipEventRecord(memcpy_to_start);
CUDA_CHECK_ERROR(-3);
hipMemcpy(d_a, h_a, size, hipMemcpyHostToDevice);
CUDA_CHECK_ERROR(-3);
hipMemcpy(d_b, h_b, size, hipMemcpyHostToDevice);
CUDA_CHECK_ERROR(-3);
hipEventRecord(memcpy_to_end);
CUDA_CHECK_ERROR(-3);
printf("DONE\n\n");
// -------------------------------------
// -------------------------------------
// Kernel Launch
printf("LAUNCHING Kernel\n");
hipEventRecord(kernel_start);
CUDA_CHECK_ERROR(-4);
vector_add_kernel << <blocks, threads_per_block >> > (d_a, d_b, d_c, no_of_elements);
CUDA_CHECK_ERROR(-4);
printf("WAITING for kernel to finish execution\n");
hipDeviceSynchronize(); // BARRIER - Wait for kernel to finish execution
CUDA_CHECK_ERROR(-4);
hipEventRecord(kernel_end);
CUDA_CHECK_ERROR(-4);
printf("KERNEL finished executing\n");
// -------------------------------------
// -------------------------------------
// Copy results back to HOST
printf("COPYING result data from DEVICE to HOST\n");
hipEventRecord(memcpy_from_start);
CUDA_CHECK_ERROR(-5);
hipMemcpy(h_c, d_c, size, hipMemcpyDeviceToHost);
CUDA_CHECK_ERROR(-5);
hipEventRecord(memcpy_from_end);
CUDA_CHECK_ERROR(-5);
printf("DONE\n\n");
// -------------------------------------
// -------------------------------------
// Print result
printf("Device results sample:\n");
for (int i = 0; i < 10; i++)
{
printf("%0.2f ", h_c[i]);
}
printf("...\n\n");
// -------------------------------------
// -------------------------------------
// Time events
hipEventElapsedTime(&ms, memcpy_to_start, memcpy_to_end);
CUDA_CHECK_ERROR(-6);
total_ms += ms;
printf("Memcpy from HOST to DEVICE time: %f sec\n", ms / 1000.0);
hipEventElapsedTime(&ms, kernel_start, kernel_end);
CUDA_CHECK_ERROR(-6);
total_ms += ms;
printf("KERNEL execution time: %f sec\n", ms / 1000.0);
hipEventElapsedTime(&ms, memcpy_from_start, memcpy_from_end);
CUDA_CHECK_ERROR(-6);
total_ms += ms;
printf("Memcpy from DEVICE to HOST time: %f sec\n\n", ms / 1000.0);
printf("TOTAL Execution time: %f sec\n", total_ms / 1000.0);
// -------------------------------------
// -------------------------------------
// Free memory
hipFree(d_a);
CUDA_CHECK_ERROR(-7);
hipFree(d_b);
CUDA_CHECK_ERROR(-7);
hipFree(d_c);
CUDA_CHECK_ERROR(-7);
free(h_a);
free(h_b);
free(h_c);
// -------------------------------------
return 0;
}
| 7fcebb694a984462d3a261f7ef13f2b5051a8fe8.cu | #include <cstdio>
#include <cstdlib>
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include <driver_types.h>
#include "cuda_runtime.h"
#include "vector_add.h"
#include "util.h"
/// <summary>
/// Vector Add Kernel that executes on device
/// </summary>
__global__ void vector_add_kernel(const float *a, const float *b, float *c, unsigned int N)
{
// Index calculation
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// Safe-check for any extra launched threads
if(idx < N)
{
c[idx] = a[idx] + b[idx];
}
}
/// <summary>
/// Launches the test case
/// </summary>
int vector_add::runner()
{
util::clear_screen();
printf("\n\n\n[VECTOR_ADD]: STARTING vector_add example.\n");
// -------------------------------------
// Definitions
printf("Initializing definitions\n");
const unsigned int no_of_elements = 128000000; // amount of total elements in vectors
const size_t size = no_of_elements * sizeof(float); // required size
const int threads_per_block = 512; // threads per block
const int blocks = (int)ceil((float)no_of_elements / threads_per_block); // calculate required blocks
float ms = 0, total_ms = 0;
cudaEvent_t kernel_start, kernel_end, memcpy_to_start, memcpy_to_end, memcpy_from_start, memcpy_from_end; // Timing variables
cudaEventCreate(&kernel_start);
cudaEventCreate(&kernel_end);
cudaEventCreate(&memcpy_to_start);
cudaEventCreate(&memcpy_to_end);
cudaEventCreate(&memcpy_from_start);
cudaEventCreate(&memcpy_from_end);
printf("[Settings] Elements#: %u | required size: %zu bytes | threads per block: %d | calculated blocks: %d\n\n", no_of_elements, size, threads_per_block, blocks);
// -------------------------------------
// -------------------------------------
// Declare and allocate memory on HOST
printf("Allocating memory on HOST\n");
float* h_a = static_cast<float*>(malloc(size));
float* h_b = static_cast<float*>(malloc(size));
float* h_c = static_cast<float*>(malloc(size));
if (h_a == nullptr || h_b == nullptr || h_c == nullptr) return -1;
printf("DONE\n\n");
// -------------------------------------
// -------------------------------------
// Declare and allocate memory on DEVICE
printf("Allocating memory on DEVICE\n");
float* d_a, * d_b, * d_c;
cudaMalloc(reinterpret_cast<void**>(&d_a), size);
CUDA_CHECK_ERROR(-2);
cudaMalloc(reinterpret_cast<void**>(&d_b), size);
CUDA_CHECK_ERROR(-2);
cudaMalloc(reinterpret_cast<void**>(&d_c), size);
CUDA_CHECK_ERROR(-2);
printf("DONE\n\n");
// -------------------------------------
// -------------------------------------
// Init HOST input vector data
printf("Initializing HOST input vectors (all set to 1.0f)\n");
for (int i = 0; i < no_of_elements; i++)
{
h_a[i] = 1.0f;
h_b[i] = 1.0f;
}
printf("DONE\n\n");
// -------------------------------------
// -------------------------------------
// Copy HOST Input vectors to device
printf("COPYING input data from HOST to DEVICE\n");
cudaEventRecord(memcpy_to_start);
CUDA_CHECK_ERROR(-3);
cudaMemcpy(d_a, h_a, size, cudaMemcpyHostToDevice);
CUDA_CHECK_ERROR(-3);
cudaMemcpy(d_b, h_b, size, cudaMemcpyHostToDevice);
CUDA_CHECK_ERROR(-3);
cudaEventRecord(memcpy_to_end);
CUDA_CHECK_ERROR(-3);
printf("DONE\n\n");
// -------------------------------------
// -------------------------------------
// Kernel Launch
printf("LAUNCHING Kernel\n");
cudaEventRecord(kernel_start);
CUDA_CHECK_ERROR(-4);
vector_add_kernel << <blocks, threads_per_block >> > (d_a, d_b, d_c, no_of_elements);
CUDA_CHECK_ERROR(-4);
printf("WAITING for kernel to finish execution\n");
cudaDeviceSynchronize(); // BARRIER - Wait for kernel to finish execution
CUDA_CHECK_ERROR(-4);
cudaEventRecord(kernel_end);
CUDA_CHECK_ERROR(-4);
printf("KERNEL finished executing\n");
// -------------------------------------
// -------------------------------------
// Copy results back to HOST
printf("COPYING result data from DEVICE to HOST\n");
cudaEventRecord(memcpy_from_start);
CUDA_CHECK_ERROR(-5);
cudaMemcpy(h_c, d_c, size, cudaMemcpyDeviceToHost);
CUDA_CHECK_ERROR(-5);
cudaEventRecord(memcpy_from_end);
CUDA_CHECK_ERROR(-5);
printf("DONE\n\n");
// -------------------------------------
// -------------------------------------
// Print result
printf("Device results sample:\n");
for (int i = 0; i < 10; i++)
{
printf("%0.2f ", h_c[i]);
}
printf("...\n\n");
// -------------------------------------
// -------------------------------------
// Time events
cudaEventElapsedTime(&ms, memcpy_to_start, memcpy_to_end);
CUDA_CHECK_ERROR(-6);
total_ms += ms;
printf("Memcpy from HOST to DEVICE time: %f sec\n", ms / 1000.0);
cudaEventElapsedTime(&ms, kernel_start, kernel_end);
CUDA_CHECK_ERROR(-6);
total_ms += ms;
printf("KERNEL execution time: %f sec\n", ms / 1000.0);
cudaEventElapsedTime(&ms, memcpy_from_start, memcpy_from_end);
CUDA_CHECK_ERROR(-6);
total_ms += ms;
printf("Memcpy from DEVICE to HOST time: %f sec\n\n", ms / 1000.0);
printf("TOTAL Execution time: %f sec\n", total_ms / 1000.0);
// -------------------------------------
// -------------------------------------
// Free memory
cudaFree(d_a);
CUDA_CHECK_ERROR(-7);
cudaFree(d_b);
CUDA_CHECK_ERROR(-7);
cudaFree(d_c);
CUDA_CHECK_ERROR(-7);
free(h_a);
free(h_b);
free(h_c);
// -------------------------------------
return 0;
}
|
0ec99e887061b16f504ab42efe0f776ad6673759.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _GQD_API_CU_
#define _GQD_API_CU_
#include "gqd_api.h"
#include <stdio.h>
#include <stdlib.h>
#include <cutil_inline.h>
#include "cuda_header.cu"
#include "gqd.cu"
#include "gdd.cu"
#include "map.cu"
/**
* c[i] = a[i] + b[i]
*/
template<class T>
void gpu_add(const T* h_a, const T* h_b, T* h_c, const int len) {
//copy memory to GPU
T* d_a = NULL;
GPUMALLOC((void**)&d_a, sizeof(T)*len);
TOGPU(d_a, h_a, sizeof(T)*len);
T* d_b = NULL;
GPUMALLOC((void**)&d_b, sizeof(T)*len);
TOGPU(d_b, h_b, sizeof(T)*len);
T* d_c = NULL;
GPUMALLOC((void**)&d_c, sizeof(T)*len);
//kernel
const int numBlock = 128;
const int numThread = 128;
unsigned int timer = 0;
startTimer(timer);
hipLaunchKernelGGL(( map_add_kernel<T>), dim3(numBlock), dim3(numThread), 0, 0, d_a, d_b, d_c, len);
cutilCheckMsg("map_add_kernel");
CUDA_SAFE_CALL(hipDeviceSynchronize());
endTimer(timer, "GPU add kernel");
//copy results from GPU
FROMGPU(h_c, d_c, sizeof(T)*len);
//clean up
GPUFREE(d_a);
GPUFREE(d_b);
GPUFREE(d_c);
}
void gpu_dd_add(const GPU_dd* h_a, const GPU_dd* h_b,
GPU_dd* h_c, const int len) {
gpu_add<GPU_dd>(h_a, h_b, h_c, len);
}
void gpu_qd_add(const GPU_qd* h_a, const GPU_qd* h_b,
GPU_qd* h_c, const int len) {
gpu_add<GPU_qd>(h_a, h_b, h_c, len);
}
template<class T>
void gpu_sub(const T* h_a, const T* h_b, T* h_c, const int len) {
//copy memory to GPU
T* d_a = NULL;
GPUMALLOC((void**)&d_a, sizeof(T)*len);
TOGPU(d_a, h_a, sizeof(T)*len);
T* d_b = NULL;
GPUMALLOC((void**)&d_b, sizeof(T)*len);
TOGPU(d_b, h_b, sizeof(T)*len);
T* d_c = NULL;
GPUMALLOC((void**)&d_c, sizeof(T)*len);
//kernel
const int numBlock = 128;
const int numThread = 128;
unsigned int timer = 0;
startTimer(timer);
hipLaunchKernelGGL(( map_sub_kernel<T>), dim3(numBlock), dim3(numThread), 0, 0, d_a, d_b, d_c, len);
cutilCheckMsg("map_sub_kernel");
CUDA_SAFE_CALL(hipDeviceSynchronize());
endTimer(timer, "GPU sub kernel");
//copy results from GPU
FROMGPU(h_c, d_c, sizeof(T)*len);
//clean up
GPUFREE(d_a);
GPUFREE(d_b);
GPUFREE(d_c);
}
void gpu_dd_sub(const GPU_dd* h_a, const GPU_dd* h_b,
GPU_dd* h_c, const int len) {
gpu_sub<GPU_dd>(h_a, h_b, h_c, len);
}
/**
* c[i] = a[i] * b[i]
*/
template<class T>
void gpu_mul(const T* h_a, const T* h_b, T* h_c, const int len) {
//copy memory to GPU
T* d_a = NULL;
GPUMALLOC((void**)&d_a, sizeof(T)*len);
TOGPU(d_a, h_a, sizeof(T)*len);
T* d_b = NULL;
GPUMALLOC((void**)&d_b, sizeof(T)*len);
TOGPU(d_b, h_b, sizeof(T)*len);
T* d_c = NULL;
GPUMALLOC((void**)&d_c, sizeof(T)*len);
//kernel
const int numBlock = 128;
const int numThread = 128;
unsigned int timer = 0;
startTimer(timer);
hipLaunchKernelGGL(( map_mul_kernel<T>), dim3(numBlock), dim3(numThread), 0, 0, d_a, d_b, d_c, len);
cutilCheckMsg("map_mul_kernel");
CUDA_SAFE_CALL(hipDeviceSynchronize());
endTimer(timer, "GPU mul kernel");
//copy results from GPU
FROMGPU(h_c, d_c, sizeof(T)*len);
//clean up
GPUFREE(d_a);
GPUFREE(d_b);
GPUFREE(d_c);
}
void gpu_dd_mul(const GPU_dd* h_a, const GPU_dd* h_b,
GPU_dd* h_c, const int len) {
gpu_mul<GPU_dd>(h_a, h_b, h_c, len);
}
void gpu_qd_mul(const GPU_qd* h_a, const GPU_qd* h_b,
GPU_qd* h_c, const int len) {
gpu_mul<GPU_qd>(h_a, h_b, h_c, len);
}
/**
* c[i] = a[i] / b[i]
*/
template<class T>
void gpu_div(const T* h_a, const T* h_b, T* h_c, const int len) {
//copy memory to GPU
T* d_a = NULL;
GPUMALLOC((void**)&d_a, sizeof(T)*len);
TOGPU(d_a, h_a, sizeof(T)*len);
T* d_b = NULL;
GPUMALLOC((void**)&d_b, sizeof(T)*len);
TOGPU(d_b, h_b, sizeof(T)*len);
T* d_c = NULL;
GPUMALLOC((void**)&d_c, sizeof(T)*len);
//kernel
const int numBlock = 128;
const int numThread = 128;
unsigned int timer = 0;
startTimer(timer);
hipLaunchKernelGGL(( map_div_kernel<T>), dim3(numBlock), dim3(numThread), 0, 0, d_a, d_b, d_c, len);
cutilCheckMsg("map_div_kernel");
CUDA_SAFE_CALL(hipDeviceSynchronize());
endTimer(timer, "GPU div kernel");
//copy results from GPU
FROMGPU(h_c, d_c, sizeof(T)*len);
//clean up
GPUFREE(d_a);
GPUFREE(d_b);
GPUFREE(d_c);
}
void gpu_dd_div(const GPU_dd* h_a, const GPU_dd* h_b,
GPU_dd* h_c, const int len) {
gpu_div<GPU_dd>(h_a, h_b, h_c, len);
}
void gpu_qd_div(const GPU_qd* h_a, const GPU_qd* h_b,
GPU_qd* h_c, const int len) {
gpu_div<GPU_qd>(h_a, h_b, h_c, len);
}
/**
* c[i] = sqrt(a[i])
*/
template<class T>
void gpu_sqrt(const T* h_a, T* h_c, const int len) {
//copy memory to GPU
T* d_a = NULL;
GPUMALLOC((void**)&d_a, sizeof(T)*len);
TOGPU(d_a, h_a, sizeof(T)*len);
T* d_c = NULL;
GPUMALLOC((void**)&d_c, sizeof(T)*len);
//kernel
const int numBlock = 128;
const int numThread = 128;
unsigned int timer = 0;
startTimer(timer);
hipLaunchKernelGGL(( map_sqrt_kernel<T>), dim3(numBlock), dim3(numThread), 0, 0, d_a, d_c, len);
cutilCheckMsg("map_sqrt_kernel");
CUDA_SAFE_CALL(hipDeviceSynchronize());
endTimer(timer, "GPU sqrt kernel");
//copy results from GPU
FROMGPU(h_c, d_c, sizeof(T)*len);
//clean up
GPUFREE(d_a);
GPUFREE(d_c);
}
void gpu_dd_sqrt(const GPU_dd* h_a, GPU_dd* h_c, const int len) {
gpu_sqrt<GPU_dd>(h_a, h_c, len);
}
void gpu_qd_sqrt(const GPU_qd* h_a, GPU_qd* h_c, const int len) {
gpu_sqrt<GPU_qd>(h_a, h_c, len);
}
/**
* c[i] = exp(a[i])
*/
template<class T>
void gpu_exp(const T* h_a, T* h_c, const int len) {
//copy memory to GPU
T* d_a = NULL;
GPUMALLOC((void**)&d_a, sizeof(T)*len);
TOGPU(d_a, h_a, sizeof(T)*len);
T* d_c = NULL;
GPUMALLOC((void**)&d_c, sizeof(T)*len);
//kernel
const int numBlock = 128;
const int numThread = 128;
unsigned int timer = 0;
startTimer(timer);
hipLaunchKernelGGL(( map_exp_kernel<T>), dim3(numBlock), dim3(numThread), 0, 0, d_a, d_c, len);
cutilCheckMsg("map_exp_kernel");
CUDA_SAFE_CALL(hipDeviceSynchronize());
endTimer(timer, "GPU exp kernel");
//copy results from GPU
FROMGPU(h_c, d_c, sizeof(T)*len);
//clean up
GPUFREE(d_a);
GPUFREE(d_c);
}
void gpu_dd_exp(const GPU_dd* h_a, GPU_dd* h_c, const int len) {
gpu_exp<GPU_dd>(h_a, h_c, len);
}
void gpu_qd_exp(const GPU_qd* h_a, GPU_qd* h_c, const int len) {
gpu_exp<GPU_qd>(h_a, h_c, len);
}
/**
* c[i] = sin(a[i])
*/
void gpu_qd_sin(const GPU_qd* h_a, GPU_qd* h_c, const int len) {
#ifdef __DEVICE_EMULATION__
printf("NOTE: EMULATION mode\n");
#endif
//copy memory to GPU
GPU_qd* d_a = NULL;
GPUMALLOC((void**)&d_a, sizeof(GPU_qd)*len);
TOGPU(d_a, h_a, sizeof(GPU_qd)*len);
GPU_qd* d_c = NULL;
GPUMALLOC((void**)&d_c, sizeof(GPU_qd)*len);
//kernel
const int numBlock = 128;
const int numThread = 128;
unsigned int timer = 0;
startTimer(timer);
if((d_sin_table == NULL) || (d_cos_table == NULL)) {
printf("!!!sin or cos table is NULL.");
exit(0);
}
hipLaunchKernelGGL(( map_sin_kernel<GPU_qd>), dim3(numBlock), dim3(numThread), 0, 0, d_a, d_c, len,
d_sin_table, d_cos_table);
cutilCheckMsg("map_sin_kernel");
CUDA_SAFE_CALL(hipDeviceSynchronize());
endTimer(timer, "GPU sin kernel");
//copy results from GPU
FROMGPU(h_c, d_c, sizeof(GPU_qd)*len);
//clean up
GPUFREE(d_a);
GPUFREE(d_c);
}
void gpu_dd_sin(const GPU_dd* h_a, GPU_dd* h_c, const int len) {
//copy memory to GPU
GPU_dd* d_a = NULL;
GPUMALLOC((void**)&d_a, sizeof(GPU_dd)*len);
TOGPU(d_a, h_a, sizeof(GPU_dd)*len);
GPU_dd* d_c = NULL;
GPUMALLOC((void**)&d_c, sizeof(GPU_dd)*len);
//kernel
const int numBlock = 128;
const int numThread = 128;
unsigned int timer = 0;
startTimer(timer);
hipLaunchKernelGGL(( map_sin_kernel<GPU_dd>), dim3(numBlock), dim3(numThread), 0, 0, d_a, d_c, len,
d_dd_sin_table, d_dd_cos_table);
cutilCheckMsg("map_sin_kernel");
CUDA_SAFE_CALL(hipDeviceSynchronize());
endTimer(timer, "GPU sin kernel");
//copy results from GPU
FROMGPU(h_c, d_c, sizeof(GPU_dd)*len);
//clean up
GPUFREE(d_a);
GPUFREE(d_c);
}
#endif // _GQD_API_CU_
| 0ec99e887061b16f504ab42efe0f776ad6673759.cu | #ifndef _GQD_API_CU_
#define _GQD_API_CU_
#include "gqd_api.h"
#include <stdio.h>
#include <stdlib.h>
#include <cutil_inline.h>
#include "cuda_header.cu"
#include "gqd.cu"
#include "gdd.cu"
#include "map.cu"
/**
* c[i] = a[i] + b[i]
*/
template<class T>
void gpu_add(const T* h_a, const T* h_b, T* h_c, const int len) {
//copy memory to GPU
T* d_a = NULL;
GPUMALLOC((void**)&d_a, sizeof(T)*len);
TOGPU(d_a, h_a, sizeof(T)*len);
T* d_b = NULL;
GPUMALLOC((void**)&d_b, sizeof(T)*len);
TOGPU(d_b, h_b, sizeof(T)*len);
T* d_c = NULL;
GPUMALLOC((void**)&d_c, sizeof(T)*len);
//kernel
const int numBlock = 128;
const int numThread = 128;
unsigned int timer = 0;
startTimer(timer);
map_add_kernel<T><<<numBlock, numThread>>>(d_a, d_b, d_c, len);
cutilCheckMsg("map_add_kernel");
CUDA_SAFE_CALL(cudaThreadSynchronize());
endTimer(timer, "GPU add kernel");
//copy results from GPU
FROMGPU(h_c, d_c, sizeof(T)*len);
//clean up
GPUFREE(d_a);
GPUFREE(d_b);
GPUFREE(d_c);
}
void gpu_dd_add(const GPU_dd* h_a, const GPU_dd* h_b,
GPU_dd* h_c, const int len) {
gpu_add<GPU_dd>(h_a, h_b, h_c, len);
}
void gpu_qd_add(const GPU_qd* h_a, const GPU_qd* h_b,
GPU_qd* h_c, const int len) {
gpu_add<GPU_qd>(h_a, h_b, h_c, len);
}
template<class T>
void gpu_sub(const T* h_a, const T* h_b, T* h_c, const int len) {
//copy memory to GPU
T* d_a = NULL;
GPUMALLOC((void**)&d_a, sizeof(T)*len);
TOGPU(d_a, h_a, sizeof(T)*len);
T* d_b = NULL;
GPUMALLOC((void**)&d_b, sizeof(T)*len);
TOGPU(d_b, h_b, sizeof(T)*len);
T* d_c = NULL;
GPUMALLOC((void**)&d_c, sizeof(T)*len);
//kernel
const int numBlock = 128;
const int numThread = 128;
unsigned int timer = 0;
startTimer(timer);
map_sub_kernel<T><<<numBlock, numThread>>>(d_a, d_b, d_c, len);
cutilCheckMsg("map_sub_kernel");
CUDA_SAFE_CALL(cudaThreadSynchronize());
endTimer(timer, "GPU sub kernel");
//copy results from GPU
FROMGPU(h_c, d_c, sizeof(T)*len);
//clean up
GPUFREE(d_a);
GPUFREE(d_b);
GPUFREE(d_c);
}
void gpu_dd_sub(const GPU_dd* h_a, const GPU_dd* h_b,
GPU_dd* h_c, const int len) {
gpu_sub<GPU_dd>(h_a, h_b, h_c, len);
}
/**
* c[i] = a[i] * b[i]
*/
template<class T>
void gpu_mul(const T* h_a, const T* h_b, T* h_c, const int len) {
//copy memory to GPU
T* d_a = NULL;
GPUMALLOC((void**)&d_a, sizeof(T)*len);
TOGPU(d_a, h_a, sizeof(T)*len);
T* d_b = NULL;
GPUMALLOC((void**)&d_b, sizeof(T)*len);
TOGPU(d_b, h_b, sizeof(T)*len);
T* d_c = NULL;
GPUMALLOC((void**)&d_c, sizeof(T)*len);
//kernel
const int numBlock = 128;
const int numThread = 128;
unsigned int timer = 0;
startTimer(timer);
map_mul_kernel<T><<<numBlock, numThread>>>(d_a, d_b, d_c, len);
cutilCheckMsg("map_mul_kernel");
CUDA_SAFE_CALL(cudaThreadSynchronize());
endTimer(timer, "GPU mul kernel");
//copy results from GPU
FROMGPU(h_c, d_c, sizeof(T)*len);
//clean up
GPUFREE(d_a);
GPUFREE(d_b);
GPUFREE(d_c);
}
void gpu_dd_mul(const GPU_dd* h_a, const GPU_dd* h_b,
GPU_dd* h_c, const int len) {
gpu_mul<GPU_dd>(h_a, h_b, h_c, len);
}
void gpu_qd_mul(const GPU_qd* h_a, const GPU_qd* h_b,
GPU_qd* h_c, const int len) {
gpu_mul<GPU_qd>(h_a, h_b, h_c, len);
}
/**
* c[i] = a[i] / b[i]
*/
template<class T>
void gpu_div(const T* h_a, const T* h_b, T* h_c, const int len) {
//copy memory to GPU
T* d_a = NULL;
GPUMALLOC((void**)&d_a, sizeof(T)*len);
TOGPU(d_a, h_a, sizeof(T)*len);
T* d_b = NULL;
GPUMALLOC((void**)&d_b, sizeof(T)*len);
TOGPU(d_b, h_b, sizeof(T)*len);
T* d_c = NULL;
GPUMALLOC((void**)&d_c, sizeof(T)*len);
//kernel
const int numBlock = 128;
const int numThread = 128;
unsigned int timer = 0;
startTimer(timer);
map_div_kernel<T><<<numBlock, numThread>>>(d_a, d_b, d_c, len);
cutilCheckMsg("map_div_kernel");
CUDA_SAFE_CALL(cudaThreadSynchronize());
endTimer(timer, "GPU div kernel");
//copy results from GPU
FROMGPU(h_c, d_c, sizeof(T)*len);
//clean up
GPUFREE(d_a);
GPUFREE(d_b);
GPUFREE(d_c);
}
void gpu_dd_div(const GPU_dd* h_a, const GPU_dd* h_b,
GPU_dd* h_c, const int len) {
gpu_div<GPU_dd>(h_a, h_b, h_c, len);
}
void gpu_qd_div(const GPU_qd* h_a, const GPU_qd* h_b,
GPU_qd* h_c, const int len) {
gpu_div<GPU_qd>(h_a, h_b, h_c, len);
}
/**
* c[i] = sqrt(a[i])
*/
template<class T>
void gpu_sqrt(const T* h_a, T* h_c, const int len) {
//copy memory to GPU
T* d_a = NULL;
GPUMALLOC((void**)&d_a, sizeof(T)*len);
TOGPU(d_a, h_a, sizeof(T)*len);
T* d_c = NULL;
GPUMALLOC((void**)&d_c, sizeof(T)*len);
//kernel
const int numBlock = 128;
const int numThread = 128;
unsigned int timer = 0;
startTimer(timer);
map_sqrt_kernel<T><<<numBlock, numThread>>>(d_a, d_c, len);
cutilCheckMsg("map_sqrt_kernel");
CUDA_SAFE_CALL(cudaThreadSynchronize());
endTimer(timer, "GPU sqrt kernel");
//copy results from GPU
FROMGPU(h_c, d_c, sizeof(T)*len);
//clean up
GPUFREE(d_a);
GPUFREE(d_c);
}
void gpu_dd_sqrt(const GPU_dd* h_a, GPU_dd* h_c, const int len) {
gpu_sqrt<GPU_dd>(h_a, h_c, len);
}
void gpu_qd_sqrt(const GPU_qd* h_a, GPU_qd* h_c, const int len) {
gpu_sqrt<GPU_qd>(h_a, h_c, len);
}
/**
* c[i] = exp(a[i])
*/
template<class T>
void gpu_exp(const T* h_a, T* h_c, const int len) {
//copy memory to GPU
T* d_a = NULL;
GPUMALLOC((void**)&d_a, sizeof(T)*len);
TOGPU(d_a, h_a, sizeof(T)*len);
T* d_c = NULL;
GPUMALLOC((void**)&d_c, sizeof(T)*len);
//kernel
const int numBlock = 128;
const int numThread = 128;
unsigned int timer = 0;
startTimer(timer);
map_exp_kernel<T><<<numBlock, numThread>>>(d_a, d_c, len);
cutilCheckMsg("map_exp_kernel");
CUDA_SAFE_CALL(cudaThreadSynchronize());
endTimer(timer, "GPU exp kernel");
//copy results from GPU
FROMGPU(h_c, d_c, sizeof(T)*len);
//clean up
GPUFREE(d_a);
GPUFREE(d_c);
}
void gpu_dd_exp(const GPU_dd* h_a, GPU_dd* h_c, const int len) {
gpu_exp<GPU_dd>(h_a, h_c, len);
}
void gpu_qd_exp(const GPU_qd* h_a, GPU_qd* h_c, const int len) {
gpu_exp<GPU_qd>(h_a, h_c, len);
}
/**
* c[i] = sin(a[i])
*/
void gpu_qd_sin(const GPU_qd* h_a, GPU_qd* h_c, const int len) {
#ifdef __DEVICE_EMULATION__
printf("NOTE: EMULATION mode\n");
#endif
//copy memory to GPU
GPU_qd* d_a = NULL;
GPUMALLOC((void**)&d_a, sizeof(GPU_qd)*len);
TOGPU(d_a, h_a, sizeof(GPU_qd)*len);
GPU_qd* d_c = NULL;
GPUMALLOC((void**)&d_c, sizeof(GPU_qd)*len);
//kernel
const int numBlock = 128;
const int numThread = 128;
unsigned int timer = 0;
startTimer(timer);
if((d_sin_table == NULL) || (d_cos_table == NULL)) {
printf("!!!sin or cos table is NULL.");
exit(0);
}
map_sin_kernel<GPU_qd><<<numBlock, numThread>>>(d_a, d_c, len,
d_sin_table, d_cos_table);
cutilCheckMsg("map_sin_kernel");
CUDA_SAFE_CALL(cudaThreadSynchronize());
endTimer(timer, "GPU sin kernel");
//copy results from GPU
FROMGPU(h_c, d_c, sizeof(GPU_qd)*len);
//clean up
GPUFREE(d_a);
GPUFREE(d_c);
}
void gpu_dd_sin(const GPU_dd* h_a, GPU_dd* h_c, const int len) {
//copy memory to GPU
GPU_dd* d_a = NULL;
GPUMALLOC((void**)&d_a, sizeof(GPU_dd)*len);
TOGPU(d_a, h_a, sizeof(GPU_dd)*len);
GPU_dd* d_c = NULL;
GPUMALLOC((void**)&d_c, sizeof(GPU_dd)*len);
//kernel
const int numBlock = 128;
const int numThread = 128;
unsigned int timer = 0;
startTimer(timer);
map_sin_kernel<GPU_dd><<<numBlock, numThread>>>(d_a, d_c, len,
d_dd_sin_table, d_dd_cos_table);
cutilCheckMsg("map_sin_kernel");
CUDA_SAFE_CALL(cudaThreadSynchronize());
endTimer(timer, "GPU sin kernel");
//copy results from GPU
FROMGPU(h_c, d_c, sizeof(GPU_dd)*len);
//clean up
GPUFREE(d_a);
GPUFREE(d_c);
}
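/*
 * Note: both sin variants above read precomputed argument-reduction tables
 * (d_sin_table/d_cos_table for GPU_qd, d_dd_sin_table/d_dd_cos_table for
 * GPU_dd).  Only the qd path checks them for NULL; presumably an
 * initialization routine elsewhere in this library fills the tables, and it
 * must run before either function is called.
 */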
#endif // _GQD_API_CU_
|
6a1bc57ef11e58c892b7f7243b19a25a0a9823e4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
@generated s Tue Dec 17 13:18:45 2013
*/
#include "common_magma.h"
#define PRECISION_s
#define hemv_bs 64
#define thread_x 64
#define thread_y 4
#define bank_shift 33
#define quarter_thread_x 16
#define half_thread_x 32
/*******************************************************************************
* Lower case, where n is multiple of block size (hemv_bs)
*/
__global__ void
ssymv_kernel_fermi_L_special_mgpu_offset(
int n, float alpha,
float *A, int lda,
float *x, int incx,
float beta,
float *y, int incy,
float *WC,
int my_gpu_id,
int num_gpus,
int nb,
int the_chosen_block_id,
int the_chosen_gpu_id,
int kstan)
{
#if (__CUDA_ARCH__ >= 200)
int tx = threadIdx.x;
int ty = threadIdx.y;
int blkc = blockIdx.x;
if ( blkc < my_gpu_id ) {
return;
}
float res = MAGMA_S_ZERO;
float res_ = MAGMA_S_ZERO;
float res1 = MAGMA_S_ZERO;
float res2 = MAGMA_S_ZERO;
__shared__ float la [quarter_thread_x][thread_x+2];
__shared__ float buff [thread_x];
__shared__ float buff2 [thread_x];
float tr[4];
float b[4];
int break_d = thread_x * blkc;
const int td = (thread_x * ty) + tx;
int tx_ = td % half_thread_x;
int ty_ = td / half_thread_x;
WC += break_d + tx;
x += (break_d + tx) * incx;
A += break_d;
A += ty_ * lda + tx_;
if ( ty == 0 ) {
buff[tx] = x[0];
if ( blkc == the_chosen_block_id && my_gpu_id == the_chosen_gpu_id && tx < kstan ) {
buff[tx] = MAGMA_S_ZERO;
}
} // load the vector x into buff
// __syncthreads();
tx = tx_; ty = ty_;
int flag = 0;
if ( (blkc % num_gpus) == my_gpu_id ) {
A += lda * (blkc/num_gpus) * thread_x; // change
#pragma unroll
for(int j=0; j < half_thread_x; j += 8)
la[0][ bank_shift * (ty_+j) + tx_] = A[ j * lda];
__syncthreads();
#pragma unroll
for(int i=ty_*4; i < (ty_ * 4 + 4); i++) {
if ( i < tx_ ) {
la[0][bank_shift * tx_ + i] = ( la[0][ i * bank_shift + tx_] );
}
else
la[0][bank_shift * tx_ + i] = la[0][ bank_shift * tx_ + i];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res += ( la[0][bank_shift * tx_ + j + ty_ * 4] ) * buff[j + ty_ * 4];
__syncthreads();
la[0][bank_shift*tx_+ty_] = res;
__syncthreads();
if ( ty_ == 0 )
res1 = la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7];
else {
res1 = MAGMA_S_ZERO;
}
__syncthreads();
res = MAGMA_S_ZERO;
A += half_thread_x + half_thread_x*lda;
#pragma unroll
for(int j=0; j < half_thread_x; j += 8)
la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda];
__syncthreads();
#pragma unroll
for(int i=ty_*4; i < (4+ty_*4); i++) {
if ( i < tx_ ) {
la[0][bank_shift*tx_+i] = ( la[0][bank_shift*i+tx_] );
}
else
la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res += ( la[0][bank_shift*tx_+j+ty_*4] ) * buff[half_thread_x + j + 4 * ty_];
__syncthreads();
la[0][bank_shift*tx_+ty_] = res;
__syncthreads();
if ( ty_ == 1 )
res2 = la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7];
else {
res2 = MAGMA_S_ZERO;
}
__syncthreads();
res = MAGMA_S_ZERO;
A -= half_thread_x*lda;
res_ = MAGMA_S_ZERO;
#pragma unroll
for(int j=0; j < half_thread_x; j += 8)
tr[j/8] = A[ j * lda];
#pragma unroll
for(int j=0; j < 4; j++) {
res += tr[j] * buff[ j*8 + ty_];
la[0][bank_shift*(ty_+j*8)+tx_] = tr[j];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res_ += (la[0][bank_shift*tx_+j+ty_*4]) * buff[half_thread_x +j+ty_*4];
__syncthreads();
la[0][bank_shift*tx_+ty_] = res;
__syncthreads();
if ( ty_ == 1 )
res2 = res2
+ la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7];
else {
res2 = MAGMA_S_ZERO;
}
__syncthreads();
la[0][bank_shift*tx_+ty_] = res_;
__syncthreads();
if ( ty_ == 0 ) {
res1 = res1
+ la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7];
}
else {
res1 = MAGMA_S_ZERO;
}
__syncthreads();
A -= half_thread_x;
flag = 1;
A -= lda * (blkc/num_gpus) * thread_x;
}
tx = threadIdx.x;
ty = threadIdx.y;
if ( ty_ == 0 && ty == 0 )
res = res1;
else if ( ty_ == 1 && ty == 0 )
res = res2;
else {
res = MAGMA_S_ZERO;
}
A -= ty_ * lda;
A -= tx_;
x = x - blkc * thread_x * incx;
//x = x - tx*incx;
A += 4 * ty * lda;
A += tx;
int count = 0;
tx_ = td % quarter_thread_x;
ty_ = td / quarter_thread_x;
WC -= tx;
WC += tx_;
int num_blocks_iters = (blkc +1) /num_gpus - flag;
if ( my_gpu_id < ( (blkc+1) % num_gpus) ) {
num_blocks_iters += 1;
}
x += (my_gpu_id) * nb;
int wc_c = my_gpu_id;
if ( blkc > my_gpu_id ) {
for(int s=0; s < num_blocks_iters; s++) {
res_ = MAGMA_S_ZERO;
count++;
if ( ty == 0 ) {
buff2[tx] = x[0];
if ( my_gpu_id == the_chosen_gpu_id && tx < kstan && count == 1 ) {
buff2[tx] = MAGMA_S_ZERO;
}
}
__syncthreads();
#pragma unroll
for( int k=0; k < 4; k++ ) {
#pragma unroll
for(int j=0; j < 4; j++)
tr[j] = A[j*lda];
#pragma unroll
for(int j=0; j < 4; j++) {
res += tr[j] * buff2[ quarter_thread_x * k + ty*4 + j];
la[j + ty*4][tx] = (tr[j]) * buff[tx];
//la[j + ty*4][tx] = (tr[j]);
}
__syncthreads();
res_ = MAGMA_S_ZERO;
#pragma unroll
for(int j=0; j < 4; j++) {
//res_ += la[tx_][ty_*4+j] * b[j];
res_ += la[tx_][ty_*4+j];
}
//b[4 + k] = res_;
b[ k] = res_;
__syncthreads();
A += lda * quarter_thread_x;
}
#pragma unroll
for(int k=0; k < 4; k++) {
//la[tx_][ty_+quarter_thread_x*k] = b[4+k];
la[tx_][ty_+quarter_thread_x*k] = b[k];
}
__syncthreads();
if ( ty_ < 4 ) {
int k = ty_*quarter_thread_x;
res_ = la[tx_][0+k] + la[tx_][1+k]
+ la[tx_][2+k] + la[tx_][3+k]
+ la[tx_][4+k] + la[tx_][5+k]
+ la[tx_][6+k] + la[tx_][7+k]
+ la[tx_][8+k] + la[tx_][9+k]
+ la[tx_][10+k]+ la[tx_][11+k]
+ la[tx_][12+k]+ la[tx_][13+k]
+ la[tx_][14+k]+ la[tx_][15+k];
WC[k + wc_c*lda ] = res_;
}
wc_c += num_gpus;
x += num_gpus * nb;
__syncthreads();
}
}
WC += tx;
WC -= tx_;
la[ty][tx] = res; // res stores the sweep across the row
__syncthreads();
if ( ty == 0 ) {
res = la[0][tx]+ la[1][tx]
+ la[2][tx]+ la[3][tx];
WC[0+lda*(blkc) ] = res;
}
#endif /* (__CUDA_ARCH__ >= 200) */
}
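/*
 * Overview (inferred from the kernels in this file): each thread block
 * computes partial dot products for its 64-row panel of the lower-triangular
 * part and writes them into the workspace WC; the
 * ssymv_kernel_fermi_L_update_mgpu_offset pass further below then accumulates
 * the WC entries belonging to each row index and applies
 * y = beta*y + alpha*sum.  The generic kernel that follows differs from the
 * special one above only in its handling of a trailing partial block
 * (m_mod_thread_x).
 */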
/**************************************************************
* Lower case for generic sizes
*/
__global__ void
ssymv_kernel_fermi_L_generic_mgpu_offset(
int n, float alpha,
float *A, int lda,
float *x, int incx,
float beta,
float *y, int incy,
float *WC,
int m_mod_thread_x,
int my_gpu_id,
int num_gpus,
int nb,
int the_chosen_block_id,
int the_chosen_gpu_id,
int kstan)
{
#if (__CUDA_ARCH__ >= 200)
int tx = threadIdx.x;
int ty = threadIdx.y;
int blkc = blockIdx.x;
if ( blkc < my_gpu_id ) {
return;
}
float res = MAGMA_S_ZERO;
float res_ = MAGMA_S_ZERO;
float res1 = MAGMA_S_ZERO;
float res2 = MAGMA_S_ZERO;
__shared__ float la [quarter_thread_x][thread_x+2];
__shared__ float buff [thread_x];
__shared__ float buff2 [thread_x];
float tr[4];
float b[4];
int break_d = thread_x * blkc;
const int td = (thread_x * ty) + tx;
int tx_ = td % half_thread_x;
int ty_ = td / half_thread_x;
WC += break_d + tx;
x += (break_d + tx) * incx;
A += break_d;
A += lda * ty_;
int trackA;
if ( blkc == ( gridDim.x - 1 ) ) {
if ( ty == 0 ) {
if ( tx > m_mod_thread_x ) {
buff[tx] = MAGMA_S_ZERO;
}
else
buff[tx] = x[0];
}
if ( tx_ > m_mod_thread_x )
trackA=m_mod_thread_x;
else
trackA=tx_;
A += trackA;
}
else {
if ( ty == 0 ) {
buff[tx] = x[0];
}
trackA = tx_;
A += trackA;
}
if ( ty == 0 ) {
if ( my_gpu_id == 0 && blkc == 0 && tx < kstan ) {
buff[tx] = MAGMA_S_ZERO;
}
}
int flag = 0;
if ( (blkc % num_gpus) == my_gpu_id ) {
A += lda * (blkc/num_gpus) * thread_x; // change
// Somehow merging these two if-else branches creates a problem.
// It could be a potential bug -- from synchronization, from CUDA, or from the compiler.
if ( blkc == ( gridDim.x - 1 ) ) {
#pragma unroll
for(int j=0; j < half_thread_x; j += 8) {
if ( ( ty_ + j ) > m_mod_thread_x ) {
la[0][bank_shift*(ty_+j)+tx_] = MAGMA_S_MAKE( 9999, 0 );
}
else
la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda];
}
A -= trackA;
}
else {
#pragma unroll
for(int j=0; j < half_thread_x; j += 8) {
la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda];
}
}
tx = tx_;
ty = ty_;
__syncthreads();
#pragma unroll
for(int i=ty_*4; i < (ty_*4+4); i++) {
if ( i < tx_ ) {
la[0][bank_shift*tx_+i] = (la[0][i*bank_shift+tx_]);
}
else
la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res += (la[0][bank_shift*tx_+j+ty_*4])* buff[j+ty_*4];
__syncthreads();
la[0][bank_shift*tx_+ty_] = res;
__syncthreads();
if ( ty_ == 0 )
res1 = la[0][tx_*bank_shift+0]
+ la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]
+ la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]
+ la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]
+ la[0][tx_*bank_shift+7];
else {
res1 = MAGMA_S_ZERO;
}
__syncthreads();
res = MAGMA_S_ZERO;
if ( blkc == ( gridDim.x - 1 ) ) {
if ( (tx_+half_thread_x) > m_mod_thread_x )
trackA = m_mod_thread_x;
else
trackA = tx_ + half_thread_x;
A += trackA+half_thread_x*lda;
#pragma unroll
for(int j=0; j < half_thread_x; j += 8) {
if ( ( ty_ + j+half_thread_x ) > m_mod_thread_x ) {
la[0][bank_shift*(ty_+j)+tx_] = MAGMA_S_MAKE( 99999, 0 );
}
else
la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda];
}
A -= trackA+half_thread_x*lda;
A += tx_;
A += half_thread_x + half_thread_x*lda;
}
else {
A += half_thread_x + half_thread_x*lda;
#pragma unroll
for(int j=0; j < half_thread_x; j += 8) {
la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda];
}
}
__syncthreads();
#pragma unroll
for(int i=ty_*4; i < (4+ty_*4); i++) {
if ( i < tx_ ) {
la[0][bank_shift*tx_+i] = (la[0][bank_shift*i+tx_]);
}
else
la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res += (la[0][bank_shift*tx_+j+ty_*4]) * buff[half_thread_x + j + 4 * ty_];
__syncthreads();
la[0][bank_shift*tx_+ty_] = res;
__syncthreads();
if ( ty_ == 1 )
res2 = la[0][tx_*bank_shift+0]
+ la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]
+ la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]
+ la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]
+ la[0][tx_*bank_shift+7];
else {
res2 = MAGMA_S_ZERO;
}
__syncthreads();
res = MAGMA_S_ZERO;
res_ = MAGMA_S_ZERO;
A -= half_thread_x*lda;
if ( blkc == ( gridDim.x - 1 ) ) {
A -= tx_;
if ( tx_ > m_mod_thread_x )
trackA = m_mod_thread_x;
else
trackA = tx_;
A += trackA;
#pragma unroll
for(int j=0; j < half_thread_x; j += 8)
if ( ( ty_ + j ) > m_mod_thread_x ) {
tr[j/8] = MAGMA_S_MAKE( 99999, 0 );
}
else
tr[j/8] = A[ j * lda];
A -= trackA;
A += tx_;
}
else {
#pragma unroll
for(int j=0; j < half_thread_x; j += 8)
tr[j/8] = A[ j * lda];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++) {
res += tr[j] * buff[ j*8 + ty_];
la[0][bank_shift*(ty_+j*8)+tx_] = tr[j];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res_ += (la[0][bank_shift*tx_+j+ty_*4]) * buff[half_thread_x +j+ty_*4];
__syncthreads();
la[0][bank_shift*tx_+ty_] = res;
__syncthreads();
if ( ty_ == 1 )
res2 = res2
+ la[0][tx_*bank_shift+0]
+ la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]
+ la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]
+ la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]
+ la[0][tx_*bank_shift+7];
else {
res2 = MAGMA_S_ZERO;
}
__syncthreads();
la[0][bank_shift*tx_+ty_] = res_;
__syncthreads();
if ( ty_ == 0 ) {
res1 = res1
+ la[0][tx_*bank_shift+0]
+ la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]
+ la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]
+ la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]
+ la[0][tx_*bank_shift+7];
}
else {
res1 = MAGMA_S_ZERO;
}
A -= half_thread_x;
A -= lda * (blkc/num_gpus) * thread_x;
flag = 1;
}
__syncthreads();
tx = threadIdx.x;
ty = threadIdx.y;
if ( ty_ == 0 && ty == 0 )
res = res1;
else if ( ty_ == 1 && ty == 0 )
res = res2;
else {
res = MAGMA_S_ZERO;
}
A -= ty_ * lda;
A -= tx_;
x = x - break_d*incx;
//x = x - tx * incx;
A += 4 * ty * lda;
if ( blkc == ( gridDim.x - 1 ) ) {
if ( tx <= m_mod_thread_x )
A += tx;
else
A += m_mod_thread_x;
}
else {
A += tx;
}
int wc_c = my_gpu_id;
int count = 0;
tx_ = td % quarter_thread_x;
ty_ = td / quarter_thread_x;
WC -= tx;
WC += tx_;
int num_blocks_iters = (blkc +1) /num_gpus - flag;
if ( my_gpu_id < ( (blkc+1) % num_gpus) ) {
num_blocks_iters += 1;
}
x += (my_gpu_id) * nb;
if ( blkc > my_gpu_id ) {
for(int s=0; s < num_blocks_iters; s++) {
res_ = MAGMA_S_ZERO;
count++;
if ( ty == 0 ) {
buff2[tx] = x[0];
if ( my_gpu_id == the_chosen_gpu_id && tx < kstan && count == 1 ) {
buff2[tx] = MAGMA_S_ZERO;
}
}
__syncthreads();
#pragma unroll
for( int k=0; k < 4; k++ ) {
#pragma unroll
for(int j=0; j < 4; j++)
tr[j] = A[j*lda];
#pragma unroll
for(int j=0; j < 4; j++) {
res += tr[j] * buff2[ quarter_thread_x*k + ty*4+(j)];
la[( (j)+ty*4)][tx] = (tr[j]) * buff[tx];
}
__syncthreads();
res_ = MAGMA_S_ZERO;
#pragma unroll
for(int j=0; j < 4; j++)
res_ += la[tx_][ty_*4+j];
b[k] = res_;
__syncthreads();
A += lda * quarter_thread_x;
}
#pragma unroll
for(int k=0; k < 4; k++) {
la[tx_][ty_+quarter_thread_x*k] = b[k];
}
__syncthreads();
if ( ty_ < 4 ) {
int k = ty_*quarter_thread_x;
res_ = la[tx_][0+k] + la[tx_][1+k]
+ la[tx_][2+k] + la[tx_][3+k]
+ la[tx_][4+k] + la[tx_][5+k]
+ la[tx_][6+k] + la[tx_][7+k]
+ la[tx_][8+k] + la[tx_][9+k]
+ la[tx_][10+k]+ la[tx_][11+k]
+ la[tx_][12+k]+ la[tx_][13+k]
+ la[tx_][14+k]+ la[tx_][15+k];
WC[k + wc_c*lda ] = res_;
}
wc_c += num_gpus;
x += num_gpus * nb;
__syncthreads();
}
}
WC += tx;
WC -= tx_;
la[ty][tx] = res;
__syncthreads();
if ( ty == 0 ) {
res=la[0][tx]+ la[1][tx]+ la[2][tx]+ la[3][tx];
WC[0+lda*(blkc)] = res;
}
#endif /* (__CUDA_ARCH__ >= 200) */
}
__global__ void
ssymv_kernel_fermi_L_update_mgpu_offset(
int n, float alpha,
float *A, int lda,
float *x, int incx,
float beta,
float *y, int incy,
float *WC,
int my_gpu_id,
int num_gpus,
int nb,
int the_chosen_block_id,
int the_chosen_gpu_id,
int offset)
{
#if (__CUDA_ARCH__ >= 200)
/*
if ( blockIdx.x < the_chosen_block_id ) {
return;
}
*/
int i;
int tx = threadIdx.x;
int ind = blockIdx.x * thread_x + tx;
float Ca;
Ca = MAGMA_S_ZERO;
WC += ind + lda * blockIdx.x;
for(i = blockIdx.x*thread_x; i < n; i += thread_x) {
Ca += WC[0];
WC += thread_x;
}
if ( ind < n && ind >= offset )
y[ind * incy] = beta * y[ind * incy] + alpha * Ca;
#endif /* (__CUDA_ARCH__ >= 200) */
}
extern "C"
void magmablas_ssymv_fermi_L_mgpu_offset(
magma_int_t n, float alpha,
float *A, magma_int_t lda,
float *x, magma_int_t incx,
float beta,
float *y, magma_int_t incy,
float *dwork,
magma_int_t my_gpu_id,
magma_int_t num_gpus,
magma_int_t nb,
magma_int_t offset,
magma_int_t num_blocks_skipped )
{
magma_int_t the_chosen_block_id = offset / 64;
magma_int_t the_chosen_gpu_id = the_chosen_block_id % num_gpus;
magma_int_t kstan = offset % 64;
/*
printf("Enter magmablas_ssymv_fermi_L_mgpu_offset\n");
printf("the_chosen_block_id = %d\n", the_chosen_block_id);
printf("the_chosen_gpu_id = %d\n", the_chosen_gpu_id);
printf("kstan = %d\n", kstan);
*/
A += lda * num_blocks_skipped * 64 + the_chosen_block_id * 64;
x += the_chosen_block_id * 64;
y += the_chosen_block_id * 64;
magma_int_t blocks = (n - 1)/hemv_bs + 1;
blocks -= the_chosen_block_id;
dim3 grid(blocks, 1, 1);
dim3 threads(thread_x, thread_y, 1);
dim3 threads_u(hemv_bs, 1, 1);
the_chosen_block_id = 0;
the_chosen_gpu_id = 0;
/*
* If the matrix size is a multiple of hemv_bs, we use a specialized kernel;
* otherwise, we call the generic case.
*/
if ( n % hemv_bs == 0 ) {
hipLaunchKernelGGL(( ssymv_kernel_fermi_L_special_mgpu_offset), dim3(grid), dim3(threads), 0, magma_stream ,
n, alpha, A, lda, x, incx, beta, y, incy, dwork,
my_gpu_id, num_gpus, nb,
the_chosen_block_id, the_chosen_gpu_id, kstan);
}
else {
magma_int_t m_mod_thread_x = (n % hemv_bs) - 1;
hipLaunchKernelGGL(( ssymv_kernel_fermi_L_generic_mgpu_offset), dim3(grid), dim3(threads), 0, magma_stream ,
n, alpha, A, lda, x, incx, beta, y, incy, dwork,
m_mod_thread_x, my_gpu_id, num_gpus, nb,
the_chosen_block_id, the_chosen_gpu_id, kstan);
}
hipLaunchKernelGGL(( ssymv_kernel_fermi_L_update_mgpu_offset), dim3(grid), dim3(threads_u), 0, magma_stream ,
n, alpha, A, lda, x, incx, beta, y, incy, dwork,
my_gpu_id, num_gpus, nb,
the_chosen_block_id, the_chosen_gpu_id, kstan);
}
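/*
 * Worked example (illustrative numbers, not from the original source): with
 * n = 1000 and offset = 128, hemv_bs = 64 gives
 *   the_chosen_block_id = 128/64 = 2,  kstan = 128%64 = 0,
 *   blocks = (1000-1)/64 + 1 - 2 = 14,
 * and since 1000 % 64 = 40 != 0 the generic kernel is launched with
 * m_mod_thread_x = 40 - 1 = 39.
 */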
/*************************************************************************
Purpose
=======
magmablas_ssymv performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n symmetric matrix.
Arguments
==========
UPLO CHARACTER*1.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
UPLO = 'U' or 'u' Only the upper triangular part of A
is to be referenced.
UPLO = 'L' or 'l' Only the lower triangular part of A
is to be referenced.
Unchanged on exit.
N INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
Unchanged on exit.
ALPHA REAL.
On entry, ALPHA specifies the scalar alpha.
Unchanged on exit.
A REAL array of DIMENSION ( LDA, n ).
Before entry with UPLO = 'U' or 'u', the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the symmetric matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = 'L' or 'l', the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the symmetric matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
Unchanged on exit.
LDA INTEGER.
On entry, LDA specifies the first dimension of A as declared
in the calling (sub) program. LDA must be at least
max( 1, n ).
Unchanged on exit.
It is recommended that lda be a multiple of 16; otherwise
performance will be degraded because the memory accesses
will not be fully coalesced.
X REAL array of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
Unchanged on exit.
INCX INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
Unchanged on exit.
BETA REAL.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
Unchanged on exit.
Y REAL array of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
INCY INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
Unchanged on exit.
*/
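/*
 * Call sketch (illustrative, with several assumptions): A, x, y and work are
 * arrays of per-GPU device pointers; the nb == 64 check below suggests the
 * matrix is expected to be distributed across the GPUs in 64-column blocks,
 * but the exact layout and its setup are not shown here and are assumed to
 * follow the usual MAGMA multi-GPU conventions.  Only uplo = 'L' is
 * implemented.
 *
 *   magma_int_t num_gpus = 2, n = 4096, lda = 4096, nb = 64, offset = 0;
 *   magma_int_t lwork = lda * ((n - 1)/64 + 2);   // lda * (blocks + 1), as required below
 *   float **d_A, **d_x, **d_y, **d_work;          // one device pointer per GPU (setup not shown)
 *   magma_queue_t streams[2][10];                 // streams[i][0] is used for GPU i
 *   magmablas_ssymv_mgpu_offset('L', n, 1.0f, d_A, lda, d_x, 1,
 *                               0.0f, d_y, 1, d_work, lwork,
 *                               num_gpus, nb, offset, streams);
 */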
extern "C"
magma_int_t
magmablas_ssymv_mgpu_offset(
char uplo, magma_int_t n,
float alpha,
float **A, magma_int_t lda,
float **x, magma_int_t incx,
float beta,
float **y, magma_int_t incy,
float **work, magma_int_t lwork,
magma_int_t num_gpus,
magma_int_t nb,
magma_int_t offset,
magma_queue_t stream[][10])
{
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// no CUDA ARCH 1.x version
fprintf( stderr, "%s not supported on CUDA arch 1.x", __func__ );
return MAGMA_ERR_NOT_SUPPORTED;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
char uplo_[2] = {uplo, 0};
int upper = lapackf77_lsame(uplo_, "U");
/*
* Test the input parameters.
*/
if ( (! upper) && (! lapackf77_lsame(uplo_, "L")) ) {
return -1;
} else if ( n < 0 ) {
return -2;
} else if ( lda < max(1,n) ) {
return -5;
} else if ( incx == 0 ) {
return -7;
} else if ( incy == 0 ) {
return -10;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_S_EQUAL(alpha, MAGMA_S_ZERO) && MAGMA_S_EQUAL(beta, MAGMA_S_ONE) ) )
return MAGMA_SUCCESS;
/* TODO: Upper case is not implemented in MAGMA */
if ( upper ) {
fprintf( stderr, "Upper case is not implemented on multi GPUs\n" );
return MAGMA_ERR_NOT_SUPPORTED;
}
else {
magma_int_t blocks = (n - 1)/thread_x + 1;
magma_int_t lwmin = lda * (blocks + 1);
if ( lwork < lwmin ) {
fprintf( stderr, "Not enough work space in %s: passed %d, required %d\n",
__func__, (int) lwork, (int) lwmin);
return -12;
}
if ( nb != 64 ) {
fprintf( stderr, "Error in %s: nb != 64, please reallocate matrix among GPUs\n", __func__ );
return MAGMA_ERR_ILLEGAL_VALUE;
}
{
magma_int_t i = 0;
for(i=0; i < num_gpus; i++) {
magma_setdevice(i);
magmablasSetKernelStream(stream[i][0]);
magma_int_t the_chosen_block_id = offset / 64;
magma_int_t the_chosen_gpu_id = the_chosen_block_id % num_gpus;
magma_int_t num_blocks_skipped = the_chosen_block_id / num_gpus;
if ( i < the_chosen_gpu_id ) {
num_blocks_skipped += 1;
}
int new_gpu_id = ( i + num_gpus - the_chosen_gpu_id ) % num_gpus;
magmablas_ssymv_fermi_L_mgpu_offset(n, alpha, A[i], lda, x[i], incx, beta, y[i], incy, work[i],
new_gpu_id, num_gpus, nb, offset, num_blocks_skipped);
}
}
}
return MAGMA_SUCCESS;
}
extern "C"
magma_int_t
magmablas_ssymv2_mgpu_offset(
char uplo, magma_int_t n,
float alpha,
float **A, magma_int_t lda,
float **x, magma_int_t incx,
float beta,
float **y, magma_int_t incy,
float **work, magma_int_t lwork,
magma_int_t num_gpus,
magma_int_t nb,
magma_int_t offset)
{
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// no CUDA ARCH 1.x version
fprintf( stderr, "%s not supported on CUDA arch 1.x", __func__ );
return MAGMA_ERR_NOT_SUPPORTED;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
char uplo_[2] = {uplo, 0};
int upper = lapackf77_lsame(uplo_, "U");
/*
* Test the input parameters.
*/
if ( (! upper) && (! lapackf77_lsame(uplo_, "L")) ) {
return -1;
} else if ( n < 0 ) {
return -2;
} else if ( lda < max(1,n) ) {
return -5;
} else if ( incx == 0 ) {
return -7;
} else if ( incy == 0 ) {
return -10;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_S_EQUAL(alpha, MAGMA_S_ZERO) && MAGMA_S_EQUAL(beta, MAGMA_S_ONE) ) )
return MAGMA_SUCCESS;
/* TODO: Upper case is not implemented in MAGMA */
if ( upper ) {
fprintf( stderr, "Upper case is not implemented on multi GPUs\n" );
return MAGMA_ERR_NOT_SUPPORTED;
}
else {
magma_int_t blocks = (n - 1)/thread_x + 1;
magma_int_t lwmin = lda * (blocks + 1);
if ( lwork < lwmin ) {
fprintf( stderr, "Not enough work space in %s: passed %d, required %d\n",
__func__, (int) lwork, (int) lwmin);
return -12;
}
if ( nb != 64 ) {
fprintf( stderr, "Error in %s: nb != 64, please reallocate matrix among GPUs\n", __func__ );
return MAGMA_ERR_ILLEGAL_VALUE;
}
if ( num_gpus == 1 ) {
magmablas_ssymv_work(uplo, n-offset, alpha, A[0] + offset + lda * offset, lda, x[0] + offset, incx, beta, y[0] + offset, incy, work[0], lwork);
}
else {
magma_int_t i = 0;
for(i=0; i < num_gpus; i++) {
magma_setdevice(i);
magma_int_t the_chosen_block_id = offset / 64;
magma_int_t the_chosen_gpu_id = the_chosen_block_id % num_gpus;
magma_int_t num_blocks_skipped = the_chosen_block_id / num_gpus;
if ( i < the_chosen_gpu_id ) {
num_blocks_skipped += 1;
}
int new_gpu_id = ( i + num_gpus - the_chosen_gpu_id ) % num_gpus;
magmablas_ssymv_fermi_L_mgpu_offset(n, alpha, A[i], lda, x[i], incx, beta, y[i], incy, work[i],
new_gpu_id, num_gpus, nb, offset, num_blocks_skipped);
}
}
}
return MAGMA_SUCCESS;
}
extern "C"
magma_int_t
magmablas_ssymv2_mgpu(
char uplo, magma_int_t n,
float alpha,
float **A, magma_int_t lda,
float **x, magma_int_t incx,
float beta,
float **y, magma_int_t incy,
float **work, magma_int_t lwork,
magma_int_t num_gpus,
magma_int_t nb)
{
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// no CUDA ARCH 1.x version
fprintf( stderr, "%s not supported on CUDA arch 1.x", __func__ );
return MAGMA_ERR_NOT_SUPPORTED;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
char uplo_[2] = {uplo, 0};
int upper = lapackf77_lsame(uplo_, "U");
/*
* Test the input parameters.
*/
if ( (! upper) && (! lapackf77_lsame(uplo_, "L")) ) {
return -1;
} else if ( n < 0 ) {
return -2;
} else if ( lda < max(1,n) ) {
return -5;
} else if ( incx == 0 ) {
return -7;
} else if ( incy == 0 ) {
return -10;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_S_EQUAL(alpha, MAGMA_S_ZERO) && MAGMA_S_EQUAL(beta, MAGMA_S_ONE) ) )
return MAGMA_SUCCESS;
/* TODO: Upper case is not implemented in MAGMA */
if ( upper ) {
fprintf( stderr, "Upper case is not implemented on multi GPUs\n" );
return MAGMA_ERR_NOT_SUPPORTED;
}
else {
magma_int_t blocks = (n - 1)/thread_x + 1;
magma_int_t lwmin = lda * (blocks + 1);
if ( lwork < lwmin ) {
fprintf( stderr, "Not enough work space in %s: passed %d, required %d\n",
__func__, (int) lwork, (int) lwmin);
return -12;
}
if ( nb != 64 ) {
fprintf( stderr, "Error in %s: nb != 64, please reallocate matrix among GPUs\n", __func__ );
return MAGMA_ERR_ILLEGAL_VALUE;
}
magma_int_t i = 0;
for(i=0; i < num_gpus; i++) {
magma_setdevice(i);
magmablas_ssymv_fermi_L_mgpu_offset(n, alpha, A[i], lda, x[i], incx, beta, y[i], incy, work[i],
i, num_gpus, nb, 0, 0);
}
}
return MAGMA_SUCCESS;
}
| 6a1bc57ef11e58c892b7f7243b19a25a0a9823e4.cu | /*
-- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
@generated s Tue Dec 17 13:18:45 2013
*/
#include "common_magma.h"
#define PRECISION_s
#define hemv_bs 64
#define thread_x 64
#define thread_y 4
#define bank_shift 33
#define quarter_thread_x 16
#define half_thread_x 32
/*******************************************************************************
* Lower case, where n is multiple of block size (hemv_bs)
*/
__global__ void
ssymv_kernel_fermi_L_special_mgpu_offset(
int n, float alpha,
float *A, int lda,
float *x, int incx,
float beta,
float *y, int incy,
float *WC,
int my_gpu_id,
int num_gpus,
int nb,
int the_chosen_block_id,
int the_chosen_gpu_id,
int kstan)
{
#if (__CUDA_ARCH__ >= 200)
int tx = threadIdx.x;
int ty = threadIdx.y;
int blkc = blockIdx.x;
if ( blkc < my_gpu_id ) {
return;
}
float res = MAGMA_S_ZERO;
float res_ = MAGMA_S_ZERO;
float res1 = MAGMA_S_ZERO;
float res2 = MAGMA_S_ZERO;
__shared__ float la [quarter_thread_x][thread_x+2];
__shared__ float buff [thread_x];
__shared__ float buff2 [thread_x];
float tr[4];
float b[4];
int break_d = thread_x * blkc;
const int td = (thread_x * ty) + tx;
int tx_ = td % half_thread_x;
int ty_ = td / half_thread_x;
WC += break_d + tx;
x += (break_d + tx) * incx;
A += break_d;
A += ty_ * lda + tx_;
if ( ty == 0 ) {
buff[tx] = x[0];
if ( blkc == the_chosen_block_id && my_gpu_id == the_chosen_gpu_id && tx < kstan ) {
buff[tx] = MAGMA_S_ZERO;
}
} // load the vector x into buff
// __syncthreads();
tx = tx_; ty = ty_;
int flag = 0;
if ( (blkc % num_gpus) == my_gpu_id ) {
A += lda * (blkc/num_gpus) * thread_x; // change
#pragma unroll
for(int j=0; j < half_thread_x; j += 8)
la[0][ bank_shift * (ty_+j) + tx_] = A[ j * lda];
__syncthreads();
#pragma unroll
for(int i=ty_*4; i < (ty_ * 4 + 4); i++) {
if ( i < tx_ ) {
la[0][bank_shift * tx_ + i] = ( la[0][ i * bank_shift + tx_] );
}
else
la[0][bank_shift * tx_ + i] = la[0][ bank_shift * tx_ + i];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res += ( la[0][bank_shift * tx_ + j + ty_ * 4] ) * buff[j + ty_ * 4];
__syncthreads();
la[0][bank_shift*tx_+ty_] = res;
__syncthreads();
if ( ty_ == 0 )
res1 = la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7];
else {
res1 = MAGMA_S_ZERO;
}
__syncthreads();
res = MAGMA_S_ZERO;
A += half_thread_x + half_thread_x*lda;
#pragma unroll
for(int j=0; j < half_thread_x; j += 8)
la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda];
__syncthreads();
#pragma unroll
for(int i=ty_*4; i < (4+ty_*4); i++) {
if ( i < tx_ ) {
la[0][bank_shift*tx_+i] = ( la[0][bank_shift*i+tx_] );
}
else
la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res += ( la[0][bank_shift*tx_+j+ty_*4] ) * buff[half_thread_x + j + 4 * ty_];
__syncthreads();
la[0][bank_shift*tx_+ty_] = res;
__syncthreads();
if ( ty_ == 1 )
res2 = la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7];
else {
res2 = MAGMA_S_ZERO;
}
__syncthreads();
res = MAGMA_S_ZERO;
A -= half_thread_x*lda;
res_ = MAGMA_S_ZERO;
#pragma unroll
for(int j=0; j < half_thread_x; j += 8)
tr[j/8] = A[ j * lda];
#pragma unroll
for(int j=0; j < 4; j++) {
res += tr[j] * buff[ j*8 + ty_];
la[0][bank_shift*(ty_+j*8)+tx_] = tr[j];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res_ += (la[0][bank_shift*tx_+j+ty_*4]) * buff[half_thread_x +j+ty_*4];
__syncthreads();
la[0][bank_shift*tx_+ty_] = res;
__syncthreads();
if ( ty_ == 1 )
res2 = res2
+ la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7];
else {
res2 = MAGMA_S_ZERO;
}
__syncthreads();
la[0][bank_shift*tx_+ty_] = res_;
__syncthreads();
if ( ty_ == 0 ) {
res1 = res1
+ la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7];
}
else {
res1 = MAGMA_S_ZERO;
}
__syncthreads();
A -= half_thread_x;
flag = 1;
A -= lda * (blkc/num_gpus) * thread_x;
}
tx = threadIdx.x;
ty = threadIdx.y;
if ( ty_ == 0 && ty == 0 )
res = res1;
else if ( ty_ == 1 && ty == 0 )
res = res2;
else {
res = MAGMA_S_ZERO;
}
A -= ty_ * lda;
A -= tx_;
x = x - blkc * thread_x * incx;
//x = x - tx*incx;
A += 4 * ty * lda;
A += tx;
int count = 0;
tx_ = td % quarter_thread_x;
ty_ = td / quarter_thread_x;
WC -= tx;
WC += tx_;
int num_blocks_iters = (blkc +1) /num_gpus - flag;
if ( my_gpu_id < ( (blkc+1) % num_gpus) ) {
num_blocks_iters += 1;
}
x += (my_gpu_id) * nb;
int wc_c = my_gpu_id;
if ( blkc > my_gpu_id ) {
for(int s=0; s < num_blocks_iters; s++) {
res_ = MAGMA_S_ZERO;
count++;
if ( ty == 0 ) {
buff2[tx] = x[0];
if ( my_gpu_id == the_chosen_gpu_id && tx < kstan && count == 1 ) {
buff2[tx] = MAGMA_S_ZERO;
}
}
__syncthreads();
#pragma unroll
for( int k=0; k < 4; k++ ) {
#pragma unroll
for(int j=0; j < 4; j++)
tr[j] = A[j*lda];
#pragma unroll
for(int j=0; j < 4; j++) {
res += tr[j] * buff2[ quarter_thread_x * k + ty*4 + j];
la[j + ty*4][tx] = (tr[j]) * buff[tx];
//la[j + ty*4][tx] = (tr[j]);
}
__syncthreads();
res_ = MAGMA_S_ZERO;
#pragma unroll
for(int j=0; j < 4; j++) {
//res_ += la[tx_][ty_*4+j] * b[j];
res_ += la[tx_][ty_*4+j];
}
//b[4 + k] = res_;
b[ k] = res_;
__syncthreads();
A += lda * quarter_thread_x;
}
#pragma unroll
for(int k=0; k < 4; k++) {
//la[tx_][ty_+quarter_thread_x*k] = b[4+k];
la[tx_][ty_+quarter_thread_x*k] = b[k];
}
__syncthreads();
if ( ty_ < 4 ) {
int k = ty_*quarter_thread_x;
res_ = la[tx_][0+k] + la[tx_][1+k]
+ la[tx_][2+k] + la[tx_][3+k]
+ la[tx_][4+k] + la[tx_][5+k]
+ la[tx_][6+k] + la[tx_][7+k]
+ la[tx_][8+k] + la[tx_][9+k]
+ la[tx_][10+k]+ la[tx_][11+k]
+ la[tx_][12+k]+ la[tx_][13+k]
+ la[tx_][14+k]+ la[tx_][15+k];
WC[k + wc_c*lda ] = res_;
}
wc_c += num_gpus;
x += num_gpus * nb;
__syncthreads();
}
}
WC += tx;
WC -= tx_;
la[ty][tx] = res; // res stores the sweep across the row
__syncthreads();
if ( ty == 0 ) {
res = la[0][tx]+ la[1][tx]
+ la[2][tx]+ la[3][tx];
WC[0+lda*(blkc) ] = res;
}
#endif /* (__CUDA_ARCH__ >= 200) */
}
/**************************************************************
* Lower case for generic sizes
*/
__global__ void
ssymv_kernel_fermi_L_generic_mgpu_offset(
int n, float alpha,
float *A, int lda,
float *x, int incx,
float beta,
float *y, int incy,
float *WC,
int m_mod_thread_x,
int my_gpu_id,
int num_gpus,
int nb,
int the_chosen_block_id,
int the_chosen_gpu_id,
int kstan)
{
#if (__CUDA_ARCH__ >= 200)
int tx = threadIdx.x;
int ty = threadIdx.y;
int blkc = blockIdx.x;
if ( blkc < my_gpu_id ) {
return;
}
float res = MAGMA_S_ZERO;
float res_ = MAGMA_S_ZERO;
float res1 = MAGMA_S_ZERO;
float res2 = MAGMA_S_ZERO;
__shared__ float la [quarter_thread_x][thread_x+2];
__shared__ float buff [thread_x];
__shared__ float buff2 [thread_x];
float tr[4];
float b[4];
int break_d = thread_x * blkc;
const int td = (thread_x * ty) + tx;
int tx_ = td % half_thread_x;
int ty_ = td / half_thread_x;
WC += break_d + tx;
x += (break_d + tx) * incx;
A += break_d;
A += lda * ty_;
int trackA;
if ( blkc == ( gridDim.x - 1 ) ) {
if ( ty == 0 ) {
if ( tx > m_mod_thread_x ) {
buff[tx] = MAGMA_S_ZERO;
}
else
buff[tx] = x[0];
}
if ( tx_ > m_mod_thread_x )
trackA=m_mod_thread_x;
else
trackA=tx_;
A += trackA;
}
else {
if ( ty == 0 ) {
buff[tx] = x[0];
}
trackA = tx_;
A += trackA;
}
if ( ty == 0 ) {
if ( my_gpu_id == 0 && blkc == 0 && tx < kstan ) {
buff[tx] = MAGMA_S_ZERO;
}
}
int flag = 0;
if ( (blkc % num_gpus) == my_gpu_id ) {
A += lda * (blkc/num_gpus) * thread_x; // change
// Somehow merging these two if-else branches creates a problem.
// It could be a potential bug -- from synchronization, from CUDA, or from the compiler.
if ( blkc == ( gridDim.x - 1 ) ) {
#pragma unroll
for(int j=0; j < half_thread_x; j += 8) {
if ( ( ty_ + j ) > m_mod_thread_x ) {
la[0][bank_shift*(ty_+j)+tx_] = MAGMA_S_MAKE( 9999, 0 );
}
else
la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda];
}
A -= trackA;
}
else {
#pragma unroll
for(int j=0; j < half_thread_x; j += 8) {
la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda];
}
}
tx = tx_;
ty = ty_;
__syncthreads();
#pragma unroll
for(int i=ty_*4; i < (ty_*4+4); i++) {
if ( i < tx_ ) {
la[0][bank_shift*tx_+i] = (la[0][i*bank_shift+tx_]);
}
else
la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res += (la[0][bank_shift*tx_+j+ty_*4])* buff[j+ty_*4];
__syncthreads();
la[0][bank_shift*tx_+ty_] = res;
__syncthreads();
if ( ty_ == 0 )
res1 = la[0][tx_*bank_shift+0]
+ la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]
+ la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]
+ la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]
+ la[0][tx_*bank_shift+7];
else {
res1 = MAGMA_S_ZERO;
}
__syncthreads();
res = MAGMA_S_ZERO;
if ( blkc == ( gridDim.x - 1 ) ) {
if ( (tx_+half_thread_x) > m_mod_thread_x )
trackA = m_mod_thread_x;
else
trackA = tx_ + half_thread_x;
A += trackA+half_thread_x*lda;
#pragma unroll
for(int j=0; j < half_thread_x; j += 8) {
if ( ( ty_ + j+half_thread_x ) > m_mod_thread_x ) {
la[0][bank_shift*(ty_+j)+tx_] = MAGMA_S_MAKE( 99999, 0 );
}
else
la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda];
}
A -= trackA+half_thread_x*lda;
A += tx_;
A += half_thread_x + half_thread_x*lda;
}
else {
A += half_thread_x + half_thread_x*lda;
#pragma unroll
for(int j=0; j < half_thread_x; j += 8) {
la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda];
}
}
__syncthreads();
#pragma unroll
for(int i=ty_*4; i < (4+ty_*4); i++) {
if ( i < tx_ ) {
la[0][bank_shift*tx_+i] = (la[0][bank_shift*i+tx_]);
}
else
la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res += (la[0][bank_shift*tx_+j+ty_*4]) * buff[half_thread_x + j + 4 * ty_];
__syncthreads();
la[0][bank_shift*tx_+ty_] = res;
__syncthreads();
if ( ty_ == 1 )
res2 = la[0][tx_*bank_shift+0]
+ la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]
+ la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]
+ la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]
+ la[0][tx_*bank_shift+7];
else {
res2 = MAGMA_S_ZERO;
}
__syncthreads();
res = MAGMA_S_ZERO;
res_ = MAGMA_S_ZERO;
A -= half_thread_x*lda;
if ( blkc == ( gridDim.x - 1 ) ) {
A -= tx_;
if ( tx_ > m_mod_thread_x )
trackA = m_mod_thread_x;
else
trackA = tx_;
A += trackA;
#pragma unroll
for(int j=0; j < half_thread_x; j += 8)
if ( ( ty_ + j ) > m_mod_thread_x ) {
tr[j/8] = MAGMA_S_MAKE( 99999, 0 );
}
else
tr[j/8] = A[ j * lda];
A -= trackA;
A += tx_;
}
else {
#pragma unroll
for(int j=0; j < half_thread_x; j += 8)
tr[j/8] = A[ j * lda];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++) {
res += tr[j] * buff[ j*8 + ty_];
la[0][bank_shift*(ty_+j*8)+tx_] = tr[j];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res_ += (la[0][bank_shift*tx_+j+ty_*4]) * buff[half_thread_x +j+ty_*4];
__syncthreads();
la[0][bank_shift*tx_+ty_] = res;
__syncthreads();
if ( ty_ == 1 )
res2 = res2
+ la[0][tx_*bank_shift+0]
+ la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]
+ la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]
+ la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]
+ la[0][tx_*bank_shift+7];
else {
res2 = MAGMA_S_ZERO;
}
__syncthreads();
la[0][bank_shift*tx_+ty_] = res_;
__syncthreads();
if ( ty_ == 0 ) {
res1 = res1
+ la[0][tx_*bank_shift+0]
+ la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]
+ la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]
+ la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]
+ la[0][tx_*bank_shift+7];
}
else {
res1 = MAGMA_S_ZERO;
}
A -= half_thread_x;
A -= lda * (blkc/num_gpus) * thread_x;
flag = 1;
}
__syncthreads();
tx = threadIdx.x;
ty = threadIdx.y;
if ( ty_ == 0 && ty == 0 )
res = res1;
else if ( ty_ == 1 && ty == 0 )
res = res2;
else {
res = MAGMA_S_ZERO;
}
A -= ty_ * lda;
A -= tx_;
x = x - break_d*incx;
//x = x - tx * incx;
A += 4 * ty * lda;
if ( blkc == ( gridDim.x - 1 ) ) {
if ( tx <= m_mod_thread_x )
A += tx;
else
A += m_mod_thread_x;
}
else {
A += tx;
}
int wc_c = my_gpu_id;
int count = 0;
tx_ = td % quarter_thread_x;
ty_ = td / quarter_thread_x;
WC -= tx;
WC += tx_;
int num_blocks_iters = (blkc +1) /num_gpus - flag;
if ( my_gpu_id < ( (blkc+1) % num_gpus) ) {
num_blocks_iters += 1;
}
x += (my_gpu_id) * nb;
if ( blkc > my_gpu_id ) {
for(int s=0; s < num_blocks_iters; s++) {
res_ = MAGMA_S_ZERO;
count++;
if ( ty == 0 ) {
buff2[tx] = x[0];
if ( my_gpu_id == the_chosen_gpu_id && tx < kstan && count == 1 ) {
buff2[tx] = MAGMA_S_ZERO;
}
}
__syncthreads();
#pragma unroll
for( int k=0; k < 4; k++ ) {
#pragma unroll
for(int j=0; j < 4; j++)
tr[j] = A[j*lda];
#pragma unroll
for(int j=0; j < 4; j++) {
res += tr[j] * buff2[ quarter_thread_x*k + ty*4+(j)];
la[( (j)+ty*4)][tx] = (tr[j]) * buff[tx];
}
__syncthreads();
res_ = MAGMA_S_ZERO;
#pragma unroll
for(int j=0; j < 4; j++)
res_ += la[tx_][ty_*4+j];
b[k] = res_;
__syncthreads();
A += lda * quarter_thread_x;
}
#pragma unroll
for(int k=0; k < 4; k++) {
la[tx_][ty_+quarter_thread_x*k] = b[k];
}
__syncthreads();
if ( ty_ < 4 ) {
int k = ty_*quarter_thread_x;
res_ = la[tx_][0+k] + la[tx_][1+k]
+ la[tx_][2+k] + la[tx_][3+k]
+ la[tx_][4+k] + la[tx_][5+k]
+ la[tx_][6+k] + la[tx_][7+k]
+ la[tx_][8+k] + la[tx_][9+k]
+ la[tx_][10+k]+ la[tx_][11+k]
+ la[tx_][12+k]+ la[tx_][13+k]
+ la[tx_][14+k]+ la[tx_][15+k];
WC[k + wc_c*lda ] = res_;
}
wc_c += num_gpus;
x += num_gpus * nb;
__syncthreads();
}
}
WC += tx;
WC -= tx_;
la[ty][tx] = res;
__syncthreads();
if ( ty == 0 ) {
res=la[0][tx]+ la[1][tx]+ la[2][tx]+ la[3][tx];
WC[0+lda*(blkc)] = res;
}
#endif /* (__CUDA_ARCH__ >= 200) */
}
__global__ void
ssymv_kernel_fermi_L_update_mgpu_offset(
int n, float alpha,
float *A, int lda,
float *x, int incx,
float beta,
float *y, int incy,
float *WC,
int my_gpu_id,
int num_gpus,
int nb,
int the_chosen_block_id,
int the_chosen_gpu_id,
int offset)
{
#if (__CUDA_ARCH__ >= 200)
/*
if ( blockIdx.x < the_chosen_block_id ) {
return;
}
*/
int i;
int tx = threadIdx.x;
int ind = blockIdx.x * thread_x + tx;
float Ca;
Ca = MAGMA_S_ZERO;
WC += ind + lda * blockIdx.x;
for(i = blockIdx.x*thread_x; i < n; i += thread_x) {
Ca += WC[0];
WC += thread_x;
}
if ( ind < n && ind >= offset )
y[ind * incy] = beta * y[ind * incy] + alpha * Ca;
#endif /* (__CUDA_ARCH__ >= 200) */
}
extern "C"
void magmablas_ssymv_fermi_L_mgpu_offset(
magma_int_t n, float alpha,
float *A, magma_int_t lda,
float *x, magma_int_t incx,
float beta,
float *y, magma_int_t incy,
float *dwork,
magma_int_t my_gpu_id,
magma_int_t num_gpus,
magma_int_t nb,
magma_int_t offset,
magma_int_t num_blocks_skipped )
{
magma_int_t the_chosen_block_id = offset / 64;
magma_int_t the_chosen_gpu_id = the_chosen_block_id % num_gpus;
magma_int_t kstan = offset % 64;
/*
printf("Enter magmablas_ssymv_fermi_L_mgpu_offset\n");
printf("the_chosen_block_id = %d\n", the_chosen_block_id);
printf("the_chosen_gpu_id = %d\n", the_chosen_gpu_id);
printf("kstan = %d\n", kstan);
*/
A += lda * num_blocks_skipped * 64 + the_chosen_block_id * 64;
x += the_chosen_block_id * 64;
y += the_chosen_block_id * 64;
magma_int_t blocks = (n - 1)/hemv_bs + 1;
blocks -= the_chosen_block_id;
dim3 grid(blocks, 1, 1);
dim3 threads(thread_x, thread_y, 1);
dim3 threads_u(hemv_bs, 1, 1);
the_chosen_block_id = 0;
the_chosen_gpu_id = 0;
/*
* If the matrix size is a multiple of hemv_bs, we use a specialized kernel;
* otherwise, we call the generic case.
*/
if ( n % hemv_bs == 0 ) {
ssymv_kernel_fermi_L_special_mgpu_offset<<< grid, threads, 0, magma_stream >>>(
n, alpha, A, lda, x, incx, beta, y, incy, dwork,
my_gpu_id, num_gpus, nb,
the_chosen_block_id, the_chosen_gpu_id, kstan);
}
else {
magma_int_t m_mod_thread_x = (n % hemv_bs) - 1;
ssymv_kernel_fermi_L_generic_mgpu_offset<<< grid, threads, 0, magma_stream >>>(
n, alpha, A, lda, x, incx, beta, y, incy, dwork,
m_mod_thread_x, my_gpu_id, num_gpus, nb,
the_chosen_block_id, the_chosen_gpu_id, kstan);
}
ssymv_kernel_fermi_L_update_mgpu_offset<<< grid, threads_u, 0, magma_stream >>>(
n, alpha, A, lda, x, incx, beta, y, incy, dwork,
my_gpu_id, num_gpus, nb,
the_chosen_block_id, the_chosen_gpu_id, kstan);
}
/*************************************************************************
Purpose
=======
magmablas_ssymv performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n symmetric matrix.
Arguments
==========
UPLO CHARACTER*1.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
UPLO = 'U' or 'u' Only the upper triangular part of A
is to be referenced.
UPLO = 'L' or 'l' Only the lower triangular part of A
is to be referenced.
Unchanged on exit.
N INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
Unchanged on exit.
ALPHA REAL.
On entry, ALPHA specifies the scalar alpha.
Unchanged on exit.
A REAL array of DIMENSION ( LDA, n ).
Before entry with UPLO = 'U' or 'u', the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the symmetric matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = 'L' or 'l', the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the symmetric matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
Unchanged on exit.
LDA INTEGER.
On entry, LDA specifies the first dimension of A as declared
in the calling (sub) program. LDA must be at least
max( 1, n ).
Unchanged on exit.
It is recommended that lda be a multiple of 16; otherwise
performance will be degraded because the memory accesses
will not be fully coalesced.
X REAL array of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
Unchanged on exit.
INCX INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
Unchanged on exit.
BETA REAL.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
Unchanged on exit.
Y REAL array of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
INCY INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
Unchanged on exit.
*/
extern "C"
magma_int_t
magmablas_ssymv_mgpu_offset(
char uplo, magma_int_t n,
float alpha,
float **A, magma_int_t lda,
float **x, magma_int_t incx,
float beta,
float **y, magma_int_t incy,
float **work, magma_int_t lwork,
magma_int_t num_gpus,
magma_int_t nb,
magma_int_t offset,
magma_queue_t stream[][10])
{
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// no CUDA ARCH 1.x version
fprintf( stderr, "%s not supported on CUDA arch 1.x", __func__ );
return MAGMA_ERR_NOT_SUPPORTED;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
char uplo_[2] = {uplo, 0};
int upper = lapackf77_lsame(uplo_, "U");
/*
* Test the input parameters.
*/
if ( (! upper) && (! lapackf77_lsame(uplo_, "L")) ) {
return -1;
} else if ( n < 0 ) {
return -2;
} else if ( lda < max(1,n) ) {
return -5;
} else if ( incx == 0 ) {
return -7;
} else if ( incy == 0 ) {
return -10;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_S_EQUAL(alpha, MAGMA_S_ZERO) && MAGMA_S_EQUAL(beta, MAGMA_S_ONE) ) )
return MAGMA_SUCCESS;
/* TODO: Upper case is not implemented in MAGMA */
if ( upper ) {
fprintf( stderr, "Upper case is not implemented on multi GPUs\n" );
return MAGMA_ERR_NOT_SUPPORTED;
}
else {
magma_int_t blocks = (n - 1)/thread_x + 1;
magma_int_t lwmin = lda * (blocks + 1);
if ( lwork < lwmin ) {
fprintf( stderr, "Not enough work space in %s: passed %d, required %d\n",
__func__, (int) lwork, (int) lwmin);
return -12;
}
if ( nb != 64 ) {
fprintf( stderr, "Error in %s: nb != 64, please reallocate matrix among GPUs\n", __func__ );
return MAGMA_ERR_ILLEGAL_VALUE;
}
{
magma_int_t i = 0;
for(i=0; i < num_gpus; i++) {
magma_setdevice(i);
magmablasSetKernelStream(stream[i][0]);
magma_int_t the_chosen_block_id = offset / 64;
magma_int_t the_chosen_gpu_id = the_chosen_block_id % num_gpus;
magma_int_t num_blocks_skipped = the_chosen_block_id / num_gpus;
if ( i < the_chosen_gpu_id ) {
num_blocks_skipped += 1;
}
int new_gpu_id = ( i + num_gpus - the_chosen_gpu_id ) % num_gpus;
magmablas_ssymv_fermi_L_mgpu_offset(n, alpha, A[i], lda, x[i], incx, beta, y[i], incy, work[i],
new_gpu_id, num_gpus, nb, offset, num_blocks_skipped);
}
}
}
return MAGMA_SUCCESS;
}
extern "C"
magma_int_t
magmablas_ssymv2_mgpu_offset(
char uplo, magma_int_t n,
float alpha,
float **A, magma_int_t lda,
float **x, magma_int_t incx,
float beta,
float **y, magma_int_t incy,
float **work, magma_int_t lwork,
magma_int_t num_gpus,
magma_int_t nb,
magma_int_t offset)
{
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// no CUDA ARCH 1.x version
fprintf( stderr, "%s not supported on CUDA arch 1.x", __func__ );
return MAGMA_ERR_NOT_SUPPORTED;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
char uplo_[2] = {uplo, 0};
int upper = lapackf77_lsame(uplo_, "U");
/*
* Test the input parameters.
*/
if ( (! upper) && (! lapackf77_lsame(uplo_, "L")) ) {
return -1;
} else if ( n < 0 ) {
return -2;
} else if ( lda < max(1,n) ) {
return -5;
} else if ( incx == 0 ) {
return -7;
} else if ( incy == 0 ) {
return -10;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_S_EQUAL(alpha, MAGMA_S_ZERO) && MAGMA_S_EQUAL(beta, MAGMA_S_ONE) ) )
return MAGMA_SUCCESS;
/* TODO: Upper case is not implemented in MAGMA */
if ( upper ) {
fprintf( stderr, "Upper case is not implemented on multi GPUs\n" );
return MAGMA_ERR_NOT_SUPPORTED;
}
else {
magma_int_t blocks = (n - 1)/thread_x + 1;
magma_int_t lwmin = lda * (blocks + 1);
if ( lwork < lwmin ) {
fprintf( stderr, "Not enough work space in %s: passed %d, required %d\n",
__func__, (int) lwork, (int) lwmin);
return -12;
}
if ( nb != 64 ) {
fprintf( stderr, "Error in %s: nb != 64, please reallocate matrix among GPUs\n", __func__ );
return MAGMA_ERR_ILLEGAL_VALUE;
}
if ( num_gpus == 1 ) {
magmablas_ssymv_work(uplo, n-offset, alpha, A[0] + offset + lda * offset, lda, x[0] + offset, incx, beta, y[0] + offset, incy, work[0], lwork);
}
else {
magma_int_t i = 0;
for(i=0; i < num_gpus; i++) {
magma_setdevice(i);
magma_int_t the_chosen_block_id = offset / 64;
magma_int_t the_chosen_gpu_id = the_chosen_block_id % num_gpus;
magma_int_t num_blocks_skipped = the_chosen_block_id / num_gpus;
if ( i < the_chosen_gpu_id ) {
num_blocks_skipped += 1;
}
int new_gpu_id = ( i + num_gpus - the_chosen_gpu_id ) % num_gpus;
magmablas_ssymv_fermi_L_mgpu_offset(n, alpha, A[i], lda, x[i], incx, beta, y[i], incy, work[i],
new_gpu_id, num_gpus, nb, offset, num_blocks_skipped);
}
}
}
return MAGMA_SUCCESS;
}
extern "C"
magma_int_t
magmablas_ssymv2_mgpu(
char uplo, magma_int_t n,
float alpha,
float **A, magma_int_t lda,
float **x, magma_int_t incx,
float beta,
float **y, magma_int_t incy,
float **work, magma_int_t lwork,
magma_int_t num_gpus,
magma_int_t nb)
{
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// no CUDA ARCH 1.x version
fprintf( stderr, "%s not supported on CUDA arch 1.x", __func__ );
return MAGMA_ERR_NOT_SUPPORTED;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
char uplo_[2] = {uplo, 0};
int upper = lapackf77_lsame(uplo_, "U");
/*
* Test the input parameters.
*/
if ( (! upper) && (! lapackf77_lsame(uplo_, "L")) ) {
return -1;
} else if ( n < 0 ) {
return -2;
} else if ( lda < max(1,n) ) {
return -5;
} else if ( incx == 0 ) {
return -7;
} else if ( incy == 0 ) {
return -10;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_S_EQUAL(alpha, MAGMA_S_ZERO) && MAGMA_S_EQUAL(beta, MAGMA_S_ONE) ) )
return MAGMA_SUCCESS;
/* TODO: Upper case is not implemented in MAGMA */
if ( upper ) {
fprintf( stderr, "Upper case is not implemented on multi GPUs\n" );
return MAGMA_ERR_NOT_SUPPORTED;
}
else {
magma_int_t blocks = (n - 1)/thread_x + 1;
magma_int_t lwmin = lda * (blocks + 1);
if ( lwork < lwmin ) {
fprintf( stderr, "Not enough work space in %s: passed %d, required %d\n",
__func__, (int) lwork, (int) lwmin);
return -12;
}
if ( nb != 64 ) {
fprintf( stderr, "Error in %s: nb != 64, please reallocate matrix among GPUs\n", __func__ );
return MAGMA_ERR_ILLEGAL_VALUE;
}
magma_int_t i = 0;
for(i=0; i < num_gpus; i++) {
magma_setdevice(i);
magmablas_ssymv_fermi_L_mgpu_offset(n, alpha, A[i], lda, x[i], incx, beta, y[i], incy, work[i],
i, num_gpus, nb, 0, 0);
}
}
return MAGMA_SUCCESS;
}
|
46d6faabb66b792fc7b4ce45e12aed75e440f27b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/** @file mlp-learning-an-image.cu
 * @author Thomas Müller, NVIDIA
* @brief Sample application that uses the tiny cuda nn framework to learn a
2D function that represents an image.
*/
#include <tiny-cuda-nn/misc_kernels.h>
#include <tiny-cuda-nn/config.h>
#include "tinyexr_wrapper.h"
#include <chrono>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <random>
#include <stdexcept>
#include <string>
#include <thread>
#include <vector>
using namespace tcnn;
using precision_t = network_precision_t;
GPUMemory<float> load_image(const std::string& filename, int& width, int& height) {
float* out; // width * height * RGBA
load_exr(&out, &width, &height, filename.c_str());
GPUMemory<float> result(width * height * 4);
result.copy_from_host(out);
free(out); // release memory of image data
return result;
}
template <typename T>
void save_image(const T* image, int width, int height, int n_channels, int channel_stride, const std::string& filename) {
std::vector<T> host_data(width * height * n_channels);
CUDA_CHECK_THROW(hipMemcpy(host_data.data(), image, host_data.size()*sizeof(T), hipMemcpyDeviceToHost));
std::vector<float> float_host_data(host_data.size());
for (size_t i = 0; i < host_data.size(); ++i) {
float_host_data[i] = (float)host_data[i];
}
save_exr(float_host_data.data(), width, height, n_channels, channel_stride, filename.c_str());
}
template <uint32_t stride>
__global__ void eval_image(uint32_t n_elements, hipTextureObject_t texture, float* __restrict__ xs_and_ys, float* __restrict__ result) {
uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= n_elements) return;
uint32_t output_idx = i * stride;
uint32_t input_idx = i * 2;
float4 val = tex2D<float4>(texture, xs_and_ys[input_idx], xs_and_ys[input_idx+1]);
result[output_idx + 0] = val.x;
result[output_idx + 1] = val.y;
result[output_idx + 2] = val.z;
for (uint32_t i = 3; i < stride; ++i) {
result[output_idx + i] = 1;
}
}
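// Layout note (illustrative): with stride == 3, pixel i writes its RGB sample
// to result[3*i + 0..2]; for larger strides the extra channels are filled with
// 1, presumably so the buffer can match a padded network output width.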
int main(int argc, char* argv[]) {
if (!(__CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2))) {
std::cout << "Turing Tensor Core operations must be compiled with CUDA 10.2 Toolkit or later." << std::endl;
return -1;
}
hipDeviceProp_t props;
hipError_t error = hipGetDeviceProperties(&props, 0);
if (error != hipSuccess) {
std::cout << "hipGetDeviceProperties() returned an error: " << hipGetErrorString(error) << std::endl;
return -1;
}
if (!((props.major * 10 + props.minor) >= 75)) {
std::cout << "Turing Tensor Core operations must be run on a machine with compute capability at least 75."
<< std::endl;
return -1;
}
if (argc < 2) {
std::cout << "USAGE: " << argv[0] << " " << "path-to-image.exr [path-to-optional-config.json]" << std::endl;
std::cout << "Sample EXR files are provided in 'data/images'." << std::endl;
return 0;
}
try {
json config = {
{"loss", {
{"otype", "RelativeL2"}
}},
{"optimizer", {
{"otype", "Adam"},
// {"otype", "Shampoo"},
{"learning_rate", 1e-2},
{"beta1", 0.9f},
{"beta2", 0.99f},
{"l2_reg", 0.0f},
// The following parameters are only used when the optimizer is "Shampoo".
{"beta3", 0.9f},
{"beta_shampoo", 0.0f},
{"identity", 0.0001f},
{"cg_on_momentum", false},
{"frobenius_normalization", true},
}},
{"encoding", {
{"otype", "OneBlob"},
{"n_bins", 32},
}},
{"network", {
{"otype", "FullyFusedMLP"},
// {"otype", "MLP"},
// {"otype", "ResNet"},
{"n_neurons", 64},
{"n_layers", 4},
{"activation", "ReLU"},
{"output_activation", "None"},
}},
};
if (argc >= 3) {
std::cout << "Loading custom json config '" << argv[2] << "'." << std::endl;
std::ifstream f{argv[2]};
config = json::parse(f, nullptr, true, /*skip_comments=*/true);
}
// First step: load an image that we'd like to learn
int width, height;
GPUMemory<float> image = load_image(argv[1], width, height);
// Second step: create a cuda texture out of this image. It'll be used to generate training data efficiently on the fly
hipResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = hipResourceTypePitch2D;
resDesc.res.pitch2D.devPtr = image.data();
resDesc.res.pitch2D.desc = hipCreateChannelDesc(32, 32, 32, 32, hipChannelFormatKindFloat);
resDesc.res.pitch2D.width = width;
resDesc.res.pitch2D.height = height;
resDesc.res.pitch2D.pitchInBytes = width * 4 * sizeof(float);
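// The image is loaded as tightly packed RGBA floats, so the row pitch is exactly width * 4 floats.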
hipTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.filterMode = hipFilterModeLinear;
texDesc.normalizedCoords = true;
texDesc.addressMode[0] = hipAddressModeClamp;
texDesc.addressMode[1] = hipAddressModeClamp;
texDesc.addressMode[2] = hipAddressModeClamp;
hipResourceViewDesc viewDesc;
memset(&viewDesc, 0, sizeof(viewDesc));
viewDesc.format = hipResViewFormatFloat4;
viewDesc.width = width;
viewDesc.height = height;
hipTextureObject_t texture;
CUDA_CHECK_THROW(hipCreateTextureObject(&texture, &resDesc, &texDesc, &viewDesc));
// Third step: sample a reference image to dump to disk. Visual comparison of this reference image and the learned
// function will be eventually possible.
int sampling_width = width;
int sampling_height = height;
// Uncomment to fix the resolution of the training task independent of input image
// int sampling_width = 1024;
// int sampling_height = 1024;
uint32_t n_coords = sampling_width * sampling_height;
uint32_t n_coords_padded = (n_coords + 255) / 256 * 256;
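// Round the coordinate count up to the next multiple of 256; the padded count sizes the inference batch below.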
GPUMemory<float> sampled_image(n_coords * 3);
GPUMemory<float> xs_and_ys(n_coords_padded * 2);
std::vector<float> host_xs_and_ys(n_coords * 2);
for (int y = 0; y < sampling_height; ++y) {
for (int x = 0; x < sampling_width; ++x) {
int idx = (y * sampling_width + x) * 2;
host_xs_and_ys[idx+0] = (float)(x + 0.5) / (float)sampling_width;
host_xs_and_ys[idx+1] = (float)(y + 0.5) / (float)sampling_height;
}
}
xs_and_ys.copy_from_host(host_xs_and_ys.data());
linear_kernel(eval_image<3>, 0, nullptr, n_coords, texture, xs_and_ys.data(), sampled_image.data());
save_image(sampled_image.data(), sampling_width, sampling_height, 3, 3, "reference.exr");
// Fourth step: train the model by sampling the above image and optimizing an error metric
// Various constants for the network and optimization
const uint32_t batch_size = 1 << 16;
const uint32_t n_training_steps = argc >= 4 ? atoi(argv[3]) : 10000000;
const uint32_t n_input_dims = 2; // 2-D image coordinate
const uint32_t n_output_dims = 3; // RGB color
// Input & corresponding RNG
hiprandGenerator_t rng;
CURAND_CHECK_THROW(hiprandCreateGenerator(&rng, HIPRAND_RNG_PSEUDO_DEFAULT));
CURAND_CHECK_THROW(hiprandSetPseudoRandomGeneratorSeed(rng, 1337ULL));
hipStream_t inference_stream;
CUDA_CHECK_THROW(hipStreamCreate(&inference_stream));
hipStream_t training_stream = inference_stream;
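// The training stream aliases the inference stream, so training and inference work is serialized on one stream.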
CURAND_CHECK_THROW(hiprandSetStream(rng, training_stream));
// Auxiliary matrices for training
GPUMatrix<float> training_target(n_output_dims, batch_size);
GPUMatrix<float> training_batch(n_input_dims, batch_size);
// Auxiliary matrices for evaluation
GPUMatrix<float> prediction(n_output_dims, n_coords_padded);
GPUMatrix<float> inference_batch(xs_and_ys.data(), n_input_dims, n_coords_padded);
json encoding_opts = config.value("encoding", json::object());
json loss_opts = config.value("loss", json::object());
json optimizer_opts = config.value("optimizer", json::object());
json network_opts = config.value("network", json::object());
std::shared_ptr<Loss<precision_t>> loss{create_loss<precision_t>(loss_opts)};
std::shared_ptr<Optimizer<precision_t>> optimizer{create_optimizer<precision_t>(optimizer_opts)};
std::shared_ptr<NetworkWithInputEncoding<precision_t>> network = std::make_shared<NetworkWithInputEncoding<precision_t>>(n_input_dims, 0, n_output_dims, encoding_opts, network_opts);
auto trainer = std::make_shared<Trainer<float, precision_t, precision_t>>(network, optimizer, loss);
std::chrono::steady_clock::time_point begin = std::chrono::steady_clock::now();
float tmp_loss = 0;
uint32_t tmp_loss_counter = 0;
std::cout << "Beginning optimization with " << n_training_steps << " training steps." << std::endl;
for (uint32_t i = 0; i < n_training_steps; ++i) {
bool print_loss = i % 1000 == 0;
bool visualize_learned_func = argc < 5 && i % 1000 == 0;
// Compute reference values at random coordinates
{
CURAND_CHECK_THROW(hiprandGenerateUniform(rng, training_batch.data(), batch_size * n_input_dims));
linear_kernel(eval_image<n_output_dims>, 0, training_stream, batch_size, texture, training_batch.data(), training_target.data());
}
// Training step
float loss_value;
{
trainer->training_step(training_stream, training_batch, training_target, &loss_value);
}
tmp_loss += loss_value;
++tmp_loss_counter;
// Debug outputs
{
if (print_loss) {
std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now();
std::cout << "Step#" << i << ": " << "loss=" << tmp_loss/(float)tmp_loss_counter << " time=" << std::chrono::duration_cast<std::chrono::microseconds>(end - begin).count() << "[s]" << std::endl;
tmp_loss = 0;
tmp_loss_counter = 0;
}
if (visualize_learned_func) {
network->inference(inference_stream, inference_batch, prediction);
save_image(prediction.data(), sampling_width, sampling_height, 3, n_output_dims, std::to_string(i) + ".exr");
}
// Don't count visualizing as part of timing
// (assumes visualize_learned_func is only true when print_loss is true)
if (print_loss) {
begin = std::chrono::steady_clock::now();
}
}
}
// Dump final image if a name was specified
if (argc >= 5) {
network->inference(inference_stream, inference_batch, prediction);
save_image(prediction.data(), sampling_width, sampling_height, 3, n_output_dims, argv[4]);
}
} catch (std::exception& e) {
std::cout << "Uncaught exception: " << e.what() << std::endl;
}
return EXIT_SUCCESS;
}
| 46d6faabb66b792fc7b4ce45e12aed75e440f27b.cu | /*
* Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TOR (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*//*
*/
/** @file mlp-learning-an-image.cu
* @author Thomas Müller, NVIDIA
* @brief Sample application that uses the tiny cuda nn framework to learn a
2D function that represents an image.
*/
#include <tiny-cuda-nn/misc_kernels.h>
#include <tiny-cuda-nn/config.h>
#include "tinyexr_wrapper.h"
#include <chrono>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <random>
#include <stdexcept>
#include <string>
#include <thread>
#include <vector>
using namespace tcnn;
using precision_t = network_precision_t;
GPUMemory<float> load_image(const std::string& filename, int& width, int& height) {
float* out; // width * height * RGBA
load_exr(&out, &width, &height, filename.c_str());
GPUMemory<float> result(width * height * 4);
result.copy_from_host(out);
free(out); // release memory of image data
return result;
}
template <typename T>
void save_image(const T* image, int width, int height, int n_channels, int channel_stride, const std::string& filename) {
std::vector<T> host_data(width * height * n_channels);
CUDA_CHECK_THROW(cudaMemcpy(host_data.data(), image, host_data.size()*sizeof(T), cudaMemcpyDeviceToHost));
std::vector<float> float_host_data(host_data.size());
for (size_t i = 0; i < host_data.size(); ++i) {
float_host_data[i] = (float)host_data[i];
}
save_exr(float_host_data.data(), width, height, n_channels, channel_stride, filename.c_str());
}
template <uint32_t stride>
__global__ void eval_image(uint32_t n_elements, cudaTextureObject_t texture, float* __restrict__ xs_and_ys, float* __restrict__ result) {
uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= n_elements) return;
uint32_t output_idx = i * stride;
uint32_t input_idx = i * 2;
float4 val = tex2D<float4>(texture, xs_and_ys[input_idx], xs_and_ys[input_idx+1]);
result[output_idx + 0] = val.x;
result[output_idx + 1] = val.y;
result[output_idx + 2] = val.z;
for (uint32_t i = 3; i < stride; ++i) {
result[output_idx + i] = 1;
}
}
int main(int argc, char* argv[]) {
if (!(__CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2))) {
std::cout << "Turing Tensor Core operations must be compiled with CUDA 10.2 Toolkit or later." << std::endl;
return -1;
}
cudaDeviceProp props;
cudaError_t error = cudaGetDeviceProperties(&props, 0);
if (error != cudaSuccess) {
std::cout << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl;
return -1;
}
if (!((props.major * 10 + props.minor) >= 75)) {
std::cout << "Turing Tensor Core operations must be run on a machine with compute capability at least 75."
<< std::endl;
return -1;
}
if (argc < 2) {
std::cout << "USAGE: " << argv[0] << " " << "path-to-image.exr [path-to-optional-config.json]" << std::endl;
std::cout << "Sample EXR files are provided in 'data/images'." << std::endl;
return 0;
}
try {
json config = {
{"loss", {
{"otype", "RelativeL2"}
}},
{"optimizer", {
{"otype", "Adam"},
// {"otype", "Shampoo"},
{"learning_rate", 1e-2},
{"beta1", 0.9f},
{"beta2", 0.99f},
{"l2_reg", 0.0f},
// The following parameters are only used when the optimizer is "Shampoo".
{"beta3", 0.9f},
{"beta_shampoo", 0.0f},
{"identity", 0.0001f},
{"cg_on_momentum", false},
{"frobenius_normalization", true},
}},
{"encoding", {
{"otype", "OneBlob"},
{"n_bins", 32},
}},
{"network", {
{"otype", "FullyFusedMLP"},
// {"otype", "MLP"},
// {"otype", "ResNet"},
{"n_neurons", 64},
{"n_layers", 4},
{"activation", "ReLU"},
{"output_activation", "None"},
}},
};
if (argc >= 3) {
std::cout << "Loading custom json config '" << argv[2] << "'." << std::endl;
std::ifstream f{argv[2]};
config = json::parse(f, nullptr, true, /*skip_comments=*/true);
}
// First step: load an image that we'd like to learn
int width, height;
GPUMemory<float> image = load_image(argv[1], width, height);
// Second step: create a cuda texture out of this image. It'll be used to generate training data efficiently on the fly
cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = cudaResourceTypePitch2D;
resDesc.res.pitch2D.devPtr = image.data();
resDesc.res.pitch2D.desc = cudaCreateChannelDesc(32, 32, 32, 32, cudaChannelFormatKindFloat);
resDesc.res.pitch2D.width = width;
resDesc.res.pitch2D.height = height;
resDesc.res.pitch2D.pitchInBytes = width * 4 * sizeof(float);
cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.filterMode = cudaFilterModeLinear;
texDesc.normalizedCoords = true;
texDesc.addressMode[0] = cudaAddressModeClamp;
texDesc.addressMode[1] = cudaAddressModeClamp;
texDesc.addressMode[2] = cudaAddressModeClamp;
cudaResourceViewDesc viewDesc;
memset(&viewDesc, 0, sizeof(viewDesc));
viewDesc.format = cudaResViewFormatFloat4;
viewDesc.width = width;
viewDesc.height = height;
cudaTextureObject_t texture;
CUDA_CHECK_THROW(cudaCreateTextureObject(&texture, &resDesc, &texDesc, &viewDesc));
// Third step: sample a reference image to dump to disk. Visual comparison of this reference image and the learned
// function will be eventually possible.
int sampling_width = width;
int sampling_height = height;
// Uncomment to fix the resolution of the training task independent of input image
// int sampling_width = 1024;
// int sampling_height = 1024;
uint32_t n_coords = sampling_width * sampling_height;
uint32_t n_coords_padded = (n_coords + 255) / 256 * 256;
GPUMemory<float> sampled_image(n_coords * 3);
GPUMemory<float> xs_and_ys(n_coords_padded * 2);
std::vector<float> host_xs_and_ys(n_coords * 2);
for (int y = 0; y < sampling_height; ++y) {
for (int x = 0; x < sampling_width; ++x) {
int idx = (y * sampling_width + x) * 2;
host_xs_and_ys[idx+0] = (float)(x + 0.5) / (float)sampling_width;
host_xs_and_ys[idx+1] = (float)(y + 0.5) / (float)sampling_height;
}
}
xs_and_ys.copy_from_host(host_xs_and_ys.data());
linear_kernel(eval_image<3>, 0, nullptr, n_coords, texture, xs_and_ys.data(), sampled_image.data());
save_image(sampled_image.data(), sampling_width, sampling_height, 3, 3, "reference.exr");
// Fourth step: train the model by sampling the above image and optimizing an error metric
// Various constants for the network and optimization
const uint32_t batch_size = 1 << 16;
const uint32_t n_training_steps = argc >= 4 ? atoi(argv[3]) : 10000000;
const uint32_t n_input_dims = 2; // 2-D image coordinate
const uint32_t n_output_dims = 3; // RGB color
// Input & corresponding RNG
curandGenerator_t rng;
CURAND_CHECK_THROW(curandCreateGenerator(&rng, CURAND_RNG_PSEUDO_DEFAULT));
CURAND_CHECK_THROW(curandSetPseudoRandomGeneratorSeed(rng, 1337ULL));
cudaStream_t inference_stream;
CUDA_CHECK_THROW(cudaStreamCreate(&inference_stream));
cudaStream_t training_stream = inference_stream;
CURAND_CHECK_THROW(curandSetStream(rng, training_stream));
// Auxiliary matrices for training
GPUMatrix<float> training_target(n_output_dims, batch_size);
GPUMatrix<float> training_batch(n_input_dims, batch_size);
// Auxiliary matrices for evaluation
GPUMatrix<float> prediction(n_output_dims, n_coords_padded);
GPUMatrix<float> inference_batch(xs_and_ys.data(), n_input_dims, n_coords_padded);
json encoding_opts = config.value("encoding", json::object());
json loss_opts = config.value("loss", json::object());
json optimizer_opts = config.value("optimizer", json::object());
json network_opts = config.value("network", json::object());
std::shared_ptr<Loss<precision_t>> loss{create_loss<precision_t>(loss_opts)};
std::shared_ptr<Optimizer<precision_t>> optimizer{create_optimizer<precision_t>(optimizer_opts)};
std::shared_ptr<NetworkWithInputEncoding<precision_t>> network = std::make_shared<NetworkWithInputEncoding<precision_t>>(n_input_dims, 0, n_output_dims, encoding_opts, network_opts);
auto trainer = std::make_shared<Trainer<float, precision_t, precision_t>>(network, optimizer, loss);
std::chrono::steady_clock::time_point begin = std::chrono::steady_clock::now();
float tmp_loss = 0;
uint32_t tmp_loss_counter = 0;
std::cout << "Beginning optimization with " << n_training_steps << " training steps." << std::endl;
for (uint32_t i = 0; i < n_training_steps; ++i) {
bool print_loss = i % 1000 == 0;
bool visualize_learned_func = argc < 5 && i % 1000 == 0;
// Compute reference values at random coordinates
{
CURAND_CHECK_THROW(curandGenerateUniform(rng, training_batch.data(), batch_size * n_input_dims));
linear_kernel(eval_image<n_output_dims>, 0, training_stream, batch_size, texture, training_batch.data(), training_target.data());
}
// Training step
float loss_value;
{
trainer->training_step(training_stream, training_batch, training_target, &loss_value);
}
tmp_loss += loss_value;
++tmp_loss_counter;
// Debug outputs
{
if (print_loss) {
std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now();
std::cout << "Step#" << i << ": " << "loss=" << tmp_loss/(float)tmp_loss_counter << " time=" << std::chrono::duration_cast<std::chrono::microseconds>(end - begin).count() << "[µs]" << std::endl;
tmp_loss = 0;
tmp_loss_counter = 0;
}
if (visualize_learned_func) {
network->inference(inference_stream, inference_batch, prediction);
save_image(prediction.data(), sampling_width, sampling_height, 3, n_output_dims, std::to_string(i) + ".exr");
}
// Don't count visualizing as part of timing
// (assumes visualize_learned_func is only true when print_loss is true)
if (print_loss) {
begin = std::chrono::steady_clock::now();
}
}
}
// Dump final image if a name was specified
if (argc >= 5) {
network->inference(inference_stream, inference_batch, prediction);
save_image(prediction.data(), sampling_width, sampling_height, 3, n_output_dims, argv[4]);
}
} catch (std::exception& e) {
std::cout << "Uncaught exception: " << e.what() << std::endl;
}
return EXIT_SUCCESS;
}
|
6772c0007065a2eef6b6f69a6cb561b8fcb59497.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include "../common/book.h"
__global__ void add( int a, int b, int *c ) {
*c = a + b;
}
int main( void ) {
int c;
int *dev_c;
HANDLE_ERROR( hipMalloc( (void**)&dev_c, sizeof(int) ) );
hipLaunchKernelGGL(( add), dim3(1),dim3(1), 0, 0, 2, 7, dev_c );
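// hipLaunchKernelGGL is HIP's replacement for the CUDA triple-chevron launch add<<<1,1>>>( 2, 7, dev_c ).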
HANDLE_ERROR( hipMemcpy( &c, dev_c, sizeof(int),
hipMemcpyDeviceToHost ) );
printf( "2 + 7 = %d\n", c );
HANDLE_ERROR( hipFree( dev_c ) );
return 0;
}
| 6772c0007065a2eef6b6f69a6cb561b8fcb59497.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include "../common/book.h"
__global__ void add( int a, int b, int *c ) {
*c = a + b;
}
int main( void ) {
int c;
int *dev_c;
HANDLE_ERROR( cudaMalloc( (void**)&dev_c, sizeof(int) ) );
add<<<1,1>>>( 2, 7, dev_c );
HANDLE_ERROR( cudaMemcpy( &c, dev_c, sizeof(int),
cudaMemcpyDeviceToHost ) );
printf( "2 + 7 = %d\n", c );
HANDLE_ERROR( cudaFree( dev_c ) );
return 0;
}
|
74ff932b8c174f34238d56ef019820045314b373.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright (c) 2012-2013 The Ohio State University.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <hip/hip_runtime.h>
#include "../include/common.h"
#include "../include/schema.h"
#include "../include/gpuCudaLib.h"
#ifdef HAS_GMM
#include "gmm.h"
#endif
#define CHECK_POINTER(p) do { \
if(p == NULL){ \
perror("Failed to allocate host memory"); \
exit(-1); \
}} while(0)
__global__ static void materialize(char ** content, int colNum, int *attrSize, long tupleNum, int tupleSize, char *result){
int startIndex = blockIdx.x*blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
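// Grid-stride loop: each thread assembles whole output tuples by concatenating every column's value for row i into one contiguous record.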
for(long i=startIndex;i<tupleNum;i+=stride){
int offset = 0;
for(int j=0;j<colNum;j++){
int aSize = attrSize[j];
memcpy(result+i*tupleSize + offset, content[j]+ i*aSize, aSize);
offset += aSize;
}
}
}
char * materializeCol(struct materializeNode * mn, struct statistic * pp){
struct timespec start,end;
clock_gettime(CLOCK_REALTIME,&start);
struct tableNode *tn = mn->table;
char * res, * gpuResult;
char **gpuContent, **column;
long size = tn->tupleNum * tn->tupleSize;
int * gpuAttrSize;
column = (char **) malloc(sizeof(char *) * tn->totalAttr);
CHECK_POINTER(column);
#ifdef HAS_GMM
CUDA_SAFE_CALL_NO_SYNC(cudaMallocEx((void **)&gpuContent, sizeof(char *) * tn->totalAttr, HINT_PTARRAY));
#else
CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void **)&gpuContent, sizeof(char *) * tn->totalAttr));
#endif
res = (char *) malloc(size);
CHECK_POINTER(res);
CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void **)&gpuResult, size));
CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void **)&gpuAttrSize,sizeof(int) * tn->totalAttr));
for(int i=0;i<tn->totalAttr;i++){
if(tn->dataPos[i] == MEM){
CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void**)&column[i], tn->tupleNum*tn->attrSize[i]));
CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(column[i], tn->content[i], tn->tupleNum *tn->attrSize[i], hipMemcpyHostToDevice));
CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(&gpuContent[i], &column[i], sizeof(char *), hipMemcpyHostToDevice));
}else if(tn->dataPos[i] == GPU){
CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(&gpuContent[i], &tn->content[i], sizeof(char *), hipMemcpyHostToDevice));
}
}
CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(gpuAttrSize, tn->attrSize, sizeof(int) * tn->totalAttr, hipMemcpyHostToDevice));
dim3 grid(512);
dim3 block(128);
GMM_CALL(cudaReference(0, HINT_READ|HINT_PTARRAY|HINT_PTAREAD));
GMM_CALL(cudaReference(2, HINT_READ));
GMM_CALL(cudaReference(5, HINT_WRITE));
hipLaunchKernelGGL(( materialize), dim3(grid),dim3(block), 0, 0, gpuContent, tn->totalAttr, gpuAttrSize, tn->tupleNum, tn->tupleSize, gpuResult);
CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(res, gpuResult, size, hipMemcpyDeviceToHost));
for(int i=0;i<tn->totalAttr;i++){
if(tn->dataPos[i] == MEM){
CUDA_SAFE_CALL_NO_SYNC(hipFree(column[i]));
}
}
free(column);
CUDA_SAFE_CALL_NO_SYNC(hipFree(gpuContent));
CUDA_SAFE_CALL_NO_SYNC(hipFree(gpuAttrSize));
CUDA_SAFE_CALL_NO_SYNC(hipFree(gpuResult));
clock_gettime(CLOCK_REALTIME,&end);
double timeE = (end.tv_sec - start.tv_sec)* BILLION + end.tv_nsec - start.tv_nsec;
printf("Materialization Time: %lf\n", timeE/(1000*1000));
return res;
}
| 74ff932b8c174f34238d56ef019820045314b373.cu | /*
Copyright (c) 2012-2013 The Ohio State University.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <cuda.h>
#include "../include/common.h"
#include "../include/schema.h"
#include "../include/gpuCudaLib.h"
#ifdef HAS_GMM
#include "gmm.h"
#endif
#define CHECK_POINTER(p) do { \
if(p == NULL){ \
perror("Failed to allocate host memory"); \
exit(-1); \
}} while(0)
__global__ static void materialize(char ** content, int colNum, int *attrSize, long tupleNum, int tupleSize, char *result){
int startIndex = blockIdx.x*blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(long i=startIndex;i<tupleNum;i+=stride){
int offset = 0;
for(int j=0;j<colNum;j++){
int aSize = attrSize[j];
memcpy(result+i*tupleSize + offset, content[j]+ i*aSize, aSize);
offset += aSize;
}
}
}
char * materializeCol(struct materializeNode * mn, struct statistic * pp){
struct timespec start,end;
clock_gettime(CLOCK_REALTIME,&start);
struct tableNode *tn = mn->table;
char * res, * gpuResult;
char **gpuContent, **column;
long size = tn->tupleNum * tn->tupleSize;
int * gpuAttrSize;
column = (char **) malloc(sizeof(char *) * tn->totalAttr);
CHECK_POINTER(column);
#ifdef HAS_GMM
CUDA_SAFE_CALL_NO_SYNC(cudaMallocEx((void **)&gpuContent, sizeof(char *) * tn->totalAttr, HINT_PTARRAY));
#else
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&gpuContent, sizeof(char *) * tn->totalAttr));
#endif
res = (char *) malloc(size);
CHECK_POINTER(res);
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&gpuResult, size));
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&gpuAttrSize,sizeof(int) * tn->totalAttr));
for(int i=0;i<tn->totalAttr;i++){
if(tn->dataPos[i] == MEM){
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void**)&column[i], tn->tupleNum*tn->attrSize[i]));
CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(column[i], tn->content[i], tn->tupleNum *tn->attrSize[i], cudaMemcpyHostToDevice));
CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(&gpuContent[i], &column[i], sizeof(char *), cudaMemcpyHostToDevice));
}else if(tn->dataPos[i] == GPU){
CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(&gpuContent[i], &tn->content[i], sizeof(char *), cudaMemcpyHostToDevice));
}
}
CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(gpuAttrSize, tn->attrSize, sizeof(int) * tn->totalAttr, cudaMemcpyHostToDevice));
dim3 grid(512);
dim3 block(128);
GMM_CALL(cudaReference(0, HINT_READ|HINT_PTARRAY|HINT_PTAREAD));
GMM_CALL(cudaReference(2, HINT_READ));
GMM_CALL(cudaReference(5, HINT_WRITE));
materialize<<<grid,block>>> (gpuContent, tn->totalAttr, gpuAttrSize, tn->tupleNum, tn->tupleSize, gpuResult);
CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(res, gpuResult, size, cudaMemcpyDeviceToHost));
for(int i=0;i<tn->totalAttr;i++){
if(tn->dataPos[i] == MEM){
CUDA_SAFE_CALL_NO_SYNC(cudaFree(column[i]));
}
}
free(column);
CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuContent));
CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuAttrSize));
CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuResult));
clock_gettime(CLOCK_REALTIME,&end);
double timeE = (end.tv_sec - start.tv_sec)* BILLION + end.tv_nsec - start.tv_nsec;
printf("Materialization Time: %lf\n", timeE/(1000*1000));
return res;
}
|
e79b44ca58d646822e0987a6353bcbff7ab31d54.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/arange_kernel.h"
#include "paddle/phi/kernels/copy_kernel.h"
#include "paddle/phi/kernels/funcs/range_function.h"
namespace phi {
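// Reads the first element of the tensor, copying it to the host first if it is not already on the CPU.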
template <typename T, typename Context>
inline T GetValue(const Context& dev_ctx, const DenseTensor& x) {
T value = static_cast<T>(0);
if (x.place() != CPUPlace()) {
DenseTensor cpu_x;
Copy(dev_ctx, x, CPUPlace(), true, &cpu_x);
value = cpu_x.data<T>()[0];
} else {
value = x.data<T>()[0];
}
return value;
}
template <typename T>
__global__ void Range(T start, T step, int64_t size, T* out) {
CUDA_KERNEL_LOOP(index, size) { out[index] = start + step * index; }
}
template <typename T, typename Context>
void ArangeKernel(const Context& dev_ctx,
const DenseTensor& start,
const DenseTensor& end,
const DenseTensor& step,
DenseTensor* out) {
T start_value = GetValue<T, Context>(dev_ctx, start);
T end_value = GetValue<T, Context>(dev_ctx, end);
T step_value = GetValue<T, Context>(dev_ctx, step);
int64_t size = 0;
phi::funcs::GetSize(start_value, end_value, step_value, &size);
out->Resize(phi::make_ddim({size}));
T* out_data = dev_ctx.template Alloc<T>(out);
auto stream = dev_ctx.stream();
int block = ::min(size, static_cast<int64_t>(256));
int grid = (size + block - 1) / block;
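// Ceiling division so there is at least one thread per output element.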
hipLaunchKernelGGL(( Range<T>), dim3(grid), dim3(block), 0, stream, start_value, step_value, size, out_data);
}
} // namespace phi
PD_REGISTER_KERNEL(
arange, GPU, ALL_LAYOUT, phi::ArangeKernel, float, double, int64_t, int) {
kernel->InputAt(0).SetBackend(phi::Backend::ALL_BACKEND);
kernel->InputAt(1).SetBackend(phi::Backend::ALL_BACKEND);
kernel->InputAt(2).SetBackend(phi::Backend::ALL_BACKEND);
}
| e79b44ca58d646822e0987a6353bcbff7ab31d54.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/arange_kernel.h"
#include "paddle/phi/kernels/copy_kernel.h"
#include "paddle/phi/kernels/funcs/range_function.h"
namespace phi {
template <typename T, typename Context>
inline T GetValue(const Context& dev_ctx, const DenseTensor& x) {
T value = static_cast<T>(0);
if (x.place() != CPUPlace()) {
DenseTensor cpu_x;
Copy(dev_ctx, x, CPUPlace(), true, &cpu_x);
value = cpu_x.data<T>()[0];
} else {
value = x.data<T>()[0];
}
return value;
}
template <typename T>
__global__ void Range(T start, T step, int64_t size, T* out) {
CUDA_KERNEL_LOOP(index, size) { out[index] = start + step * index; }
}
template <typename T, typename Context>
void ArangeKernel(const Context& dev_ctx,
const DenseTensor& start,
const DenseTensor& end,
const DenseTensor& step,
DenseTensor* out) {
T start_value = GetValue<T, Context>(dev_ctx, start);
T end_value = GetValue<T, Context>(dev_ctx, end);
T step_value = GetValue<T, Context>(dev_ctx, step);
int64_t size = 0;
phi::funcs::GetSize(start_value, end_value, step_value, &size);
out->Resize(phi::make_ddim({size}));
T* out_data = dev_ctx.template Alloc<T>(out);
auto stream = dev_ctx.stream();
int block = std::min(size, static_cast<int64_t>(256));
int grid = (size + block - 1) / block;
Range<T><<<grid, block, 0, stream>>>(start_value, step_value, size, out_data);
}
} // namespace phi
PD_REGISTER_KERNEL(
arange, GPU, ALL_LAYOUT, phi::ArangeKernel, float, double, int64_t, int) {
kernel->InputAt(0).SetBackend(phi::Backend::ALL_BACKEND);
kernel->InputAt(1).SetBackend(phi::Backend::ALL_BACKEND);
kernel->InputAt(2).SetBackend(phi::Backend::ALL_BACKEND);
}
|
502439691f8d3ec84e06a97e99535d0ed8985ec8.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "cu_drelu.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *src = NULL;
hipMalloc(&src, XSIZE*YSIZE);
float *dst = NULL;
hipMalloc(&dst, XSIZE*YSIZE);
int n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
cu_drelu), dim3(gridBlock),dim3(threadBlock), 0, 0, src,dst,n);
hipDeviceSynchronize();
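// Warm up with 10 untimed launches below, then time 1000 launches with steady_clock.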
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
cu_drelu), dim3(gridBlock),dim3(threadBlock), 0, 0, src,dst,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
cu_drelu), dim3(gridBlock),dim3(threadBlock), 0, 0, src,dst,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 502439691f8d3ec84e06a97e99535d0ed8985ec8.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "cu_drelu.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *src = NULL;
cudaMalloc(&src, XSIZE*YSIZE);
float *dst = NULL;
cudaMalloc(&dst, XSIZE*YSIZE);
int n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
cu_drelu<<<gridBlock,threadBlock>>>(src,dst,n);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
cu_drelu<<<gridBlock,threadBlock>>>(src,dst,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
cu_drelu<<<gridBlock,threadBlock>>>(src,dst,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
b1b08d7ed5ba8e1a363f696ccdec5a0127d7c861.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <ops/declarable/helpers/dynamic.h>
#include <helpers/PointersManager.h>
#include <helpers/ConstantTadHelper.h>
namespace sd {
namespace ops {
namespace helpers {
template <typename X, typename Y>
static _CUDA_G void dynamicPartitionScalarKernel(const void *vx, const Nd4jLong *xShapeInfo, const void *vi, const Nd4jLong *iShapeInfo, void **vz, Nd4jLong **zShapeInfos, const Nd4jLong numOutputs) {
auto x = reinterpret_cast<const X*>(vx);
auto i = reinterpret_cast<const Y*>(vi);
auto xLength = shape::length(xShapeInfo);
auto iLength = shape::length(iShapeInfo);
extern __shared__ char shmem[];
__shared__ Y *rawIndices;
__shared__ Y *trueIndices;
if (threadIdx.x == 0) {
rawIndices = reinterpret_cast<Y*>(shmem);
trueIndices = rawIndices + blockDim.x;
}
__syncthreads();
// we run things in blocks, 1 partition per block of threads
for (Nd4jLong o = blockIdx.x; o < numOutputs; o += gridDim.x) {
auto z = reinterpret_cast<X*>(vz[o]);
auto zShapeInfo = zShapeInfos[o];
auto zLength = shape::length(zShapeInfo);
// iLimit should be multiple of blockDim.x
auto iLimit = iLength <= blockDim.x ? blockDim.x : (iLength + (blockDim.x - (iLength % blockDim.x)));
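// Rounding up lets every thread run the same number of iterations, keeping the __syncthreads() calls below aligned across the block.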
int cnt = 0;
for (Nd4jLong e = threadIdx.x; e < iLimit; e += blockDim.x) {
// load set of indices into shared memory
if (e < iLength)
rawIndices[threadIdx.x] = i[shape::getIndexOffset(e, iShapeInfo)];
__syncthreads();
// now we need to find out where our actual updates will be mapped
// TODO: this can be improved obviously, by using prefix-sum like approach
if (threadIdx.x == 0) {
for (int f = 0; f < blockDim.x; f++) {
if (rawIndices[f] == static_cast<Y>(o))
trueIndices[f] = cnt++;
else
trueIndices[f] = -1;
}
}
__syncthreads();
// doing actual update
if (e < iLength)
if (trueIndices[threadIdx.x] >= 0) {
z[trueIndices[threadIdx.x]] = x[shape::getIndexOffset(e, xShapeInfo)];
}
__syncthreads();
}
}
}
template <typename X, typename Y>
static _CUDA_G void dynamicPartitionTadKernel(const void *vx, const Nd4jLong *xTadShapeInfo, const Nd4jLong *xTadOffsets, Nd4jLong xLength, const void *vindices, const Nd4jLong *iShapeInfo, Nd4jLong iLength, void **vz, Nd4jLong **zTadShapeInfos, Nd4jLong **zTadOffsets, Nd4jLong numOutputs) {
auto x = reinterpret_cast<const X*>(vx);
auto indices = reinterpret_cast<const Y*>(vindices);
// we run things in blocks, 1 partition per block of threads
for (int i = blockIdx.x; i < numOutputs; i += gridDim.x) {
auto z = reinterpret_cast<X*>(vz[i]);
// each thread has own counter for partitions
int outCnt = 0;
for (Nd4jLong e = 0; e < iLength; e++) {
if (indices[shape::getIndexOffset(e, iShapeInfo)] == i) {
auto dx = x + xTadOffsets[e];
auto dz = z + zTadOffsets[i][outCnt++];
for (int f = threadIdx.x; f < xLength; f += blockDim.x) {
dz[shape::getIndexOffset(f, zTadShapeInfos[i])] = dx[shape::getIndexOffset(f, xTadShapeInfo)];
}
}
}
}
}
template <typename X, typename Y>
static void _dynamicPartitionFunctor(sd::LaunchContext * context, NDArray const* input, NDArray const* indices, std::vector<NDArray*>& outputList) {
std::vector<std::pair<NDArray *, int>> outputs(outputList.size());
int sourceDimsLen = input->rankOf() - indices->rankOf();
unsigned int outSize = outputList.size();
PointersManager pm(context, "dynamicPartition");
if (sourceDimsLen) { // non-linear case
std::vector<int> sourceDims(sourceDimsLen);
for (int i = sourceDimsLen; i > 0; i--)
sourceDims[sourceDimsLen - i] = input->rankOf() - i;
//compute tad array for given dimensions
auto packX = ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), sourceDims);
std::vector<void *> outBuffers(outSize);
std::vector<const Nd4jLong *> tadShapes(outSize);
std::vector<const Nd4jLong *> tadOffsets(outSize);
std::vector<Nd4jLong> numTads(outSize);
// fill up dimensions array before launching the kernel
for (unsigned int i = 0; i < outSize; i++) {
outputs[i].first = outputList[i];
std::vector<int> outDims(outputs[i].first->rankOf() - 1);
int r = outputs[i].first->rankOf();
for (int k = 1; k < r; k++)
outDims[k - 1] = k;
auto packZ = ConstantTadHelper::getInstance().tadForDimensions(outputList.at(i)->shapeInfo(), outDims);
outBuffers[i] = outputList.at(i)->specialBuffer();
tadShapes[i] = packZ.platformShapeInfo();
tadOffsets[i] = packZ.platformOffsets();
}
// we copy pointers to device
auto dOutBuffers = reinterpret_cast<void **>(pm.replicatePointer(outBuffers.data(), outBuffers.size() * sizeof(void *)));
auto dOutTadShapes = reinterpret_cast<Nd4jLong **>(pm.replicatePointer(tadShapes.data(), tadShapes.size() * sizeof(Nd4jLong *)));
auto dOutTadOffsets = reinterpret_cast<Nd4jLong **>(pm.replicatePointer(tadOffsets.data(), tadOffsets.size() * sizeof(Nd4jLong *)));
// run kernel on device
hipLaunchKernelGGL(( dynamicPartitionTadKernel<X,Y>), dim3(256), dim3(256), 1024, *context->getCudaStream(), input->specialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), shape::length(packX.primaryShapeInfo()), indices->specialBuffer(), indices->specialShapeInfo(), indices->lengthOf(), dOutBuffers, dOutTadShapes, dOutTadOffsets, outSize);
} else { // linear case
auto numThreads = 256;
auto shmemSize = numThreads * sizeof(Y) * 2 + 1024;
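// Shared memory for the scalar kernel: two Y arrays of numThreads entries each (rawIndices and trueIndices) plus slack.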
std::vector<void *> outBuffers;
std::vector<const Nd4jLong *> outShapes;
for (auto v:outputList) {
outBuffers.emplace_back(v->specialBuffer());
outShapes.emplace_back(v->specialShapeInfo());
}
auto dOutBuffers = reinterpret_cast<void **>(pm.replicatePointer(outBuffers.data(), outBuffers.size() * sizeof(void *)));
auto dOutShapes = reinterpret_cast<Nd4jLong **>(pm.replicatePointer(outShapes.data(), outShapes.size() * sizeof(Nd4jLong *)));
hipLaunchKernelGGL(( dynamicPartitionScalarKernel<X,Y>), dim3(256), dim3(numThreads), shmemSize, *context->getCudaStream(), input->specialBuffer(), input->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), dOutBuffers, dOutShapes, outSize);
}
pm.synchronize();
}
template <typename X, typename Y>
static _CUDA_G void dynamicStitchScalarKernel(void **vx, Nd4jLong **xShapeInfos, void **vindices, Nd4jLong **iShapeInfos, int inputSize, void *vz, const Nd4jLong *zShapeInfo, Nd4jLong zLength) {
auto z = reinterpret_cast<X*>(vz);
for (int e = blockIdx.x; e < inputSize; e += gridDim.x) {
auto x = reinterpret_cast<X*>(vx[e]);
auto indices = reinterpret_cast<Y*>(vindices[e]);
auto xShapeInfo = xShapeInfos[e];
auto iShapeInfo = iShapeInfos[e];
auto iLength = shape::length(iShapeInfo);
for (int i = threadIdx.x; i < iLength; i += blockDim.x) {
auto idx = indices[shape::getIndexOffset(i, iShapeInfo)];
if (idx >= 0 && idx < zLength)
z[shape::getIndexOffset(idx, zShapeInfo)] = x[shape::getIndexOffset(i, xShapeInfo)];
}
}
}
template <typename X, typename Y>
static _CUDA_G void dynamicStitchTadKernel(void **vx, Nd4jLong **xTadShapeInfos, Nd4jLong **xTadOffsets, void **vindices, Nd4jLong **iShapeInfos, int inputSize, void *vz, const Nd4jLong *zTadShapeInfo, const Nd4jLong *zTadOffsets) {
auto bz = reinterpret_cast<X*>(vz);
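// One block per input: for every index value, the block's threads copy the corresponding input TAD into the output TAD selected by that index.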
for (int e = blockIdx.x; e < inputSize; e += gridDim.x) {
auto indices = reinterpret_cast<Y*>(vindices[e]);
auto iShapeInfo = iShapeInfos[e];
if (shape::isEmpty(iShapeInfo))
continue;
auto iLength = shape::length(iShapeInfo);
auto zLength = shape::length(zTadShapeInfo);
auto xShapeInfo = xTadShapeInfos[e];
auto xLength = shape::length(xShapeInfo);
for (int i = 0; i < iLength; i++) {
auto idx = indices[shape::getIndexOffset(i, iShapeInfo)];
auto z = bz + zTadOffsets[idx];
auto x = reinterpret_cast<X*>(vx[e]) + xTadOffsets[e][i];
for (int f = threadIdx.x; f < zLength; f += blockDim.x) {
z[shape::getIndexOffset(f, zTadShapeInfo)] = x[shape::getIndexOffset(f, xShapeInfo)];
}
__syncthreads();
}
}
}
template <typename X, typename Y>
static int _dynamicStitchFunctor(sd::LaunchContext * context, std::vector<NDArray*> const& inputs, std::vector<NDArray*> const& indices, NDArray* output){
int inputSize = inputs.size();
PointersManager pm(context, "dynamicStitch");
if (output->isVector()) {
std::vector<const void *> inputBuffers(inputSize);
std::vector<const Nd4jLong *> inputShapes(inputSize);
std::vector<const void *> indicesBuffers(inputSize);
std::vector<const Nd4jLong *> indicesShapes(inputSize);
for (int e = 0; e < inputSize; e++) {
inputBuffers[e] = inputs.at(e)->specialBuffer();
indicesBuffers[e] = indices.at(e)->specialBuffer();
inputShapes[e] = inputs.at(e)->specialShapeInfo();
indicesShapes[e] = indices.at(e)->specialShapeInfo();
}
// copying pointers to buffers to device
auto dInputBuffers = reinterpret_cast<void **>(pm.replicatePointer(inputBuffers.data(), inputSize * sizeof(void *)));
auto dIndicesBuffers = reinterpret_cast<void **>(pm.replicatePointer(indicesBuffers.data(), inputSize * sizeof(void *)));
auto dInputShapes = reinterpret_cast<Nd4jLong **>(pm.replicatePointer(inputShapes.data(), inputSize * sizeof(Nd4jLong *)));
auto dIndicesShapes = reinterpret_cast<Nd4jLong **>(pm.replicatePointer(indicesShapes.data(), inputSize * sizeof(Nd4jLong *)));
hipLaunchKernelGGL(( dynamicStitchScalarKernel<X,Y>), dim3(256), dim3(256), 1024, *context->getCudaStream(), dInputBuffers, dInputShapes, dIndicesBuffers, dIndicesShapes, inputSize, output->specialBuffer(), output->specialShapeInfo(), output->lengthOf());
} else {
std::vector<int> restDims(output->rankOf() - 1);
for (int i = restDims.size(); i > 0; i--)
restDims[restDims.size() - i] = output->rankOf() - i;
auto packZ = ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), restDims);
std::vector<const void *> inputBuffers(inputSize);
std::vector<const Nd4jLong *> inputTadShapes(inputSize);
std::vector<const Nd4jLong *> inputTadOffsets(inputSize);
std::vector<const void *> indicesBuffers(inputSize);
std::vector<const Nd4jLong *> indicesShapes(inputSize);
for (int e = 0; e < inputSize; e++) {
std::vector<int> sourceDims(inputs[e]->rankOf() - indices[e]->rankOf());
for (int i = sourceDims.size(); i > 0; i--)
sourceDims[sourceDims.size() - i] = inputs[e]->rankOf() - i;
auto packX = ConstantTadHelper::getInstance().tadForDimensions(inputs[e]->shapeInfo(), sourceDims);
indicesBuffers[e] = indices[e]->specialBuffer();
indicesShapes[e] = indices[e]->specialShapeInfo();
inputBuffers[e] = inputs[e]->specialBuffer();
inputTadShapes[e] = packX.platformShapeInfo();
inputTadOffsets[e] = packX.platformOffsets();
}
// copying pointers to buffers to device
auto dInputBuffers = reinterpret_cast<void **>(pm.replicatePointer(inputBuffers.data(), inputSize * sizeof(void *)));
auto dInputTadShapes = reinterpret_cast<Nd4jLong **>(pm.replicatePointer(inputTadShapes.data(), inputSize * sizeof(Nd4jLong *)));
auto dInputTadOffsets = reinterpret_cast<Nd4jLong **>(pm.replicatePointer(inputTadOffsets.data(), inputSize * sizeof(Nd4jLong *)));
auto dIndicesBuffers = reinterpret_cast<void **>(pm.replicatePointer(indicesBuffers.data(), inputSize * sizeof(void *)));
auto dIndicesShapes = reinterpret_cast<Nd4jLong **>(pm.replicatePointer(indicesShapes.data(), inputSize * sizeof(Nd4jLong *)));
hipLaunchKernelGGL(( dynamicStitchTadKernel<X,Y>), dim3(256), dim3(256), 1024, *context->getCudaStream(), dInputBuffers, dInputTadShapes, dInputTadOffsets, dIndicesBuffers, dIndicesShapes, inputSize, output->specialBuffer(), packZ.platformShapeInfo(), packZ.platformOffsets());
}
pm.synchronize();
return Status::OK();
}
template <typename T>
static void _dynamicPartitionFunctorBP(NDArray const* input, NDArray const* indices, std::vector<NDArray*> const& inputGradientList, std::vector<NDArray*>& outputList) {
}
ND4J_LOCAL void dynamicPartitionFunctor(sd::LaunchContext * context, NDArray const* input, NDArray const* indices, std::vector<NDArray*>& outputList) {
auto xType = input->dataType();
auto yType = indices->dataType();
NDArray::prepareSpecialUse({}, {indices, input});
BUILD_DOUBLE_SELECTOR(xType, yType, _dynamicPartitionFunctor, (context, input, indices, outputList), NUMERIC_TYPES, INDEXING_TYPES);
NDArray::registerSpecialUse({}, {indices, input});
// TODO: it would be nice to have NDArray::registerSpecialUse signature that accepts something else beyond initializer_list
for (auto v:outputList) {
v->tickWriteDevice();
}
}
template <typename T>
static int _dynamicStitchFunctorBP(std::vector<NDArray*> const& inputs, std::vector<NDArray*> const& indices, NDArray const* gradInput, std::vector<NDArray*>& outputList){
throw std::runtime_error("Not umplemented yet");
}
ND4J_LOCAL int dynamicStitchFunctor(sd::LaunchContext * context, std::vector<NDArray*> const& inputs, std::vector<NDArray*> const& indices, NDArray* output){
auto xType = inputs.at(0)->dataType();
auto yType = indices.at(0)->dataType();
for (auto v:indices) {
v->syncToDevice();
v->tickReadDevice();
}
for (auto v:inputs) {
v->syncToDevice();
v->tickReadDevice();
}
NDArray::prepareSpecialUse({output}, {});
BUILD_DOUBLE_SELECTOR(xType, yType, _dynamicStitchFunctor, (context, inputs, indices, output), NUMERIC_TYPES, INDEXING_TYPES);
NDArray::registerSpecialUse({output}, {});
return Status::OK();
}
ND4J_LOCAL int dynamicStitchFunctorBP(sd::LaunchContext * context, std::vector<NDArray*> const& inputs, std::vector<NDArray*> const& indices, NDArray const* gradInput, std::vector<NDArray*>& outputList) {
auto xType = inputs.at(0)->dataType();
BUILD_SINGLE_SELECTOR(xType, return _dynamicStitchFunctorBP, (inputs, indices, gradInput, outputList), NUMERIC_TYPES);
}
ND4J_LOCAL void dynamicPartitionFunctorBP(sd::LaunchContext * context, NDArray const* input, NDArray const* indices, std::vector<NDArray*> const& inputGradientList, std::vector<NDArray*>& outputList) {
auto xType = input->dataType();
BUILD_SINGLE_SELECTOR(xType, _dynamicPartitionFunctorBP, (input, indices, inputGradientList, outputList), NUMERIC_TYPES);
}
}
}
}
| b1b08d7ed5ba8e1a363f696ccdec5a0127d7c861.cu | /* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <ops/declarable/helpers/dynamic.h>
#include <helpers/PointersManager.h>
#include <helpers/ConstantTadHelper.h>
namespace sd {
namespace ops {
namespace helpers {
template <typename X, typename Y>
static _CUDA_G void dynamicPartitionScalarKernel(const void *vx, const Nd4jLong *xShapeInfo, const void *vi, const Nd4jLong *iShapeInfo, void **vz, Nd4jLong **zShapeInfos, const Nd4jLong numOutputs) {
auto x = reinterpret_cast<const X*>(vx);
auto i = reinterpret_cast<const Y*>(vi);
auto xLength = shape::length(xShapeInfo);
auto iLength = shape::length(iShapeInfo);
extern __shared__ char shmem[];
__shared__ Y *rawIndices;
__shared__ Y *trueIndices;
if (threadIdx.x == 0) {
rawIndices = reinterpret_cast<Y*>(shmem);
trueIndices = rawIndices + blockDim.x;
}
__syncthreads();
// we run things in blocks, 1 partition per block of threads
for (Nd4jLong o = blockIdx.x; o < numOutputs; o += gridDim.x) {
auto z = reinterpret_cast<X*>(vz[o]);
auto zShapeInfo = zShapeInfos[o];
auto zLength = shape::length(zShapeInfo);
// iLimit should be multiple of blockDim.x
auto iLimit = iLength <= blockDim.x ? blockDim.x : (iLength + (blockDim.x - (iLength % blockDim.x)));
int cnt = 0;
for (Nd4jLong e = threadIdx.x; e < iLimit; e += blockDim.x) {
// load set of indices into shared memory
if (e < iLength)
rawIndices[threadIdx.x] = i[shape::getIndexOffset(e, iShapeInfo)];
__syncthreads();
// now we need to find out where our actual updates will be mapped
// TODO: this can be improved obviously, by using prefix-sum like approach
if (threadIdx.x == 0) {
for (int f = 0; f < blockDim.x; f++) {
if (rawIndices[f] == static_cast<Y>(o))
trueIndices[f] = cnt++;
else
trueIndices[f] = -1;
}
}
__syncthreads();
// doing actual update
if (e < iLength)
if (trueIndices[threadIdx.x] >= 0) {
z[trueIndices[threadIdx.x]] = x[shape::getIndexOffset(e, xShapeInfo)];
}
__syncthreads();
}
}
}
template <typename X, typename Y>
static _CUDA_G void dynamicPartitionTadKernel(const void *vx, const Nd4jLong *xTadShapeInfo, const Nd4jLong *xTadOffsets, Nd4jLong xLength, const void *vindices, const Nd4jLong *iShapeInfo, Nd4jLong iLength, void **vz, Nd4jLong **zTadShapeInfos, Nd4jLong **zTadOffsets, Nd4jLong numOutputs) {
auto x = reinterpret_cast<const X*>(vx);
auto indices = reinterpret_cast<const Y*>(vindices);
// we run things in blocks, 1 partition per block of threads
for (int i = blockIdx.x; i < numOutputs; i += gridDim.x) {
auto z = reinterpret_cast<X*>(vz[i]);
// each thread has own counter for partitions
int outCnt = 0;
for (Nd4jLong e = 0; e < iLength; e++) {
if (indices[shape::getIndexOffset(e, iShapeInfo)] == i) {
auto dx = x + xTadOffsets[e];
auto dz = z + zTadOffsets[i][outCnt++];
for (int f = threadIdx.x; f < xLength; f += blockDim.x) {
dz[shape::getIndexOffset(f, zTadShapeInfos[i])] = dx[shape::getIndexOffset(f, xTadShapeInfo)];
}
}
}
}
}
template <typename X, typename Y>
static void _dynamicPartitionFunctor(sd::LaunchContext * context, NDArray const* input, NDArray const* indices, std::vector<NDArray*>& outputList) {
std::vector<std::pair<NDArray *, int>> outputs(outputList.size());
int sourceDimsLen = input->rankOf() - indices->rankOf();
unsigned int outSize = outputList.size();
PointersManager pm(context, "dynamicPartition");
if (sourceDimsLen) { // non-linear case
std::vector<int> sourceDims(sourceDimsLen);
for (int i = sourceDimsLen; i > 0; i--)
sourceDims[sourceDimsLen - i] = input->rankOf() - i;
//compute tad array for given dimensions
auto packX = ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), sourceDims);
std::vector<void *> outBuffers(outSize);
std::vector<const Nd4jLong *> tadShapes(outSize);
std::vector<const Nd4jLong *> tadOffsets(outSize);
std::vector<Nd4jLong> numTads(outSize);
// fill up dimensions array before launching the kernel
for (unsigned int i = 0; i < outSize; i++) {
outputs[i].first = outputList[i];
std::vector<int> outDims(outputs[i].first->rankOf() - 1);
int r = outputs[i].first->rankOf();
for (int k = 1; k < r; k++)
outDims[k - 1] = k;
auto packZ = ConstantTadHelper::getInstance().tadForDimensions(outputList.at(i)->shapeInfo(), outDims);
outBuffers[i] = outputList.at(i)->specialBuffer();
tadShapes[i] = packZ.platformShapeInfo();
tadOffsets[i] = packZ.platformOffsets();
}
// we copy pointers to device
auto dOutBuffers = reinterpret_cast<void **>(pm.replicatePointer(outBuffers.data(), outBuffers.size() * sizeof(void *)));
auto dOutTadShapes = reinterpret_cast<Nd4jLong **>(pm.replicatePointer(tadShapes.data(), tadShapes.size() * sizeof(Nd4jLong *)));
auto dOutTadOffsets = reinterpret_cast<Nd4jLong **>(pm.replicatePointer(tadOffsets.data(), tadOffsets.size() * sizeof(Nd4jLong *)));
// run kernel on device
dynamicPartitionTadKernel<X,Y><<<256, 256, 1024, *context->getCudaStream()>>>(input->specialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), shape::length(packX.primaryShapeInfo()), indices->specialBuffer(), indices->specialShapeInfo(), indices->lengthOf(), dOutBuffers, dOutTadShapes, dOutTadOffsets, outSize);
} else { // linear case
auto numThreads = 256;
auto shmemSize = numThreads * sizeof(Y) * 2 + 1024;
std::vector<void *> outBuffers;
std::vector<const Nd4jLong *> outShapes;
for (auto v:outputList) {
outBuffers.emplace_back(v->specialBuffer());
outShapes.emplace_back(v->specialShapeInfo());
}
auto dOutBuffers = reinterpret_cast<void **>(pm.replicatePointer(outBuffers.data(), outBuffers.size() * sizeof(void *)));
auto dOutShapes = reinterpret_cast<Nd4jLong **>(pm.replicatePointer(outShapes.data(), outShapes.size() * sizeof(Nd4jLong *)));
dynamicPartitionScalarKernel<X,Y><<<256, numThreads, shmemSize, *context->getCudaStream()>>>(input->specialBuffer(), input->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), dOutBuffers, dOutShapes, outSize);
}
pm.synchronize();
}
template <typename X, typename Y>
static _CUDA_G void dynamicStitchScalarKernel(void **vx, Nd4jLong **xShapeInfos, void **vindices, Nd4jLong **iShapeInfos, int inputSize, void *vz, const Nd4jLong *zShapeInfo, Nd4jLong zLength) {
auto z = reinterpret_cast<X*>(vz);
for (int e = blockIdx.x; e < inputSize; e += gridDim.x) {
auto x = reinterpret_cast<X*>(vx[e]);
auto indices = reinterpret_cast<Y*>(vindices[e]);
auto xShapeInfo = xShapeInfos[e];
auto iShapeInfo = iShapeInfos[e];
auto iLength = shape::length(iShapeInfo);
for (int i = threadIdx.x; i < iLength; i += blockDim.x) {
auto idx = indices[shape::getIndexOffset(i, iShapeInfo)];
if (idx >= 0 && idx < zLength)
z[shape::getIndexOffset(idx, zShapeInfo)] = x[shape::getIndexOffset(i, xShapeInfo)];
}
}
}
template <typename X, typename Y>
static _CUDA_G void dynamicStitchTadKernel(void **vx, Nd4jLong **xTadShapeInfos, Nd4jLong **xTadOffsets, void **vindices, Nd4jLong **iShapeInfos, int inputSize, void *vz, const Nd4jLong *zTadShapeInfo, const Nd4jLong *zTadOffsets) {
auto bz = reinterpret_cast<X*>(vz);
for (int e = blockIdx.x; e < inputSize; e += gridDim.x) {
auto indices = reinterpret_cast<Y*>(vindices[e]);
auto iShapeInfo = iShapeInfos[e];
if (shape::isEmpty(iShapeInfo))
continue;
auto iLength = shape::length(iShapeInfo);
auto zLength = shape::length(zTadShapeInfo);
auto xShapeInfo = xTadShapeInfos[e];
auto xLength = shape::length(xShapeInfo);
for (int i = 0; i < iLength; i++) {
auto idx = indices[shape::getIndexOffset(i, iShapeInfo)];
auto z = bz + zTadOffsets[idx];
auto x = reinterpret_cast<X*>(vx[e]) + xTadOffsets[e][i];
for (int f = threadIdx.x; f < zLength; f += blockDim.x) {
z[shape::getIndexOffset(f, zTadShapeInfo)] = x[shape::getIndexOffset(f, xShapeInfo)];
}
__syncthreads();
}
}
}
template <typename X, typename Y>
static int _dynamicStitchFunctor(sd::LaunchContext * context, std::vector<NDArray*> const& inputs, std::vector<NDArray*> const& indices, NDArray* output){
int inputSize = inputs.size();
PointersManager pm(context, "dynamicStitch");
if (output->isVector()) {
std::vector<const void *> inputBuffers(inputSize);
std::vector<const Nd4jLong *> inputShapes(inputSize);
std::vector<const void *> indicesBuffers(inputSize);
std::vector<const Nd4jLong *> indicesShapes(inputSize);
for (int e = 0; e < inputSize; e++) {
inputBuffers[e] = inputs.at(e)->specialBuffer();
indicesBuffers[e] = indices.at(e)->specialBuffer();
inputShapes[e] = inputs.at(e)->specialShapeInfo();
indicesShapes[e] = indices.at(e)->specialShapeInfo();
}
// copying pointers to buffers to device
auto dInputBuffers = reinterpret_cast<void **>(pm.replicatePointer(inputBuffers.data(), inputSize * sizeof(void *)));
auto dIndicesBuffers = reinterpret_cast<void **>(pm.replicatePointer(indicesBuffers.data(), inputSize * sizeof(void *)));
auto dInputShapes = reinterpret_cast<Nd4jLong **>(pm.replicatePointer(inputShapes.data(), inputSize * sizeof(Nd4jLong *)));
auto dIndicesShapes = reinterpret_cast<Nd4jLong **>(pm.replicatePointer(indicesShapes.data(), inputSize * sizeof(Nd4jLong *)));
dynamicStitchScalarKernel<X,Y><<<256, 256, 1024, *context->getCudaStream()>>>(dInputBuffers, dInputShapes, dIndicesBuffers, dIndicesShapes, inputSize, output->specialBuffer(), output->specialShapeInfo(), output->lengthOf());
} else {
std::vector<int> restDims(output->rankOf() - 1);
for (int i = restDims.size(); i > 0; i--)
restDims[restDims.size() - i] = output->rankOf() - i;
auto packZ = ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), restDims);
std::vector<const void *> inputBuffers(inputSize);
std::vector<const Nd4jLong *> inputTadShapes(inputSize);
std::vector<const Nd4jLong *> inputTadOffsets(inputSize);
std::vector<const void *> indicesBuffers(inputSize);
std::vector<const Nd4jLong *> indicesShapes(inputSize);
for (int e = 0; e < inputSize; e++) {
std::vector<int> sourceDims(inputs[e]->rankOf() - indices[e]->rankOf());
for (int i = sourceDims.size(); i > 0; i--)
sourceDims[sourceDims.size() - i] = inputs[e]->rankOf() - i;
auto packX = ConstantTadHelper::getInstance().tadForDimensions(inputs[e]->shapeInfo(), sourceDims);
indicesBuffers[e] = indices[e]->specialBuffer();
indicesShapes[e] = indices[e]->specialShapeInfo();
inputBuffers[e] = inputs[e]->specialBuffer();
inputTadShapes[e] = packX.platformShapeInfo();
inputTadOffsets[e] = packX.platformOffsets();
}
// copying pointers to buffers to device
auto dInputBuffers = reinterpret_cast<void **>(pm.replicatePointer(inputBuffers.data(), inputSize * sizeof(void *)));
auto dInputTadShapes = reinterpret_cast<Nd4jLong **>(pm.replicatePointer(inputTadShapes.data(), inputSize * sizeof(Nd4jLong *)));
auto dInputTadOffsets = reinterpret_cast<Nd4jLong **>(pm.replicatePointer(inputTadOffsets.data(), inputSize * sizeof(Nd4jLong *)));
auto dIndicesBuffers = reinterpret_cast<void **>(pm.replicatePointer(indicesBuffers.data(), inputSize * sizeof(void *)));
auto dIndicesShapes = reinterpret_cast<Nd4jLong **>(pm.replicatePointer(indicesShapes.data(), inputSize * sizeof(Nd4jLong *)));
dynamicStitchTadKernel<X,Y><<<256, 256, 1024, *context->getCudaStream()>>>(dInputBuffers, dInputTadShapes, dInputTadOffsets, dIndicesBuffers, dIndicesShapes, inputSize, output->specialBuffer(), packZ.platformShapeInfo(), packZ.platformOffsets());
}
pm.synchronize();
return Status::OK();
}
template <typename T>
static void _dynamicPartitionFunctorBP(NDArray const* input, NDArray const* indices, std::vector<NDArray*> const& inputGradientList, std::vector<NDArray*>& outputList) {
}
ND4J_LOCAL void dynamicPartitionFunctor(sd::LaunchContext * context, NDArray const* input, NDArray const* indices, std::vector<NDArray*>& outputList) {
auto xType = input->dataType();
auto yType = indices->dataType();
NDArray::prepareSpecialUse({}, {indices, input});
BUILD_DOUBLE_SELECTOR(xType, yType, _dynamicPartitionFunctor, (context, input, indices, outputList), NUMERIC_TYPES, INDEXING_TYPES);
NDArray::registerSpecialUse({}, {indices, input});
// TODO: it would be nice to have NDArray::registerSpecialUse signature that accepts something else beyond initializer_list
for (auto v:outputList) {
v->tickWriteDevice();
}
}
template <typename T>
static int _dynamicStitchFunctorBP(std::vector<NDArray*> const& inputs, std::vector<NDArray*> const& indices, NDArray const* gradInput, std::vector<NDArray*>& outputList){
throw std::runtime_error("Not implemented yet");
}
ND4J_LOCAL int dynamicStitchFunctor(sd::LaunchContext * context, std::vector<NDArray*> const& inputs, std::vector<NDArray*> const& indices, NDArray* output){
auto xType = inputs.at(0)->dataType();
auto yType = indices.at(0)->dataType();
for (auto v:indices) {
v->syncToDevice();
v->tickReadDevice();
}
for (auto v:inputs) {
v->syncToDevice();
v->tickReadDevice();
}
NDArray::prepareSpecialUse({output}, {});
BUILD_DOUBLE_SELECTOR(xType, yType, _dynamicStitchFunctor, (context, inputs, indices, output), NUMERIC_TYPES, INDEXING_TYPES);
NDArray::registerSpecialUse({output}, {});
return Status::OK();
}
ND4J_LOCAL int dynamicStitchFunctorBP(sd::LaunchContext * context, std::vector<NDArray*> const& inputs, std::vector<NDArray*> const& indices, NDArray const* gradInput, std::vector<NDArray*>& outputList) {
auto xType = inputs.at(0)->dataType();
BUILD_SINGLE_SELECTOR(xType, return _dynamicStitchFunctorBP, (inputs, indices, gradInput, outputList), NUMERIC_TYPES);
}
ND4J_LOCAL void dynamicPartitionFunctorBP(sd::LaunchContext * context, NDArray const* input, NDArray const* indices, std::vector<NDArray*> const& inputGradientList, std::vector<NDArray*>& outputList) {
auto xType = input->dataType();
BUILD_SINGLE_SELECTOR(xType, _dynamicPartitionFunctorBP, (input, indices, inputGradientList, outputList), NUMERIC_TYPES);
}
}
}
}
|
e7422b3f69ea405a220942a3556a458b6baa79b8.hip | // !!! This is a file automatically generated by hipify!!!
#include <cudf/cudf.h>
#include <rmm/rmm.h>
#include <utilities/cudf_utils.h>
#include <utilities/error_utils.hpp>
#include <utilities/type_dispatcher.hpp>
#include <utilities/wrapper_types.hpp>
#include <cub/device/device_segmented_radix_sort.cuh>
struct SegmentedRadixSortPlan{
const gdf_size_type num_items;
// temporary storage
void *storage;
size_t storage_bytes;
void *back_key, *back_val;
size_t back_key_size, back_val_size;
hipStream_t stream;
int descending;
unsigned begin_bit, end_bit;
SegmentedRadixSortPlan(size_t num_items, int descending,
unsigned begin_bit, unsigned end_bit)
: num_items(num_items),
storage(nullptr), storage_bytes(0),
back_key(nullptr), back_val(nullptr),
back_key_size(0), back_val_size(0),
stream(0), descending(descending),
begin_bit(begin_bit), end_bit(end_bit)
{}
gdf_error setup(size_t sizeof_key, size_t sizeof_val) {
back_key_size = num_items * sizeof_key;
back_val_size = num_items * sizeof_val;
RMM_TRY( RMM_ALLOC(&back_key, back_key_size, stream) ); // TODO: non-default stream
RMM_TRY( RMM_ALLOC(&back_val, back_val_size, stream) );
return GDF_SUCCESS;
}
gdf_error teardown() {
RMM_TRY(RMM_FREE(back_key, stream));
RMM_TRY(RMM_FREE(back_val, stream));
RMM_TRY(RMM_FREE(storage, stream));
return GDF_SUCCESS;
}
};
template <typename Tk, typename Tv>
struct SegmentedRadixSort {
static
gdf_error sort( SegmentedRadixSortPlan *plan,
Tk *d_key_buf, Tv *d_value_buf,
unsigned num_segments,
unsigned *d_begin_offsets,
unsigned *d_end_offsets) {
unsigned num_items = plan->num_items;
Tk *d_key_alt_buf = (Tk*)plan->back_key;
Tv *d_value_alt_buf = (Tv*)plan->back_val;
hipStream_t stream = plan->stream;
int descending = plan->descending;
unsigned begin_bit = plan->begin_bit;
unsigned end_bit = plan->end_bit;
cub::DoubleBuffer<Tk> d_keys(d_key_buf, d_key_alt_buf);
typedef hipcub::DeviceSegmentedRadixSort Sorter;
if (d_value_buf) {
// Sort KeyValue pairs
cub::DoubleBuffer<Tv> d_values(d_value_buf, d_value_alt_buf);
if (descending) {
Sorter::SortPairsDescending(plan->storage,
plan->storage_bytes,
d_keys,
d_values,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
begin_bit,
end_bit,
stream);
} else {
Sorter::SortPairs( plan->storage,
plan->storage_bytes,
d_keys,
d_values,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
begin_bit,
end_bit,
stream );
}
CUDA_CHECK_LAST();
if (plan->storage && d_value_buf != d_values.Current()){
hipMemcpyAsync(d_value_buf, d_value_alt_buf,
num_items * sizeof(Tv),
hipMemcpyDeviceToDevice,
stream);
CUDA_CHECK_LAST();
}
} else {
// Sort Keys only
if (descending) {
Sorter::SortKeysDescending( plan->storage,
plan->storage_bytes,
d_keys,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
begin_bit,
end_bit,
stream );
CUDA_CHECK_LAST()
} else {
Sorter::SortKeys( plan->storage,
plan->storage_bytes,
d_keys,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
begin_bit,
end_bit,
stream );
}
CUDA_CHECK_LAST();
}
if ( plan->storage ) {
// We have operated and the result is not in front buffer
if (d_key_buf != d_keys.Current()){
hipMemcpyAsync(d_key_buf, d_key_alt_buf, num_items * sizeof(Tk),
hipMemcpyDeviceToDevice, stream);
CUDA_CHECK_LAST();
}
} else {
// We have not operated.
// Just checking for temporary storage requirement
RMM_TRY( RMM_ALLOC(&plan->storage, plan->storage_bytes, plan->stream) ); // TODO: non-default stream
CUDA_CHECK_LAST();
// Now that we have allocated, do real work.
return sort(plan, d_key_buf, d_value_buf, num_segments,
d_begin_offsets, d_end_offsets);
}
return GDF_SUCCESS;
}
};
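// The tail of sort() above follows the usual two-phase contract of
// (hip)cub::DeviceSegmentedRadixSort: when the temporary-storage pointer is
// null, the call only writes the required byte count into storage_bytes, so the
// plan allocates that much and calls itself once more to do the real sort.
// A minimal sketch of the contract on its own, using the Sorter alias from
// sort() (illustrative only; d_keys, num_items and the offset arrays are
// assumed to be set up as above):
//
//   size_t temp_bytes = 0;
//   Sorter::SortKeys(nullptr, temp_bytes, d_keys, num_items,
//                    num_segments, d_begin_offsets, d_end_offsets); // size query only
//   RMM_TRY(RMM_ALLOC(&d_temp, temp_bytes, stream));
//   Sorter::SortKeys(d_temp, temp_bytes, d_keys, num_items,
//                    num_segments, d_begin_offsets, d_end_offsets); // actual sort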
gdf_segmented_radixsort_plan_type* cffi_wrap(SegmentedRadixSortPlan* obj){
return reinterpret_cast<gdf_segmented_radixsort_plan_type*>(obj);
}
SegmentedRadixSortPlan* cffi_unwrap(gdf_segmented_radixsort_plan_type* hdl){
return reinterpret_cast<SegmentedRadixSortPlan*>(hdl);
}
gdf_segmented_radixsort_plan_type* gdf_segmented_radixsort_plan(
size_t num_items, int descending,
unsigned begin_bit, unsigned end_bit)
{
return cffi_wrap(new SegmentedRadixSortPlan(num_items, descending,
begin_bit, end_bit));
}
gdf_error gdf_segmented_radixsort_plan_setup(
gdf_segmented_radixsort_plan_type *hdl,
size_t sizeof_key, size_t sizeof_val)
{
return cffi_unwrap(hdl)->setup(sizeof_key, sizeof_val);
}
gdf_error gdf_segmented_radixsort_plan_free(gdf_segmented_radixsort_plan_type *hdl)
{
auto plan = cffi_unwrap(hdl);
gdf_error status = plan->teardown();
delete plan;
return status;
}
template <typename Tv>
struct gdf_segmented_radixsort_functor
{
template <typename Tk>
gdf_error
operator()( gdf_segmented_radixsort_plan_type *hdl,
gdf_column *keycol,
gdf_column *valcol,
unsigned num_segments,
unsigned *d_begin_offsets,
unsigned *d_end_offsets)
{
/* validity mask must be empty */
GDF_REQUIRE(!keycol->valid || !keycol->null_count, GDF_VALIDITY_UNSUPPORTED);
GDF_REQUIRE(!valcol->valid || !valcol->null_count, GDF_VALIDITY_UNSUPPORTED);
/* size of columns must match */
GDF_REQUIRE(keycol->size == valcol->size, GDF_COLUMN_SIZE_MISMATCH);
SegmentedRadixSortPlan *plan = cffi_unwrap(hdl);
/* num_items must match */
GDF_REQUIRE(plan->num_items == keycol->size, GDF_COLUMN_SIZE_MISMATCH);
/* back buffer size must match */
GDF_REQUIRE(sizeof(Tk) * plan->num_items == plan->back_key_size,
GDF_COLUMN_SIZE_MISMATCH);
GDF_REQUIRE(sizeof(Tv) * plan->num_items == plan->back_val_size,
GDF_COLUMN_SIZE_MISMATCH);
/* Do sort */
return SegmentedRadixSort<Tk, Tv>::sort(plan,
(Tk*)keycol->data, (Tv*)valcol->data,
num_segments, d_begin_offsets, d_end_offsets);
}
};
gdf_error gdf_segmented_radixsort(gdf_segmented_radixsort_plan_type *hdl,
gdf_column *keycol,
gdf_column *valcol,
unsigned num_segments,
unsigned *d_begin_offsets,
unsigned *d_end_offsets)
{
GDF_REQUIRE(valcol->dtype == GDF_INT64, GDF_UNSUPPORTED_DTYPE);
return cudf::type_dispatcher(keycol->dtype,
gdf_segmented_radixsort_functor<int64_t>{},
hdl, keycol, valcol,
num_segments, d_begin_offsets,
d_end_offsets);
}
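// End-to-end usage of the plan API defined above (illustrative sketch; the
// gdf_columns and the device offset arrays are caller-owned and assumed valid,
// and both columns are taken to be GDF_INT64, matching the dtype check in
// gdf_segmented_radixsort):
//
//   gdf_segmented_radixsort_plan_type *plan =
//       gdf_segmented_radixsort_plan(num_items, /*descending=*/0,
//                                    /*begin_bit=*/0, /*end_bit=*/64);
//   gdf_segmented_radixsort_plan_setup(plan, sizeof(int64_t), sizeof(int64_t));
//   gdf_segmented_radixsort(plan, &key_col, &val_col,
//                           num_segments, d_begin_offsets, d_end_offsets);
//   gdf_segmented_radixsort_plan_free(plan);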
| e7422b3f69ea405a220942a3556a458b6baa79b8.cu | #include <cudf/cudf.h>
#include <rmm/rmm.h>
#include <utilities/cudf_utils.h>
#include <utilities/error_utils.hpp>
#include <utilities/type_dispatcher.hpp>
#include <utilities/wrapper_types.hpp>
#include <cub/device/device_segmented_radix_sort.cuh>
struct SegmentedRadixSortPlan{
const gdf_size_type num_items;
// temporary storage
void *storage;
size_t storage_bytes;
void *back_key, *back_val;
size_t back_key_size, back_val_size;
cudaStream_t stream;
int descending;
unsigned begin_bit, end_bit;
SegmentedRadixSortPlan(size_t num_items, int descending,
unsigned begin_bit, unsigned end_bit)
: num_items(num_items),
storage(nullptr), storage_bytes(0),
back_key(nullptr), back_val(nullptr),
back_key_size(0), back_val_size(0),
stream(0), descending(descending),
begin_bit(begin_bit), end_bit(end_bit)
{}
gdf_error setup(size_t sizeof_key, size_t sizeof_val) {
back_key_size = num_items * sizeof_key;
back_val_size = num_items * sizeof_val;
RMM_TRY( RMM_ALLOC(&back_key, back_key_size, stream) ); // TODO: non-default stream
RMM_TRY( RMM_ALLOC(&back_val, back_val_size, stream) );
return GDF_SUCCESS;
}
gdf_error teardown() {
RMM_TRY(RMM_FREE(back_key, stream));
RMM_TRY(RMM_FREE(back_val, stream));
RMM_TRY(RMM_FREE(storage, stream));
return GDF_SUCCESS;
}
};
template <typename Tk, typename Tv>
struct SegmentedRadixSort {
static
gdf_error sort( SegmentedRadixSortPlan *plan,
Tk *d_key_buf, Tv *d_value_buf,
unsigned num_segments,
unsigned *d_begin_offsets,
unsigned *d_end_offsets) {
unsigned num_items = plan->num_items;
Tk *d_key_alt_buf = (Tk*)plan->back_key;
Tv *d_value_alt_buf = (Tv*)plan->back_val;
cudaStream_t stream = plan->stream;
int descending = plan->descending;
unsigned begin_bit = plan->begin_bit;
unsigned end_bit = plan->end_bit;
cub::DoubleBuffer<Tk> d_keys(d_key_buf, d_key_alt_buf);
typedef cub::DeviceSegmentedRadixSort Sorter;
if (d_value_buf) {
// Sort KeyValue pairs
cub::DoubleBuffer<Tv> d_values(d_value_buf, d_value_alt_buf);
if (descending) {
Sorter::SortPairsDescending(plan->storage,
plan->storage_bytes,
d_keys,
d_values,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
begin_bit,
end_bit,
stream);
} else {
Sorter::SortPairs( plan->storage,
plan->storage_bytes,
d_keys,
d_values,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
begin_bit,
end_bit,
stream );
}
CUDA_CHECK_LAST();
if (plan->storage && d_value_buf != d_values.Current()){
cudaMemcpyAsync(d_value_buf, d_value_alt_buf,
num_items * sizeof(Tv),
cudaMemcpyDeviceToDevice,
stream);
CUDA_CHECK_LAST();
}
} else {
// Sort Keys only
if (descending) {
Sorter::SortKeysDescending( plan->storage,
plan->storage_bytes,
d_keys,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
begin_bit,
end_bit,
stream );
CUDA_CHECK_LAST()
} else {
Sorter::SortKeys( plan->storage,
plan->storage_bytes,
d_keys,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
begin_bit,
end_bit,
stream );
}
CUDA_CHECK_LAST();
}
if ( plan->storage ) {
// We have operated and the result is not in front buffer
if (d_key_buf != d_keys.Current()){
cudaMemcpyAsync(d_key_buf, d_key_alt_buf, num_items * sizeof(Tk),
cudaMemcpyDeviceToDevice, stream);
CUDA_CHECK_LAST();
}
} else {
// We have not operated.
// Just checking for temporary storage requirement
RMM_TRY( RMM_ALLOC(&plan->storage, plan->storage_bytes, plan->stream) ); // TODO: non-default stream
CUDA_CHECK_LAST();
// Now that we have allocated, do real work.
return sort(plan, d_key_buf, d_value_buf, num_segments,
d_begin_offsets, d_end_offsets);
}
return GDF_SUCCESS;
}
};
gdf_segmented_radixsort_plan_type* cffi_wrap(SegmentedRadixSortPlan* obj){
return reinterpret_cast<gdf_segmented_radixsort_plan_type*>(obj);
}
SegmentedRadixSortPlan* cffi_unwrap(gdf_segmented_radixsort_plan_type* hdl){
return reinterpret_cast<SegmentedRadixSortPlan*>(hdl);
}
gdf_segmented_radixsort_plan_type* gdf_segmented_radixsort_plan(
size_t num_items, int descending,
unsigned begin_bit, unsigned end_bit)
{
return cffi_wrap(new SegmentedRadixSortPlan(num_items, descending,
begin_bit, end_bit));
}
gdf_error gdf_segmented_radixsort_plan_setup(
gdf_segmented_radixsort_plan_type *hdl,
size_t sizeof_key, size_t sizeof_val)
{
return cffi_unwrap(hdl)->setup(sizeof_key, sizeof_val);
}
gdf_error gdf_segmented_radixsort_plan_free(gdf_segmented_radixsort_plan_type *hdl)
{
auto plan = cffi_unwrap(hdl);
gdf_error status = plan->teardown();
delete plan;
return status;
}
template <typename Tv>
struct gdf_segmented_radixsort_functor
{
template <typename Tk>
gdf_error
operator()( gdf_segmented_radixsort_plan_type *hdl,
gdf_column *keycol,
gdf_column *valcol,
unsigned num_segments,
unsigned *d_begin_offsets,
unsigned *d_end_offsets)
{
/* validity mask must be empty */
GDF_REQUIRE(!keycol->valid || !keycol->null_count, GDF_VALIDITY_UNSUPPORTED);
GDF_REQUIRE(!valcol->valid || !valcol->null_count, GDF_VALIDITY_UNSUPPORTED);
/* size of columns must match */
GDF_REQUIRE(keycol->size == valcol->size, GDF_COLUMN_SIZE_MISMATCH);
SegmentedRadixSortPlan *plan = cffi_unwrap(hdl);
/* num_items must match */
GDF_REQUIRE(plan->num_items == keycol->size, GDF_COLUMN_SIZE_MISMATCH);
/* back buffer size must match */
GDF_REQUIRE(sizeof(Tk) * plan->num_items == plan->back_key_size,
GDF_COLUMN_SIZE_MISMATCH);
GDF_REQUIRE(sizeof(Tv) * plan->num_items == plan->back_val_size,
GDF_COLUMN_SIZE_MISMATCH);
/* Do sort */
return SegmentedRadixSort<Tk, Tv>::sort(plan,
(Tk*)keycol->data, (Tv*)valcol->data,
num_segments, d_begin_offsets, d_end_offsets);
}
};
gdf_error gdf_segmented_radixsort(gdf_segmented_radixsort_plan_type *hdl,
gdf_column *keycol,
gdf_column *valcol,
unsigned num_segments,
unsigned *d_begin_offsets,
unsigned *d_end_offsets)
{
GDF_REQUIRE(valcol->dtype == GDF_INT64, GDF_UNSUPPORTED_DTYPE);
return cudf::type_dispatcher(keycol->dtype,
gdf_segmented_radixsort_functor<int64_t>{},
hdl, keycol, valcol,
num_segments, d_begin_offsets,
d_end_offsets);
}
|
31c49f49e5b065c0111640846e9257a700836f72.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Indice2D.h"
#include "cudaTools.h"
#include "Device.h"
#include "MandelbrotMath.h"
#include "IndiceTools_GPU.h"
#include "DomaineMath_GPU.h"
using namespace gpu;
// Note: the choice of the file name is important!
// VagueDevice.cu and not Vague.cu
// In the latter case there is a linkage problem, because the .cu file has the same name as a (host) .cpp file.
// So "Device" (or anything else) is appended so that the names are different!
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__ void mandelbrot(uchar4* ptrDevPixels, uint w, uint h, float t, uint n, DomaineMath domaine);
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__ void mandelbrot(uchar4* ptrDevPixels, uint w, uint h, float t,
uint n, DomaineMath domaineMath)
{
MandelbrotMath mandelbrotMath = MandelbrotMath(n);
const int TID = Indice2D::tid();
const int NB_THREAD = Indice2D::nbThread();
const int WH = w * h;
uchar4 color;
double x, y;
int i, j;
int s = TID;
while (s < WH)
{
IndiceTools::toIJ(s, w, &i, &j);
domaineMath.toXY(i, j, &x, &y);
mandelbrotMath.colorXY(&color, x, y, t);
ptrDevPixels[s] = color;
s += NB_THREAD;
}
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
| 31c49f49e5b065c0111640846e9257a700836f72.cu | #include "Indice2D.h"
#include "cudaTools.h"
#include "Device.h"
#include "MandelbrotMath.h"
#include "IndiceTools_GPU.h"
#include "DomaineMath_GPU.h"
using namespace gpu;
// Note: the choice of the file name is important!
// VagueDevice.cu and not Vague.cu
// In the latter case there is a linkage problem, because the .cu file has the same name as a (host) .cpp file.
// So "Device" (or anything else) is appended so that the names are different!
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__ void mandelbrot(uchar4* ptrDevPixels, uint w, uint h, float t, uint n, DomaineMath domaine);
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__ void mandelbrot(uchar4* ptrDevPixels, uint w, uint h, float t,
uint n, DomaineMath domaineMath)
{
MandelbrotMath mandelbrotMath = MandelbrotMath(n);
const int TID = Indice2D::tid();
const int NB_THREAD = Indice2D::nbThread();
const int WH = w * h;
uchar4 color;
double x, y;
int i, j;
int s = TID;
while (s < WH)
{
IndiceTools::toIJ(s, w, &i, &j);
domaineMath.toXY(i, j, &x, &y);
mandelbrotMath.colorXY(&color, x, y, t);
ptrDevPixels[s] = color;
s += NB_THREAD;
}
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
04a0b8680dc0a38e36b7582d99fc2e08c6332289.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2011, Alex Krizhevsky ([email protected])
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <Python.h>
#include <arrayobject.h>
#include <assert.h>
#include <cutil_inline.h>
#include <rocblas.h>
#include <time.h>
#include <vector>
#include <matrix.h>
#include <queue.h>
#include <worker.cuh>
#include <util.cuh>
#include <cost.cuh>
#include <pyconvnet.cuh>
#include <convnet.cuh>
using namespace std;
static ConvNet* model = NULL;
static PyMethodDef _ConvNetMethods[] = { { "initModel", initModel, METH_VARARGS },
{ "startBatch", startBatch, METH_VARARGS },
{ "finishBatch", finishBatch, METH_VARARGS },
{ "checkGradients", checkGradients, METH_VARARGS },
{ "startMultiviewTest", startMultiviewTest, METH_VARARGS },
{ "startFeatureWriter", startFeatureWriter, METH_VARARGS },
{ "syncWithHost", syncWithHost, METH_VARARGS },
{ NULL, NULL }
};
#if defined(_WIN64) || defined(_WIN32)
extern "C" __declspec(dllexport) void initpyconvnet() {
(void) Py_InitModule("pyconvnet", _ConvNetMethods);
import_array();
}
#else
void INITNAME() {
(void) Py_InitModule(QUOTEME(MODELNAME), _ConvNetMethods);
import_array();
}
#endif
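// Informal sketch of the call protocol the Python side is expected to follow,
// based on the handlers below: initModel is called once to build the ConvNet
// and start its worker thread; each minibatch is then driven by startBatch,
// which only enqueues a TrainingWorker and returns, followed by finishBatch,
// which blocks on the result queue and returns the accumulated costs;
// syncWithHost copies the GPU weights back to host memory before the model is
// saved.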
PyObject* initModel(PyObject *self, PyObject *args) {
assert(model == NULL);
PyListObject* pyLayerParams;
int pyMinibatchSize;
int pyDeviceID;
int fix_nan;
if (!PyArg_ParseTuple(args, "O!iii",
&PyList_Type, &pyLayerParams,
&pyMinibatchSize,
&pyDeviceID,
&fix_nan)) {
return NULL;
}
model = new ConvNet(pyLayerParams,
pyMinibatchSize,
pyDeviceID,
fix_nan);
model->start();
return Py_BuildValue("i", 0);
}
/*
* Starts training/testing on the given batch (asynchronous -- returns immediately).
*/
PyObject* startBatch(PyObject *self, PyObject *args) {
assert(model != NULL);
PyListObject* data;
int test = 0;
int epoch = 0;
float epsScale = 0;
if (!PyArg_ParseTuple(args, "O!|iif",
&PyList_Type, &data,
&epoch,
&test,
&epsScale)) {
return NULL;
}
MatrixV& mvec = *getMatrixV((PyObject*)data);
TrainingWorker* wr = new TrainingWorker(*model, *new CPUData(mvec), test, epoch, epsScale);
model->getWorkerQueue().enqueue(wr);
return Py_BuildValue("i", 0);
}
/*
* Starts testing on the given batch (asynchronous -- returns immediately).
*/
PyObject* startMultiviewTest(PyObject *self, PyObject *args) {
assert(model != NULL);
PyListObject* data;
int numViews, logregIdx;
if (!PyArg_ParseTuple(args, "O!ii",
&PyList_Type, &data,
&numViews,
&logregIdx)) {
return NULL;
}
MatrixV& mvec = *getMatrixV((PyObject*)data);
MultiviewTestWorker* wr = new MultiviewTestWorker(*model, *new CPUData(mvec), numViews, logregIdx);
model->getWorkerQueue().enqueue(wr);
return Py_BuildValue("i", 0);
}
PyObject* startFeatureWriter(PyObject *self, PyObject *args) {
assert(model != NULL);
PyListObject* data;
int layerIdx;
if (!PyArg_ParseTuple(args, "O!i",
&PyList_Type, &data,
&layerIdx)) {
return NULL;
}
MatrixV& mvec = *getMatrixV((PyObject*)data);
Matrix& ftrs = *mvec.back();
mvec.pop_back();
FeatureWorker* wr = new FeatureWorker(*model, *new CPUData(mvec), ftrs, layerIdx);
model->getWorkerQueue().enqueue(wr);
return Py_BuildValue("i", 0);
}
/*
* Waits for the trainer to finish training on the batch given to startBatch.
*/
PyObject* finishBatch(PyObject *self, PyObject *args) {
assert(model != NULL);
WorkResult* res = model->getResultQueue().dequeue();
assert(res != NULL);
assert(res->getResultType() == WorkResult::BATCH_DONE);
Cost& cost = res->getResults();
PyObject* dict = PyDict_New();
CostMap& costMap = cost.getCostMap();
for (CostMap::const_iterator it = costMap.begin(); it != costMap.end(); ++it) {
PyObject* v = PyList_New(0);
for (vector<double>::const_iterator iv = it->second->begin(); iv != it->second->end(); ++iv) {
PyObject* f = PyFloat_FromDouble(*iv);
PyList_Append(v, f);
}
PyDict_SetItemString(dict, it->first.c_str(), v);
}
PyObject* retVal = Py_BuildValue("Ni", dict, cost.getNumCases());
delete res; // Deletes cost too
return retVal;
}
PyObject* checkGradients(PyObject *self, PyObject *args) {
assert(model != NULL);
PyListObject* data;
if (!PyArg_ParseTuple(args, "O!",
&PyList_Type, &data)) {
return NULL;
}
MatrixV& mvec = *getMatrixV((PyObject*)data);
GradCheckWorker* wr = new GradCheckWorker(*model, *new CPUData(mvec));
model->getWorkerQueue().enqueue(wr);
WorkResult* res = model->getResultQueue().dequeue();
assert(res != NULL);
assert(res->getResultType() == WorkResult::BATCH_DONE);
delete res;
return Py_BuildValue("i", 0);
}
/*
* Copies weight matrices from GPU to system memory.
*/
PyObject* syncWithHost(PyObject *self, PyObject *args) {
assert(model != NULL);
SyncWorker* wr = new SyncWorker(*model);
model->getWorkerQueue().enqueue(wr);
WorkResult* res = model->getResultQueue().dequeue();
assert(res != NULL);
assert(res->getResultType() == WorkResult::SYNC_DONE);
delete res;
return Py_BuildValue("i", 0);
}
| 04a0b8680dc0a38e36b7582d99fc2e08c6332289.cu | /*
* Copyright (c) 2011, Alex Krizhevsky ([email protected])
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <Python.h>
#include <arrayobject.h>
#include <assert.h>
#include <cutil_inline.h>
#include <cublas.h>
#include <time.h>
#include <vector>
#include <matrix.h>
#include <queue.h>
#include <worker.cuh>
#include <util.cuh>
#include <cost.cuh>
#include <pyconvnet.cuh>
#include <convnet.cuh>
using namespace std;
static ConvNet* model = NULL;
static PyMethodDef _ConvNetMethods[] = { { "initModel", initModel, METH_VARARGS },
{ "startBatch", startBatch, METH_VARARGS },
{ "finishBatch", finishBatch, METH_VARARGS },
{ "checkGradients", checkGradients, METH_VARARGS },
{ "startMultiviewTest", startMultiviewTest, METH_VARARGS },
{ "startFeatureWriter", startFeatureWriter, METH_VARARGS },
{ "syncWithHost", syncWithHost, METH_VARARGS },
{ NULL, NULL }
};
#if defined(_WIN64) || defined(_WIN32)
extern "C" __declspec(dllexport) void initpyconvnet() {
(void) Py_InitModule("pyconvnet", _ConvNetMethods);
import_array();
}
#else
void INITNAME() {
(void) Py_InitModule(QUOTEME(MODELNAME), _ConvNetMethods);
import_array();
}
#endif
PyObject* initModel(PyObject *self, PyObject *args) {
assert(model == NULL);
PyListObject* pyLayerParams;
int pyMinibatchSize;
int pyDeviceID;
int fix_nan;
if (!PyArg_ParseTuple(args, "O!iii",
&PyList_Type, &pyLayerParams,
&pyMinibatchSize,
&pyDeviceID,
&fix_nan)) {
return NULL;
}
model = new ConvNet(pyLayerParams,
pyMinibatchSize,
pyDeviceID,
fix_nan);
model->start();
return Py_BuildValue("i", 0);
}
/*
* Starts training/testing on the given batch (asynchronous -- returns immediately).
*/
PyObject* startBatch(PyObject *self, PyObject *args) {
assert(model != NULL);
PyListObject* data;
int test = 0;
int epoch = 0;
float epsScale = 0;
if (!PyArg_ParseTuple(args, "O!|iif",
&PyList_Type, &data,
&epoch,
&test,
&epsScale)) {
return NULL;
}
MatrixV& mvec = *getMatrixV((PyObject*)data);
TrainingWorker* wr = new TrainingWorker(*model, *new CPUData(mvec), test, epoch, epsScale);
model->getWorkerQueue().enqueue(wr);
return Py_BuildValue("i", 0);
}
/*
* Starts testing on the given batch (asynchronous -- returns immediately).
*/
PyObject* startMultiviewTest(PyObject *self, PyObject *args) {
assert(model != NULL);
PyListObject* data;
int numViews, logregIdx;
if (!PyArg_ParseTuple(args, "O!ii",
&PyList_Type, &data,
&numViews,
&logregIdx)) {
return NULL;
}
MatrixV& mvec = *getMatrixV((PyObject*)data);
MultiviewTestWorker* wr = new MultiviewTestWorker(*model, *new CPUData(mvec), numViews, logregIdx);
model->getWorkerQueue().enqueue(wr);
return Py_BuildValue("i", 0);
}
PyObject* startFeatureWriter(PyObject *self, PyObject *args) {
assert(model != NULL);
PyListObject* data;
int layerIdx;
if (!PyArg_ParseTuple(args, "O!i",
&PyList_Type, &data,
&layerIdx)) {
return NULL;
}
MatrixV& mvec = *getMatrixV((PyObject*)data);
Matrix& ftrs = *mvec.back();
mvec.pop_back();
FeatureWorker* wr = new FeatureWorker(*model, *new CPUData(mvec), ftrs, layerIdx);
model->getWorkerQueue().enqueue(wr);
return Py_BuildValue("i", 0);
}
/*
* Waits for the trainer to finish training on the batch given to startBatch.
*/
PyObject* finishBatch(PyObject *self, PyObject *args) {
assert(model != NULL);
WorkResult* res = model->getResultQueue().dequeue();
assert(res != NULL);
assert(res->getResultType() == WorkResult::BATCH_DONE);
Cost& cost = res->getResults();
PyObject* dict = PyDict_New();
CostMap& costMap = cost.getCostMap();
for (CostMap::const_iterator it = costMap.begin(); it != costMap.end(); ++it) {
PyObject* v = PyList_New(0);
for (vector<double>::const_iterator iv = it->second->begin(); iv != it->second->end(); ++iv) {
PyObject* f = PyFloat_FromDouble(*iv);
PyList_Append(v, f);
}
PyDict_SetItemString(dict, it->first.c_str(), v);
}
PyObject* retVal = Py_BuildValue("Ni", dict, cost.getNumCases());
delete res; // Deletes cost too
return retVal;
}
PyObject* checkGradients(PyObject *self, PyObject *args) {
assert(model != NULL);
PyListObject* data;
if (!PyArg_ParseTuple(args, "O!",
&PyList_Type, &data)) {
return NULL;
}
MatrixV& mvec = *getMatrixV((PyObject*)data);
GradCheckWorker* wr = new GradCheckWorker(*model, *new CPUData(mvec));
model->getWorkerQueue().enqueue(wr);
WorkResult* res = model->getResultQueue().dequeue();
assert(res != NULL);
assert(res->getResultType() == WorkResult::BATCH_DONE);
delete res;
return Py_BuildValue("i", 0);
}
/*
* Copies weight matrices from GPU to system memory.
*/
PyObject* syncWithHost(PyObject *self, PyObject *args) {
assert(model != NULL);
SyncWorker* wr = new SyncWorker(*model);
model->getWorkerQueue().enqueue(wr);
WorkResult* res = model->getResultQueue().dequeue();
assert(res != NULL);
assert(res->getResultType() == WorkResult::SYNC_DONE);
delete res;
return Py_BuildValue("i", 0);
}
|
32b596f981fe4aa91ad21ff7f5478aa08f74569d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <chrono>
#include <math.h>
#include <hip/hip_runtime.h>
#include "reference.h"
template <typename T, typename G>
__global__
void adam (
T* __restrict__ p,
T* __restrict__ m,
T* __restrict__ v,
const G* __restrict__ g,
const float b1,
const float b2,
const float eps,
const float grad_scale,
const float step_size,
const int time_step,
const size_t vector_size,
adamMode_t mode,
const float decay)
{
const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
const size_t totThreads = gridDim.x*blockDim.x;
for (size_t j = i; j < vector_size; j += totThreads) {
for (int t = 0; t < time_step; t++) {
T scaled_grad = g[j]/grad_scale;
m[j] = b1*m[j] + (1.f-b1)*scaled_grad;
v[j] = b2*v[j] + (1.f-b2)*scaled_grad*scaled_grad;
float m_corrected = m[j] / (1.f-powf(b1, t));
float v_corrected = v[j] / (1.f-powf(b2, t));
float denom;
if (mode == ADAM_MODE_0)
denom = sqrtf(v_corrected + eps);
else // Mode 1
denom = sqrtf(v_corrected) + eps;
float update = (m_corrected/denom) + (decay*p[j]);
p[j] -= (step_size*update);
}
}
}
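// For reference, the textbook Adam update approximated by the kernel above is
//   m_t = b1 * m_{t-1} + (1 - b1) * g_t
//   v_t = b2 * v_{t-1} + (1 - b2) * g_t^2
//   m_hat = m_t / (1 - b1^t),  v_hat = v_t / (1 - b2^t),  t = 1, 2, ...
//   p_t = p_{t-1} - step_size * (m_hat / (sqrt(v_hat) + eps) + decay * p_{t-1})
// Note that the loop above starts at t == 0, so its first bias-correction
// denominators are 1 - b1^0 == 0; the host-side reference in reference.h is
// presumably iterated the same way, since the final check only compares the
// two results against each other.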
int main(int argc, char* argv[])
{
if (argc != 4) {
printf("Usage: %s <vector size> <number of time steps> <repeat>\n", argv[0]);
return 1;
}
const int vector_size = atoi(argv[1]);
const int time_step = atoi(argv[2]);
const int repeat = atoi(argv[3]);
size_t size_bytes = vector_size * sizeof(float);
float *m = (float*) malloc (size_bytes);
float *v = (float*) malloc (size_bytes);
float *g = (float*) malloc (size_bytes);
float *p = (float*) malloc (size_bytes);
float *r = (float*) malloc (size_bytes);
srand(123);
for (int i = 0; i < vector_size; i++) {
m[i] = rand() / (float)RAND_MAX;
v[i] = rand() / (float)RAND_MAX;
g[i] = rand() / (float)RAND_MAX;
r[i] = p[i] = rand() / (float)RAND_MAX;
}
float *d_m, *d_v, *d_g, *d_p;
hipMalloc((void**)&d_m, size_bytes);
hipMemcpy(d_m, m, size_bytes, hipMemcpyHostToDevice);
hipMalloc((void**)&d_v, size_bytes);
hipMemcpy(d_v, v, size_bytes, hipMemcpyHostToDevice);
hipMalloc((void**)&d_g, size_bytes);
hipMemcpy(d_g, g, size_bytes, hipMemcpyHostToDevice);
hipMalloc((void**)&d_p, size_bytes);
hipMemcpy(d_p, p, size_bytes, hipMemcpyHostToDevice);
// Arbitrary constants
const float step_size = 1e-3f;
const float decay = 0.5f;
const float beta1 = 0.9f;
const float beta2 = 0.999f;
const float eps = 1e-8f;
const float grad_scale = 256.f;
const int threadsPerBlock = 256;
const dim3 grids ((vector_size+threadsPerBlock-1) / threadsPerBlock);
const dim3 blocks (threadsPerBlock);
adamMode_t mode = ADAM_MODE_0;
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < repeat; i++) {
hipLaunchKernelGGL(( adam<float, float>), dim3(grids), dim3(blocks), 0, 0,
d_p, d_m, d_v, d_g,
beta1, beta2,
eps,
grad_scale,
step_size,
time_step,
vector_size,
mode,
decay);
}
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average kernel execution time %f (ms)\n", time * 1e-6f / repeat);
hipMemcpy(p, d_p, size_bytes, hipMemcpyDeviceToHost);
hipFree(d_p);
hipFree(d_m);
hipFree(d_v);
hipFree(d_g);
// verify
reference<float, float>(
repeat,
r, m, v, g,
beta1, beta2,
eps,
grad_scale,
step_size,
time_step,
vector_size,
mode,
decay);
bool ok = true;
for (int i = 0; i < vector_size; i++) {
if (r[i] - p[i] > 1e-3f) {
ok = false;
break;
}
}
printf("%s\n", ok ? "PASS" : "FAIL");
free(p);
free(m);
free(v);
free(g);
free(r);
return 0;
}
| 32b596f981fe4aa91ad21ff7f5478aa08f74569d.cu | #include <stdio.h>
#include <stdlib.h>
#include <chrono>
#include <math.h>
#include <hip/hip_runtime.h>
#include "reference.h"
template <typename T, typename G>
__global__
void adam (
T* __restrict__ p,
T* __restrict__ m,
T* __restrict__ v,
const G* __restrict__ g,
const float b1,
const float b2,
const float eps,
const float grad_scale,
const float step_size,
const int time_step,
const size_t vector_size,
adamMode_t mode,
const float decay)
{
const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
const size_t totThreads = gridDim.x*blockDim.x;
for (size_t j = i; j < vector_size; j += totThreads) {
for (int t = 0; t < time_step; t++) {
T scaled_grad = g[j]/grad_scale;
m[j] = b1*m[j] + (1.f-b1)*scaled_grad;
v[j] = b2*v[j] + (1.f-b2)*scaled_grad*scaled_grad;
float m_corrected = m[j] / (1.f-powf(b1, t));
float v_corrected = v[j] / (1.f-powf(b2, t));
float denom;
if (mode == ADAM_MODE_0)
denom = sqrtf(v_corrected + eps);
else // Mode 1
denom = sqrtf(v_corrected) + eps;
float update = (m_corrected/denom) + (decay*p[j]);
p[j] -= (step_size*update);
}
}
}
int main(int argc, char* argv[])
{
if (argc != 4) {
printf("Usage: %s <vector size> <number of time steps> <repeat>\n", argv[0]);
return 1;
}
const int vector_size = atoi(argv[1]);
const int time_step = atoi(argv[2]);
const int repeat = atoi(argv[3]);
size_t size_bytes = vector_size * sizeof(float);
float *m = (float*) malloc (size_bytes);
float *v = (float*) malloc (size_bytes);
float *g = (float*) malloc (size_bytes);
float *p = (float*) malloc (size_bytes);
float *r = (float*) malloc (size_bytes);
srand(123);
for (int i = 0; i < vector_size; i++) {
m[i] = rand() / (float)RAND_MAX;
v[i] = rand() / (float)RAND_MAX;
g[i] = rand() / (float)RAND_MAX;
r[i] = p[i] = rand() / (float)RAND_MAX;
}
float *d_m, *d_v, *d_g, *d_p;
hipMalloc((void**)&d_m, size_bytes);
hipMemcpy(d_m, m, size_bytes, hipMemcpyHostToDevice);
hipMalloc((void**)&d_v, size_bytes);
hipMemcpy(d_v, v, size_bytes, hipMemcpyHostToDevice);
hipMalloc((void**)&d_g, size_bytes);
hipMemcpy(d_g, g, size_bytes, hipMemcpyHostToDevice);
hipMalloc((void**)&d_p, size_bytes);
hipMemcpy(d_p, p, size_bytes, hipMemcpyHostToDevice);
// Arbitrary constants
const float step_size = 1e-3f;
const float decay = 0.5f;
const float beta1 = 0.9f;
const float beta2 = 0.999f;
const float eps = 1e-8f;
const float grad_scale = 256.f;
const int threadsPerBlock = 256;
const dim3 grids ((vector_size+threadsPerBlock-1) / threadsPerBlock);
const dim3 blocks (threadsPerBlock);
adamMode_t mode = ADAM_MODE_0;
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < repeat; i++) {
adam<float, float><<<grids, blocks>>> (
d_p, d_m, d_v, d_g,
beta1, beta2,
eps,
grad_scale,
step_size,
time_step,
vector_size,
mode,
decay);
}
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average kernel execution time %f (ms)\n", time * 1e-6f / repeat);
hipMemcpy(p, d_p, size_bytes, hipMemcpyDeviceToHost);
hipFree(d_p);
hipFree(d_m);
hipFree(d_v);
hipFree(d_g);
// verify
reference<float, float>(
repeat,
r, m, v, g,
beta1, beta2,
eps,
grad_scale,
step_size,
time_step,
vector_size,
mode,
decay);
bool ok = true;
for (int i = 0; i < vector_size; i++) {
if (r[i] - p[i] > 1e-3f) {
ok = false;
break;
}
}
printf("%s\n", ok ? "PASS" : "FAIL");
free(p);
free(m);
free(v);
free(g);
free(r);
return 0;
}
|
60ca48d49dac5471f2ae32cc3f4cf5c9f61101d9.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <book.h>
#include <gpu_anim.h>
#define DIM 1024
#define MAX_TEMP 1.0f
#define MIN_TEMP 0.0001f
#define SPEED 0.25f
// these exist on the GPU side
texture<float> texConstSrc;
texture<float> texIn;
texture<float> texOut;
// this kernel takes in a 2-d array of floats
// it updates the value-of-interest by a scaled value based
// on itself and its nearest neighbors
__global__
void blend_kernel(float *dst, bool dstOut){
// map from threadIdx/BlockIdx to pixel position
int x=threadIdx.x+blockIdx.x*blockDim.x;
int y=threadIdx.y+blockIdx.y*blockDim.y;
int offset=x+y*blockDim.x*gridDim.x;
int left=offset-1;
int right=offset+1;
if(x==0) left++;
if(x==DIM-1) right--;
int top=offset-DIM;
int bottom=offset+DIM;
if(y==0) top+=DIM;
if(y==DIM-1) bottom-=DIM;
float t, l, c, r, b;
if(dstOut){
t=tex1Dfetch(texIn, top);
l=tex1Dfetch(texIn, left);
c=tex1Dfetch(texIn, offset);
r=tex1Dfetch(texIn, right);
b=tex1Dfetch(texIn, bottom);
}else{
t=tex1Dfetch(texOut, top);
l=tex1Dfetch(texOut, left);
c=tex1Dfetch(texOut, offset);
r=tex1Dfetch(texOut, right);
b=tex1Dfetch(texOut, bottom);
}
dst[offset]=c+SPEED*(t+b+r+l-4*c);
}
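// The update computed by blend_kernel above is one explicit (Jacobi-style)
// step of the discrete heat equation: with c the cell's current value and
// t, b, l, r its four neighbours,
//     new_c = c + SPEED * (t + b + l + r - 4*c),
// i.e. each cell relaxes toward the average of its neighbours. SPEED is the
// combined diffusion/time-step factor of the usual finite-difference form;
// 0.25 is the largest value for which this explicit update stays stable.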
// NOTE - texOffsetConstSrc could either be passed as a
// parameter to this function, or passed in __constant__ memory
// if we declared it as a global above, it would be
// a parameter here:
// __global__ void copy_const_kernel(float *iptr,
//size_t texOffset)
__global__
void copy_const_kernel(float *iptr){
// map from threadIdx/BlockIdx to pixel position
int x=threadIdx.x+blockIdx.x*blockDim.x;
int y=threadIdx.y+blockIdx.y*blockDim.y;
int offset=x+y*blockDim.x*gridDim.x;
float c=tex1Dfetch(texConstSrc, offset);
if(c!=0)
iptr[offset]=c;
}
// globals needed by the update routine
struct DataBlock {
float *dev_inSrc;
float *dev_outSrc;
float *dev_constSrc;
hipEvent_t start, stop;
float totalTime;
float frames;
};
void anim_gpu(uchar4* outputBitmap, DataBlock *d, int ticks) {
HANDLE_ERROR(hipEventRecord(d->start, 0));
int b=(DIM+15)/16;
dim3 blocks(b, b);
dim3 threads(16,16);
// since tex is global and bound, we have to use a flag to
// select which is in/out per iteration
volatile bool dstOut=true;
for(int i=0; i<90; i++){
float *in, *out;
if(dstOut){
in=d->dev_inSrc;
out=d->dev_outSrc;
}else{
out=d->dev_inSrc;
in =d->dev_outSrc;
}
hipLaunchKernelGGL(( copy_const_kernel), dim3(blocks),dim3(threads), 0, 0, in);
hipLaunchKernelGGL(( blend_kernel), dim3(blocks),dim3(threads), 0, 0, out, dstOut);
dstOut=!dstOut;
}
hipLaunchKernelGGL(( float_to_color), dim3(blocks),dim3(threads), 0, 0, outputBitmap, d->dev_inSrc);
HANDLE_ERROR(hipEventRecord(d->stop, 0));
HANDLE_ERROR(hipEventSynchronize(d->stop));
float elapsedTime;
HANDLE_ERROR(hipEventElapsedTime(&elapsedTime, d->start, d->stop));
d->totalTime+=elapsedTime;
++d->frames;
printf("Average Time per frame: %3.1f ms\n", d->totalTime/d->frames);
}
// clean up memory allocated on the GPU
void anim_exit(DataBlock *d) {
HANDLE_ERROR(hipUnbindTexture(texIn));
HANDLE_ERROR(hipUnbindTexture(texOut));
HANDLE_ERROR(hipUnbindTexture(texConstSrc));
HANDLE_ERROR(hipFree(d->dev_inSrc));
HANDLE_ERROR(hipFree(d->dev_outSrc));
HANDLE_ERROR(hipFree(d->dev_constSrc));
HANDLE_ERROR(hipEventDestroy(d->start));
HANDLE_ERROR(hipEventDestroy(d->stop));
}
int main(){
DataBlock data;
GPUAnimBitmap bitmap(DIM, DIM, &data);
data.totalTime=0;
data.frames=0;
HANDLE_ERROR(hipEventCreate(&data.start));
HANDLE_ERROR(hipEventCreate(&data.stop));
int imageSize=bitmap.image_size();
// assume float == 4 chars in size (ie rgba)
HANDLE_ERROR(hipMalloc((void**)&data.dev_inSrc, imageSize));
HANDLE_ERROR(hipMalloc((void**)&data.dev_outSrc, imageSize));
HANDLE_ERROR(hipMalloc((void**)&data.dev_constSrc, imageSize));
HANDLE_ERROR(hipBindTexture(NULL, texConstSrc, data.dev_constSrc, imageSize));
HANDLE_ERROR(hipBindTexture(NULL, texIn, data.dev_inSrc, imageSize));
HANDLE_ERROR(hipBindTexture(NULL, texOut, data.dev_outSrc, imageSize));
// initialize the constant data
float *temp=new float[imageSize];
for(int i=0; i<DIM*DIM; i++){
temp[i]=0;
int x=i%DIM;
int y=i/DIM;
if((x>300) && (x<600) && (y>310) && (y<601)){
temp[i]=MAX_TEMP;
}
}
temp[DIM*100+100]=(MAX_TEMP+MIN_TEMP)/2;
temp[DIM*700+100]=MIN_TEMP;
temp[DIM*300+300]=MIN_TEMP;
temp[DIM*200+700]=MIN_TEMP;
for(int y=800; y<900; y++){
for(int x=400; x<500; x++){
temp[x+y*DIM]=MIN_TEMP;
}
}
HANDLE_ERROR(hipMemcpy(data.dev_constSrc, temp, imageSize, hipMemcpyHostToDevice));
// initialize the input data
for(int y=800; y<DIM; y++){
for(int x=0; x<200; x++){
temp[x+y*DIM]=MAX_TEMP;
}
}
HANDLE_ERROR(hipMemcpy(data.dev_inSrc, temp, imageSize, hipMemcpyHostToDevice));
delete [] temp;
bitmap.anim_and_exit((void (*)(uchar4*,void*,int))anim_gpu, (void (*)(void*))anim_exit);
}
| 60ca48d49dac5471f2ae32cc3f4cf5c9f61101d9.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <book.h>
#include <gpu_anim.h>
#define DIM 1024
#define MAX_TEMP 1.0f
#define MIN_TEMP 0.0001f
#define SPEED 0.25f
// these exist on the GPU side
texture<float> texConstSrc;
texture<float> texIn;
texture<float> texOut;
// this kernel takes in a 2-d array of floats
// it updates the value-of-interest by a scaled value based
// on itself and its nearest neighbors
__global__
void blend_kernel(float *dst, bool dstOut){
// map from threadIdx/BlockIdx to pixel position
int x=threadIdx.x+blockIdx.x*blockDim.x;
int y=threadIdx.y+blockIdx.y*blockDim.y;
int offset=x+y*blockDim.x*gridDim.x;
int left=offset-1;
int right=offset+1;
if(x==0) left++;
if(x==DIM-1) right--;
int top=offset-DIM;
int bottom=offset+DIM;
if(y==0) top+=DIM;
if(y==DIM-1) bottom-=DIM;
float t, l, c, r, b;
if(dstOut){
t=tex1Dfetch(texIn, top);
l=tex1Dfetch(texIn, left);
c=tex1Dfetch(texIn, offset);
r=tex1Dfetch(texIn, right);
b=tex1Dfetch(texIn, bottom);
}else{
t=tex1Dfetch(texOut, top);
l=tex1Dfetch(texOut, left);
c=tex1Dfetch(texOut, offset);
r=tex1Dfetch(texOut, right);
b=tex1Dfetch(texOut, bottom);
}
dst[offset]=c+SPEED*(t+b+r+l-4*c);
}
// NOTE - texOffsetConstSrc could either be passed as a
// parameter to this function, or passed in __constant__ memory
// if we declared it as a global above, it would be
// a parameter here:
// __global__ void copy_const_kernel(float *iptr,
//size_t texOffset)
__global__
void copy_const_kernel(float *iptr){
// map from threadIdx/BlockIdx to pixel position
int x=threadIdx.x+blockIdx.x*blockDim.x;
int y=threadIdx.y+blockIdx.y*blockDim.y;
int offset=x+y*blockDim.x*gridDim.x;
float c=tex1Dfetch(texConstSrc, offset);
if(c!=0)
iptr[offset]=c;
}
// globals needed by the update routine
struct DataBlock {
float *dev_inSrc;
float *dev_outSrc;
float *dev_constSrc;
cudaEvent_t start, stop;
float totalTime;
float frames;
};
void anim_gpu(uchar4* outputBitmap, DataBlock *d, int ticks) {
HANDLE_ERROR(cudaEventRecord(d->start, 0));
int b=(DIM+15)/16;
dim3 blocks(b, b);
dim3 threads(16,16);
// since tex is global and bound, we have to use a flag to
// select which is in/out per iteration
volatile bool dstOut=true;
for(int i=0; i<90; i++){
float *in, *out;
if(dstOut){
in=d->dev_inSrc;
out=d->dev_outSrc;
}else{
out=d->dev_inSrc;
in =d->dev_outSrc;
}
copy_const_kernel<<<blocks,threads>>>(in);
blend_kernel<<<blocks,threads>>>(out, dstOut);
dstOut=!dstOut;
}
float_to_color<<<blocks,threads>>>(outputBitmap, d->dev_inSrc);
HANDLE_ERROR(cudaEventRecord(d->stop, 0));
HANDLE_ERROR(cudaEventSynchronize(d->stop));
float elapsedTime;
HANDLE_ERROR(cudaEventElapsedTime(&elapsedTime, d->start, d->stop));
d->totalTime+=elapsedTime;
++d->frames;
printf("Average Time per frame: %3.1f ms\n", d->totalTime/d->frames);
}
// clean up memory allocated on the GPU
void anim_exit(DataBlock *d) {
HANDLE_ERROR(cudaUnbindTexture(texIn));
HANDLE_ERROR(cudaUnbindTexture(texOut));
HANDLE_ERROR(cudaUnbindTexture(texConstSrc));
HANDLE_ERROR(cudaFree(d->dev_inSrc));
HANDLE_ERROR(cudaFree(d->dev_outSrc));
HANDLE_ERROR(cudaFree(d->dev_constSrc));
HANDLE_ERROR(cudaEventDestroy(d->start));
HANDLE_ERROR(cudaEventDestroy(d->stop));
}
int main(){
DataBlock data;
GPUAnimBitmap bitmap(DIM, DIM, &data);
data.totalTime=0;
data.frames=0;
HANDLE_ERROR(cudaEventCreate(&data.start));
HANDLE_ERROR(cudaEventCreate(&data.stop));
int imageSize=bitmap.image_size();
// assume float == 4 chars in size (ie rgba)
HANDLE_ERROR(cudaMalloc((void**)&data.dev_inSrc, imageSize));
HANDLE_ERROR(cudaMalloc((void**)&data.dev_outSrc, imageSize));
HANDLE_ERROR(cudaMalloc((void**)&data.dev_constSrc, imageSize));
HANDLE_ERROR(cudaBindTexture(NULL, texConstSrc, data.dev_constSrc, imageSize));
HANDLE_ERROR(cudaBindTexture(NULL, texIn, data.dev_inSrc, imageSize));
HANDLE_ERROR(cudaBindTexture(NULL, texOut, data.dev_outSrc, imageSize));
// initialize the constant data
float *temp=new float[imageSize];
for(int i=0; i<DIM*DIM; i++){
temp[i]=0;
int x=i%DIM;
int y=i/DIM;
if((x>300) && (x<600) && (y>310) && (y<601)){
temp[i]=MAX_TEMP;
}
}
temp[DIM*100+100]=(MAX_TEMP+MIN_TEMP)/2;
temp[DIM*700+100]=MIN_TEMP;
temp[DIM*300+300]=MIN_TEMP;
temp[DIM*200+700]=MIN_TEMP;
for(int y=800; y<900; y++){
for(int x=400; x<500; x++){
temp[x+y*DIM]=MIN_TEMP;
}
}
HANDLE_ERROR(cudaMemcpy(data.dev_constSrc, temp, imageSize, cudaMemcpyHostToDevice));
// initialize the input data
for(int y=800; y<DIM; y++){
for(int x=0; x<200; x++){
temp[x+y*DIM]=MAX_TEMP;
}
}
HANDLE_ERROR(cudaMemcpy(data.dev_inSrc, temp, imageSize, cudaMemcpyHostToDevice));
delete [] temp;
bitmap.anim_and_exit((void (*)(uchar4*,void*,int))anim_gpu, (void (*)(void*))anim_exit);
}
|
3aca91a50b3d82f7f9be7db85cec7e792415e5f6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/hip/Atomic.cuh>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/cub.cuh>
#include <ATen/TensorUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/native/hip/SortingCommon.cuh>
#include <ATen/AccumulateType.h>
#include <c10/macros/Macros.h>
namespace at {
namespace native {
namespace {
/* This code computes the sum of the weights in two-steps:
1) Each GPU warp sums `NROWS_PER_THREAD` rows given by `indices`
2) The partial sums from 1) are summed and scattered into `grad_weight`
Notice, `NROWS_PER_THREAD` impacts the Achieved Occupancy of the
kernel execution. If it is high, the size of the thread blocks will be
too small to achieve good occupancy. Similarly, a very low value will
make the size of the thread blocks in the final sum in step 2) too small.
*/
constexpr int NROWS_PER_THREAD = 10;
// Fast ceil division (no overflow checking)
__host__ __device__ __forceinline__
int64_t ceil_div(int64_t x, int64_t y) {
return (x + y - 1) / y;
}
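// Illustrative arithmetic for the split described at the top of this file (not
// used by the kernels): a segment that owns 25 rows of the gradient is covered
// by ceil_div(25, NROWS_PER_THREAD) partial sums, which step 2) later adds
// back into a single row of grad_weight.
static_assert((25 + NROWS_PER_THREAD - 1) / NROWS_PER_THREAD == 3,
              "with NROWS_PER_THREAD == 10, a 25-row segment yields 3 partial sums");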
template <typename index_t>
__global__
void krn_partials_per_segment(index_t *ret, const index_t *segment_offsets,
int64_t num_of_segments, int64_t numel) {
const int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id < num_of_segments) {
const int64_t idx_start = segment_offsets[id];
const int64_t idx_end = (id == num_of_segments-1)?numel:segment_offsets[id+1];
const int64_t size = idx_end - idx_start;
ret[id] = ceil_div(size, NROWS_PER_THREAD);
}
}
template <typename index_t>
__global__
void krn_partial_segment_offset(
index_t *ret,
const index_t *partials_per_segment,
const index_t *partials_per_segment_offset,
const index_t *segment_offsets,
int64_t num_of_segments) {
const int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id < num_of_segments) {
index_t idx = partials_per_segment_offset[id];
const index_t num_partials = partials_per_segment[id];
const index_t segment_offset = segment_offsets[id];
for (int64_t i=0; i<num_partials; ++i) {
ret[idx++] = segment_offset + i * NROWS_PER_THREAD;
}
}
}
template <typename scalar_t, typename index_t>
__global__ void compute_grad_weight_bags(
index_t *indices, scalar_t *gradOutput,
index_t *offset2bag, index_t *count, ptrdiff_t numel,
int64_t stride, int mode_mean, const index_t *bag_size,
scalar_t* per_sample_weights, int64_t per_sample_weights_stride,
index_t* segment_offsets, int64_t num_of_segments,
acc_type<scalar_t, true> *grad_weight_per_segment,
const int64_t stride_warped) {
const int gid = blockIdx.x * blockDim.x + threadIdx.x;
const int id = gid / stride_warped;
const int startFeature = gid % stride_warped;
if (startFeature >= stride) {
return;
}
if (id >= num_of_segments) {
return;
}
const int idx_begin = segment_offsets[id];
const int idx_end = (id == num_of_segments-1)?numel:segment_offsets[id+1];
acc_type<scalar_t, true> weight = 0;
for (int idx=idx_begin; idx < idx_end; ++idx) {
const int origRow = indices[idx];
const int seq_number = offset2bag[origRow];
const int gradOutputRow = seq_number * stride;
acc_type<scalar_t, true> scale = count ? 1.0 / count[idx] : 1.0;
if (per_sample_weights) {
scale *= per_sample_weights[origRow * per_sample_weights_stride];
}
acc_type<scalar_t, true> gradient = gradOutput[gradOutputRow + startFeature];
if (mode_mean) {
gradient /= bag_size[seq_number];
}
weight += gradient * scale;
}
grad_weight_per_segment[id * stride + startFeature] = weight;
}
template <typename scalar_t, typename index_t>
__global__ void compute_grad_weight(
index_t *indices,
scalar_t *gradOutput,
index_t *count,
ptrdiff_t numel,
int64_t stride,
index_t* segment_offsets,
int64_t num_of_segments,
acc_type<scalar_t, true> *grad_weight_per_segment,
const int64_t stride_warped) {
using accscalar_t = acc_type<scalar_t, true>;
const int gid = blockIdx.x * blockDim.x + threadIdx.x;
const int id = gid / stride_warped;
const int startFeature = gid % stride_warped;
if (startFeature >= stride) {
return;
}
if (id >= num_of_segments) {
return;
}
const int idx_begin = segment_offsets[id];
const int idx_end = (id == num_of_segments-1)?numel:segment_offsets[id+1];
accscalar_t weight = 0;
for (int idx=idx_begin; idx < idx_end; ++idx) {
const index_t target_row = indices[idx];
const accscalar_t scale = count ? (accscalar_t)1.0 / count[idx] : 1.0;
weight += gradOutput[target_row * stride + startFeature] * scale;
}
grad_weight_per_segment[id * stride + startFeature] = weight;
}
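// Step 2) of the scheme described at the top of this file: for each original
// segment, sum its partial-segment results from `grad_weight_per_segment` and
// scatter the total into the `grad_weight` row addressed by the segment's
// index, skipping rows equal to padding_idx.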
// This kernel assumes that all input tensors are contiguous.
template <typename scalar_t, typename index_t>
__global__ void sum_and_scatter(
index_t *input, scalar_t *gradWeight, int64_t stride,
index_t* segment_offsets, int64_t num_of_segments,
const acc_type<scalar_t, true> *grad_weight_per_segment,
const index_t *segment_sizes_offsets, int64_t num_of_partial_segments,
const int64_t padding_idx,
const int64_t stride_warped) {
const int gid = blockIdx.x * blockDim.x + threadIdx.x;
const int id = gid / stride_warped;
const int startFeature = gid % stride_warped;
if (startFeature >= stride) {
return;
}
if (id >= num_of_segments) {
return;
}
const int idx_begin = segment_sizes_offsets[id];
const int idx_end = (id == num_of_segments-1)?num_of_partial_segments:segment_sizes_offsets[id+1];
acc_type<scalar_t, true> weight = 0;
for (int idx=idx_begin; idx < idx_end; ++idx) {
weight += grad_weight_per_segment[idx*stride + startFeature];
}
int64_t target_row = input[segment_offsets[id]];
if (target_row != padding_idx) {
gradWeight[target_row * stride + startFeature] = weight;
}
}
} // anon namespace
template<typename index_t>
int64_t embedding_backward_cuda_kernel_unique_by_key(const Tensor &sorted_indices, Tensor &segment_offsets);
Tensor embedding_backward_cuda_kernel(
const Tensor &grad,
const Tensor &orig_indices,
const Tensor &sorted_indices,
const Tensor &count,
int64_t num_weights,
int padding_idx,
bool mode_mean,
const Tensor &offset2bag,
const Tensor &bag_size,
const Tensor &per_sample_weights) {
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
const ptrdiff_t numel = sorted_indices.numel();
auto grad_weight = at::zeros({num_weights, grad.size(-1)}, grad.options());
const int64_t stride = grad_weight.stride(0);
// Compute the number of segments and their start position so that we do not have to
// spawn a warp per index. In this context, a segment is a number of rows that should
// be summarized.
// Unit: index in `sorted_indices` and `orig_indices`
AT_DISPATCH_INDEX_TYPES(orig_indices.scalar_type(), "embedding_backward_cuda_kernel", [&] () {
auto segment_offsets = at::empty({numel}, orig_indices.options());
int64_t num_of_segments = embedding_backward_cuda_kernel_unique_by_key<index_t>(sorted_indices, segment_offsets);
// We split the segments up into sizes of `NROWS_PER_THREAD`
// Compute the number of partial-segments per segment (some partial-segments
// may not be the full `NROWS_PER_THREAD` number of rows)
auto partials_per_segment = at::empty({num_of_segments}, orig_indices.options());
{
hipLaunchKernelGGL(( krn_partials_per_segment), dim3(ceil_div(num_of_segments, 32)), dim3(32), 0, stream,
partials_per_segment.data_ptr<index_t>(),
segment_offsets.data_ptr<index_t>(),
num_of_segments,
numel);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
// In order to compute `partial_segment_offset`, which is the start index
// of each partial-segment in `sorted_indices`, we need to compute the
// start position of each _segment_ in `partial_segment_offset`.
// Unit: index in `partial_segment_offset`
auto partials_per_segment_offset = at::empty({num_of_segments}, orig_indices.options());
cuda::cub::exclusive_scan(
partials_per_segment.data_ptr<index_t>(),
partials_per_segment_offset.data_ptr<index_t>(),
hipcub::Sum(),
index_t(0),
num_of_segments);
// The total number of partial-segments is the sum of `partials_per_segment`,
// recovered here as the last exclusive-scan offset plus the last count
const int num_of_partial_segments = partials_per_segment[num_of_segments-1].item<index_t>() +
partials_per_segment_offset[num_of_segments-1].item<index_t>();
// Now we can compute the start position of each partial-segment
// Unit: index in `sorted_indices` and `orig_indices`
auto partial_segment_offset = at::empty({num_of_partial_segments}, orig_indices.options());
{
hipLaunchKernelGGL(( krn_partial_segment_offset), dim3(ceil_div(num_of_segments, 32)), dim3(32), 0, stream,
partial_segment_offset.data_ptr<index_t>(),
partials_per_segment.data_ptr<index_t>(),
partials_per_segment_offset.data_ptr<index_t>(),
segment_offsets.data_ptr<index_t>(),
num_of_segments);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
const int stride_warped = ceil_div(stride, C10_WARP_SIZE)*C10_WARP_SIZE;
const int block = ::min(stride_warped, MAX_BLOCK_SIZE);
const int grid = ceil_div(num_of_partial_segments*stride_warped, block);
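// Each (partial-)segment owns `stride_warped` consecutive thread ids (the
// feature stride rounded up to a multiple of the warp size), so no warp ever
// straddles two segments and threads with startFeature >= stride simply exit.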
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16,
grad.scalar_type(), "embedding_bag_backward_cuda_compute_grad_weight", [&] {
// For numerical stability, the dtype of `grad_weight_per_segment`
// should match `acc_type`
using partial_weight_t = acc_type<scalar_t, true>;
TensorOptions op;
if(grad.dtype() == at::kHalf || grad.dtype() == at::kBFloat16) {
op = grad.options().dtype(at::kFloat);
} else {
op = grad.options();
}
auto grad_weight_per_segment = at::empty({num_of_partial_segments, stride}, op);
// Compute the sum of each partial-segment and handle bags
if (offset2bag.defined()) {
hipLaunchKernelGGL(( compute_grad_weight_bags<scalar_t>), dim3(grid), dim3(block), 0, stream,
orig_indices.data_ptr<index_t>(),
grad.data_ptr<scalar_t>(),
offset2bag.data_ptr<index_t>(),
count.defined() ? count.data_ptr<index_t>() : nullptr, numel, stride,
mode_mean, bag_size.data_ptr<index_t>(),
per_sample_weights.defined() ? per_sample_weights.data_ptr<scalar_t>() : NULL,
per_sample_weights.defined() ? per_sample_weights.stride(0) : 0,
partial_segment_offset.data_ptr<index_t>(),
num_of_partial_segments, grad_weight_per_segment.data_ptr<partial_weight_t>(),
stride_warped);
C10_HIP_KERNEL_LAUNCH_CHECK();
} else {
hipLaunchKernelGGL(( compute_grad_weight<scalar_t>), dim3(grid), dim3(block), 0, stream,
orig_indices.data_ptr<index_t>(),
grad.data_ptr<scalar_t>(),
count.defined() ? count.data_ptr<index_t>() : nullptr,
numel, stride,
partial_segment_offset.data_ptr<index_t>(),
num_of_partial_segments,
grad_weight_per_segment.data_ptr<partial_weight_t>(),
stride_warped);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
// Finally, we sum all the partial-sums and scatter them
// into `grad_weight`.
const int grid2 = ceil_div(num_of_segments*stride_warped, block);
hipLaunchKernelGGL(( sum_and_scatter<scalar_t>), dim3(grid2), dim3(block), 0, stream,
sorted_indices.data_ptr<index_t>(),
grad_weight.data_ptr<scalar_t>(),
stride,
segment_offsets.data_ptr<index_t>(),
num_of_segments, grad_weight_per_segment.data_ptr<partial_weight_t>(),
partials_per_segment_offset.data_ptr<index_t>(),
num_of_partial_segments,
padding_idx,
stride_warped);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
});
return grad_weight;
}
}}
| 3aca91a50b3d82f7f9be7db85cec7e792415e5f6.cu | #include <ATen/ATen.h>
#include <ATen/cuda/Atomic.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/cub.cuh>
#include <ATen/TensorUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/native/cuda/SortingCommon.cuh>
#include <ATen/AccumulateType.h>
#include <c10/macros/Macros.h>
namespace at {
namespace native {
namespace {
/* This code computes the sum of the weights in two-steps:
   1) Each GPU warp sums `NROWS_PER_THREAD` rows given by `indices`
   2) The partial sums from 1) are summed and scattered into `grad_weight`
Notice, `NROWS_PER_THREAD` impacts the Achieved Occupancy of the
kernel execution. If it is high, the size of the thread blocks will be
too small to achieve good occupancy. Similarly, a very low value will
make the size of the thread blocks in the final sum in step 2) too small.
*/
constexpr int NROWS_PER_THREAD = 10;
// Fast ceil division (no overflow checking)
__host__ __device__ __forceinline__
int64_t ceil_div(int64_t x, int64_t y) {
return (x + y - 1) / y;
}
template <typename index_t>
__global__
void krn_partials_per_segment(index_t *ret, const index_t *segment_offsets,
int64_t num_of_segments, int64_t numel) {
const int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id < num_of_segments) {
const int64_t idx_start = segment_offsets[id];
const int64_t idx_end = (id == num_of_segments-1)?numel:segment_offsets[id+1];
const int64_t size = idx_end - idx_start;
ret[id] = ceil_div(size, NROWS_PER_THREAD);
}
}
template <typename index_t>
__global__
void krn_partial_segment_offset(
index_t *ret,
const index_t *partials_per_segment,
const index_t *partials_per_segment_offset,
const index_t *segment_offsets,
int64_t num_of_segments) {
const int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id < num_of_segments) {
index_t idx = partials_per_segment_offset[id];
const index_t num_partials = partials_per_segment[id];
const index_t segment_offset = segment_offsets[id];
for (int64_t i=0; i<num_partials; ++i) {
ret[idx++] = segment_offset + i * NROWS_PER_THREAD;
}
}
}
template <typename scalar_t, typename index_t>
__global__ void compute_grad_weight_bags(
index_t *indices, scalar_t *gradOutput,
index_t *offset2bag, index_t *count, ptrdiff_t numel,
int64_t stride, int mode_mean, const index_t *bag_size,
scalar_t* per_sample_weights, int64_t per_sample_weights_stride,
index_t* segment_offsets, int64_t num_of_segments,
acc_type<scalar_t, true> *grad_weight_per_segment,
const int64_t stride_warped) {
const int gid = blockIdx.x * blockDim.x + threadIdx.x;
const int id = gid / stride_warped;
const int startFeature = gid % stride_warped;
if (startFeature >= stride) {
return;
}
if (id >= num_of_segments) {
return;
}
const int idx_begin = segment_offsets[id];
const int idx_end = (id == num_of_segments-1)?numel:segment_offsets[id+1];
acc_type<scalar_t, true> weight = 0;
for (int idx=idx_begin; idx < idx_end; ++idx) {
const int origRow = indices[idx];
const int seq_number = offset2bag[origRow];
const int gradOutputRow = seq_number * stride;
acc_type<scalar_t, true> scale = count ? 1.0 / count[idx] : 1.0;
if (per_sample_weights) {
scale *= per_sample_weights[origRow * per_sample_weights_stride];
}
acc_type<scalar_t, true> gradient = gradOutput[gradOutputRow + startFeature];
if (mode_mean) {
gradient /= bag_size[seq_number];
}
weight += gradient * scale;
}
grad_weight_per_segment[id * stride + startFeature] = weight;
}
template <typename scalar_t, typename index_t>
__global__ void compute_grad_weight(
index_t *indices,
scalar_t *gradOutput,
index_t *count,
ptrdiff_t numel,
int64_t stride,
index_t* segment_offsets,
int64_t num_of_segments,
acc_type<scalar_t, true> *grad_weight_per_segment,
const int64_t stride_warped) {
using accscalar_t = acc_type<scalar_t, true>;
const int gid = blockIdx.x * blockDim.x + threadIdx.x;
const int id = gid / stride_warped;
const int startFeature = gid % stride_warped;
if (startFeature >= stride) {
return;
}
if (id >= num_of_segments) {
return;
}
const int idx_begin = segment_offsets[id];
const int idx_end = (id == num_of_segments-1)?numel:segment_offsets[id+1];
accscalar_t weight = 0;
for (int idx=idx_begin; idx < idx_end; ++idx) {
const index_t target_row = indices[idx];
const accscalar_t scale = count ? (accscalar_t)1.0 / count[idx] : 1.0;
weight += gradOutput[target_row * stride + startFeature] * scale;
}
grad_weight_per_segment[id * stride + startFeature] = weight;
}
// This kernel assumes that all input tensors are contiguous.
template <typename scalar_t, typename index_t>
__global__ void sum_and_scatter(
index_t *input, scalar_t *gradWeight, int64_t stride,
index_t* segment_offsets, int64_t num_of_segments,
const acc_type<scalar_t, true> *grad_weight_per_segment,
const index_t *segment_sizes_offsets, int64_t num_of_partial_segments,
const int64_t padding_idx,
const int64_t stride_warped) {
const int gid = blockIdx.x * blockDim.x + threadIdx.x;
const int id = gid / stride_warped;
const int startFeature = gid % stride_warped;
if (startFeature >= stride) {
return;
}
if (id >= num_of_segments) {
return;
}
const int idx_begin = segment_sizes_offsets[id];
const int idx_end = (id == num_of_segments-1)?num_of_partial_segments:segment_sizes_offsets[id+1];
acc_type<scalar_t, true> weight = 0;
for (int idx=idx_begin; idx < idx_end; ++idx) {
weight += grad_weight_per_segment[idx*stride + startFeature];
}
int64_t target_row = input[segment_offsets[id]];
if (target_row != padding_idx) {
gradWeight[target_row * stride + startFeature] = weight;
}
}
} // anon namespace
template<typename index_t>
int64_t embedding_backward_cuda_kernel_unique_by_key(const Tensor &sorted_indices, Tensor &segment_offsets);
Tensor embedding_backward_cuda_kernel(
const Tensor &grad,
const Tensor &orig_indices,
const Tensor &sorted_indices,
const Tensor &count,
int64_t num_weights,
int padding_idx,
bool mode_mean,
const Tensor &offset2bag,
const Tensor &bag_size,
const Tensor &per_sample_weights) {
auto stream = at::cuda::getCurrentCUDAStream();
const ptrdiff_t numel = sorted_indices.numel();
auto grad_weight = at::zeros({num_weights, grad.size(-1)}, grad.options());
const int64_t stride = grad_weight.stride(0);
// Compute the number of segments and their start position so that we do not have to
// spawn a warp per index. In this context, a segment is a number of rows that should
// be summarized.
// Unit: index in `sorted_indices` and `orig_indices`
AT_DISPATCH_INDEX_TYPES(orig_indices.scalar_type(), "embedding_backward_cuda_kernel", [&] () {
auto segment_offsets = at::empty({numel}, orig_indices.options());
int64_t num_of_segments = embedding_backward_cuda_kernel_unique_by_key<index_t>(sorted_indices, segment_offsets);
// We split the segments up into sizes of `NROWS_PER_THREAD`
// Compute the number of partial-segments per segment (some partial-segments
// may not be the full `NROWS_PER_THREAD` number of rows)
auto partials_per_segment = at::empty({num_of_segments}, orig_indices.options());
{
krn_partials_per_segment<<<ceil_div(num_of_segments, 32), 32, 0, stream>>> (
partials_per_segment.data_ptr<index_t>(),
segment_offsets.data_ptr<index_t>(),
num_of_segments,
numel);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
// In order to compute `partial_segment_offset`, which is the start index
// of each partial-segment in `sorted_indices`, we need to compute the
// start position of each _segment_ in `partial_segment_offset`.
// Unit: index in `partial_segment_offset`
auto partials_per_segment_offset = at::empty({num_of_segments}, orig_indices.options());
cuda::cub::exclusive_scan(
partials_per_segment.data_ptr<index_t>(),
partials_per_segment_offset.data_ptr<index_t>(),
cub::Sum(),
index_t(0),
num_of_segments);
// The total number of partial-segments is the sum of `partials_per_segment`,
// recovered here as the last exclusive-scan offset plus the last count
const int num_of_partial_segments = partials_per_segment[num_of_segments-1].item<index_t>() +
partials_per_segment_offset[num_of_segments-1].item<index_t>();
// Now we can compute the start position of each partial-segment
// Unit: index in `sorted_indices` and `orig_indices`
auto partial_segment_offset = at::empty({num_of_partial_segments}, orig_indices.options());
{
krn_partial_segment_offset<<<ceil_div(num_of_segments, 32), 32, 0, stream>>> (
partial_segment_offset.data_ptr<index_t>(),
partials_per_segment.data_ptr<index_t>(),
partials_per_segment_offset.data_ptr<index_t>(),
segment_offsets.data_ptr<index_t>(),
num_of_segments);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
const int stride_warped = ceil_div(stride, C10_WARP_SIZE)*C10_WARP_SIZE;
const int block = std::min(stride_warped, MAX_BLOCK_SIZE);
const int grid = ceil_div(num_of_partial_segments*stride_warped, block);
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16,
grad.scalar_type(), "embedding_bag_backward_cuda_compute_grad_weight", [&] {
// For numerical stability, the dtype of `grad_weight_per_segment`
// should match `acc_type`
using partial_weight_t = acc_type<scalar_t, true>;
TensorOptions op;
if(grad.dtype() == at::kHalf || grad.dtype() == at::kBFloat16) {
op = grad.options().dtype(at::kFloat);
} else {
op = grad.options();
}
auto grad_weight_per_segment = at::empty({num_of_partial_segments, stride}, op);
// Compute the sum of each partial-segment and handle bags
if (offset2bag.defined()) {
compute_grad_weight_bags<scalar_t><<<grid, block, 0, stream>>>(
orig_indices.data_ptr<index_t>(),
grad.data_ptr<scalar_t>(),
offset2bag.data_ptr<index_t>(),
count.defined() ? count.data_ptr<index_t>() : nullptr, numel, stride,
mode_mean, bag_size.data_ptr<index_t>(),
per_sample_weights.defined() ? per_sample_weights.data_ptr<scalar_t>() : NULL,
per_sample_weights.defined() ? per_sample_weights.stride(0) : 0,
partial_segment_offset.data_ptr<index_t>(),
num_of_partial_segments, grad_weight_per_segment.data_ptr<partial_weight_t>(),
stride_warped);
C10_CUDA_KERNEL_LAUNCH_CHECK();
} else {
compute_grad_weight<scalar_t><<<grid, block, 0, stream>>>(
orig_indices.data_ptr<index_t>(),
grad.data_ptr<scalar_t>(),
count.defined() ? count.data_ptr<index_t>() : nullptr,
numel, stride,
partial_segment_offset.data_ptr<index_t>(),
num_of_partial_segments,
grad_weight_per_segment.data_ptr<partial_weight_t>(),
stride_warped);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
// Finally, we sum all the partial-sums and scatter them
// into `grad_weight`.
const int grid2 = ceil_div(num_of_segments*stride_warped, block);
sum_and_scatter<scalar_t><<<grid2, block, 0, stream>>>(
sorted_indices.data_ptr<index_t>(),
grad_weight.data_ptr<scalar_t>(),
stride,
segment_offsets.data_ptr<index_t>(),
num_of_segments, grad_weight_per_segment.data_ptr<partial_weight_t>(),
partials_per_segment_offset.data_ptr<index_t>(),
num_of_partial_segments,
padding_idx,
stride_warped);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
});
return grad_weight;
}
}}
|
1faa1da66fc83d89e8a5787512287a49d7802161.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2016 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include "../debug.h"
#define N ( 2048 * 2048 )
#define RADIUS 7
#define THREADS_PER_BLOCK 512
__global__ void stencil_1d(int n, double *in, double *out)
{
/* calculate global index in the array */
/* insert code to calculate global index in the array using block
and thread built-in variables */
int globalIndex = blockIdx.x * blockDim.x + threadIdx.x;
/* return if my global index is larger than the array size */
if( globalIndex >= n ) return;
/* code to handle the boundary conditions */
if( globalIndex < RADIUS || globalIndex >= (n - RADIUS) )
{
out[globalIndex] = (double) globalIndex * ( (double)RADIUS*2 + 1) ;
return;
} /* end if */
double result = 0.0;
for( int i = globalIndex-(RADIUS); i <= globalIndex+(RADIUS); i++ )
{
/* add the required elements from the array "in" to the temporary
   variable "result" */
result += in[i];
}
out[globalIndex] = result;
return;
}
int main()
{
/* get GPU device number and name */
int dev;
hipDeviceProp_t deviceProp;
checkCUDA( hipGetDevice( &dev ) );
checkCUDA( hipGetDeviceProperties( &deviceProp, dev ) );
printf("Using GPU %d: %s\n", dev, deviceProp.name );
double *in, *out;
double *d_in, *d_out;
int size = N * sizeof( double );
/* allocate space for device copies of in, out */
checkCUDA( hipMalloc( (void **) &d_in, size ) );
checkCUDA( hipMalloc( (void **) &d_out, size ) );
/* allocate space for host copies of in, out and setup input values */
in = (double *)malloc( size );
out = (double *)malloc( size );
for( int i = 0; i < N; i++ )
{
in[i] = (double) i;
out[i] = 0;
}
/* copy inputs to device */
checkCUDA( hipMemcpy( d_in, in, size, hipMemcpyHostToDevice ) );
checkCUDA( hipMemset( d_out, 0, size ) );
/* calculate block and grid sizes */
dim3 threads( THREADS_PER_BLOCK, 1, 1);
/* insert code for proper number of blocks in X dimension */
dim3 blocks( (N + threads.x - 1) / threads.x, 1, 1);
/* start the timers */
hipEvent_t start, stop;
checkCUDA( hipEventCreate( &start ) );
checkCUDA( hipEventCreate( &stop ) );
checkCUDA( hipEventRecord( start, 0 ) );
/* launch the kernel on the GPU */
hipLaunchKernelGGL(( stencil_1d), dim3(blocks), dim3(threads) , 0, 0, N, d_in, d_out );
checkKERNEL()
/* stop the timers */
checkCUDA( hipEventRecord( stop, 0 ) );
checkCUDA( hipEventSynchronize( stop ) );
float elapsedTime;
checkCUDA( hipEventElapsedTime( &elapsedTime, start, stop ) );
printf("Total time for %d elements was %f ms\n", N, elapsedTime );
/* copy result back to host */
checkCUDA( hipMemcpy( out, d_out, size, hipMemcpyDeviceToHost ) );
int success = 1;
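/* since in[i] == i, the interior window sum over i-RADIUS..i+RADIUS equals
   (2*RADIUS+1)*i, and the boundary branch writes that same value, so every
   element can be checked against in[i]*( (double)RADIUS*2+1 ) */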
for( int i = 0; i < N; i++ )
{
if( in[i]*( (double)RADIUS*2+1 ) != out[i] )
{
printf("error in element %d in = %f out %f\n",i,in[i],out[i] );
success = 0;
break;
} /* end if */
} /* end for */
if( success == 1 ) printf("PASS\n");
else printf("FAIL\n");
/* clean up */
free(in);
free(out);
checkCUDA( hipFree( d_in ) );
checkCUDA( hipFree( d_out ) );
checkCUDA( hipDeviceReset() );
return 0;
} /* end main */
| 1faa1da66fc83d89e8a5787512287a49d7802161.cu | /*
* Copyright 2016 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include "../debug.h"
#define N ( 2048 * 2048 )
#define RADIUS 7
#define THREADS_PER_BLOCK 512
__global__ void stencil_1d(int n, double *in, double *out)
{
/* calculate global index in the array */
/* insert code to calculate global index in the array using block
and thread built-in variables */
int globalIndex = blockIdx.x * blockDim.x + threadIdx.x;
/* return if my global index is larger than the array size */
if( globalIndex >= n ) return;
/* code to handle the boundary conditions */
if( globalIndex < RADIUS || globalIndex >= (n - RADIUS) )
{
out[globalIndex] = (double) globalIndex * ( (double)RADIUS*2 + 1) ;
return;
} /* end if */
double result = 0.0;
for( int i = globalIndex-(RADIUS); i <= globalIndex+(RADIUS); i++ )
{
/* add the required elements from the array "in" to the temporary
   variable "result" */
result += in[i];
}
out[globalIndex] = result;
return;
}
int main()
{
/* get GPU device number and name */
int dev;
cudaDeviceProp deviceProp;
checkCUDA( cudaGetDevice( &dev ) );
checkCUDA( cudaGetDeviceProperties( &deviceProp, dev ) );
printf("Using GPU %d: %s\n", dev, deviceProp.name );
double *in, *out;
double *d_in, *d_out;
int size = N * sizeof( double );
/* allocate space for device copies of in, out */
checkCUDA( cudaMalloc( (void **) &d_in, size ) );
checkCUDA( cudaMalloc( (void **) &d_out, size ) );
/* allocate space for host copies of in, out and setup input values */
in = (double *)malloc( size );
out = (double *)malloc( size );
for( int i = 0; i < N; i++ )
{
in[i] = (double) i;
out[i] = 0;
}
/* copy inputs to device */
checkCUDA( cudaMemcpy( d_in, in, size, cudaMemcpyHostToDevice ) );
checkCUDA( cudaMemset( d_out, 0, size ) );
/* calculate block and grid sizes */
dim3 threads( THREADS_PER_BLOCK, 1, 1);
/* insert code for proper number of blocks in X dimension */
dim3 blocks( (N + threads.x - 1) / threads.x, 1, 1);
/* start the timers */
cudaEvent_t start, stop;
checkCUDA( cudaEventCreate( &start ) );
checkCUDA( cudaEventCreate( &stop ) );
checkCUDA( cudaEventRecord( start, 0 ) );
/* launch the kernel on the GPU */
stencil_1d<<< blocks, threads >>>( N, d_in, d_out );
checkKERNEL()
/* stop the timers */
checkCUDA( cudaEventRecord( stop, 0 ) );
checkCUDA( cudaEventSynchronize( stop ) );
float elapsedTime;
checkCUDA( cudaEventElapsedTime( &elapsedTime, start, stop ) );
printf("Total time for %d elements was %f ms\n", N, elapsedTime );
/* copy result back to host */
checkCUDA( cudaMemcpy( out, d_out, size, cudaMemcpyDeviceToHost ) );
int success = 1;
for( int i = 0; i < N; i++ )
{
if( in[i]*( (double)RADIUS*2+1 ) != out[i] )
{
printf("error in element %d in = %f out %f\n",i,in[i],out[i] );
success = 0;
break;
} /* end if */
} /* end for */
if( success == 1 ) printf("PASS\n");
else printf("FAIL\n");
/* clean up */
free(in);
free(out);
checkCUDA( cudaFree( d_in ) );
checkCUDA( cudaFree( d_out ) );
checkCUDA( cudaDeviceReset() );
return 0;
} /* end main */
|
e64492f86b9c359ca9a11d0d0a1b4bfce6c621b7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/native/group_norm.h>
#include <type_traits>
#include <thrust/tuple.h>
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/block_reduce.cuh>
#include <c10/hip/HIPMathCompat.h>
namespace at {
namespace native {
namespace {
constexpr int kCUDANumThreads = 256;
constexpr int kReduceTileSize = 32;
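// One block per row of the flattened (N * G, D * HxW) view: the block reduces
// the row's sum and sum of squares, then thread 0 writes mean = sum / N and
// rstd = 1 / sqrt(max(sumsq / N - mean^2, 0) + eps), where N here is the row
// length passed as the first argument.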
template <typename T>
__global__ void RowwiseMomentsCUDAKernel(
int64_t N,
T eps,
const T* X,
T* mean,
T* rstd) {
using T_ACC = acc_type<T, true>;
const int64_t i = blockIdx.x;
T_ACC sum1 = 0;
T_ACC sum2 = 0;
for (int64_t j = threadIdx.x; j < N; j += blockDim.x) {
const int64_t index = i * N + j;
sum1 += static_cast<T_ACC>(X[index]);
sum2 += static_cast<T_ACC>(X[index]) * static_cast<T_ACC>(X[index]);
}
if (blockDim.x <= C10_WARP_SIZE) {
sum1 = cuda_utils::WarpReduceSum<T_ACC>(sum1);
sum2 = cuda_utils::WarpReduceSum<T_ACC>(sum2);
} else {
__shared__ T_ACC m_shared[C10_WARP_SIZE];
__shared__ T_ACC v_shared[C10_WARP_SIZE];
sum1 = cuda_utils::BlockReduceSum<T_ACC>(sum1, m_shared);
sum2 = cuda_utils::BlockReduceSum<T_ACC>(sum2, v_shared);
}
if (threadIdx.x == 0) {
const T_ACC scale = T_ACC(1) / static_cast<T_ACC>(N);
sum1 *= scale;
sum2 = c10::hip::compat::max(sum2 * scale - sum1 * sum1, T_ACC(0));
mean[i] = sum1;
rstd[i] = c10::hip::compat::rsqrt(sum2 + static_cast<T_ACC>(eps));
}
}
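// Folds normalization and the optional affine parameters into per-(n, c)
// coefficients a = rstd * gamma and b = beta - a * mean, so the forward pass
// can later apply Y = a * X + b elementwise.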
template <typename T>
__global__ void ComputeFusedParamsCUDAKernel(
int64_t N,
int64_t C,
int64_t group,
const T* mean,
const T* rstd,
const T* gamma,
const T* beta,
acc_type<T, true>* a,
acc_type<T, true>* b) {
using T_ACC = acc_type<T, true>;
const int64_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < N * C) {
const int64_t ng = index / (C / group);
const int64_t c = index % C;
const T_ACC scale = (gamma == nullptr)
? static_cast<T_ACC>(rstd[ng])
: static_cast<T_ACC>(rstd[ng]) * static_cast<T_ACC>(gamma[c]);
a[index] = scale;
b[index] = -scale * static_cast<T_ACC>(mean[ng]) +
((beta == nullptr) ? 0 : static_cast<T_ACC>(beta[c]));
}
}
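// Backward fused parameters for the HxW == 1 path: per (n, g) group, reduce
// sum1 = sum(dY * X * gamma) and sum2 = sum(dY * gamma) over the D channels,
// then derive the c2 and c3 coefficients of dX = c1 * dY + c2 * X + c3 applied
// in GroupNorm1dBackward below.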
template <typename T>
__global__ void Compute1dBackwardFusedParamsCUDAKernel(
int64_t C,
int64_t group,
const T* dY,
const T* X,
const T* mean,
const T* rstd,
const T* gamma,
acc_type<T, true>* c2,
acc_type<T, true>* c3) {
using T_ACC = acc_type<T, true>;
const int64_t G = group;
const int64_t D = C / G;
const int64_t n = blockIdx.x;
const int64_t g = blockIdx.y;
const int64_t ng = n * G + g;
T_ACC sum1 = 0;
T_ACC sum2 = 0;
for (int64_t i = threadIdx.x; i < D; i += blockDim.x) {
const int64_t index = ng * D + i;
const int64_t c = g * D + i;
const T_ACC gamma_v =
gamma == nullptr ? T_ACC(1) : static_cast<T_ACC>(gamma[c]);
sum1 += dY[index] * X[index] * gamma_v;
sum2 += dY[index] * gamma_v;
}
if (blockDim.x <= C10_WARP_SIZE) {
sum1 = cuda_utils::WarpReduceSum<T_ACC>(sum1);
sum2 = cuda_utils::WarpReduceSum<T_ACC>(sum2);
} else {
__shared__ T_ACC ds_shared[C10_WARP_SIZE];
__shared__ T_ACC db_shared[C10_WARP_SIZE];
sum1 = cuda_utils::BlockReduceSum<T_ACC>(sum1, ds_shared);
sum2 = cuda_utils::BlockReduceSum<T_ACC>(sum2, db_shared);
}
if (threadIdx.x == 0) {
const T_ACC s = T_ACC(1) / static_cast<T_ACC>(D);
const T_ACC x = (sum2 * static_cast<T_ACC>(mean[ng]) - sum1) *
static_cast<T_ACC>(rstd[ng]) * static_cast<T_ACC>(rstd[ng]) *
static_cast<T_ACC>(rstd[ng]) * s;
c2[ng] = x;
c3[ng] = -x * static_cast<T_ACC>(mean[ng]) -
sum2 * static_cast<T_ACC>(rstd[ng]) * s;
}
}
template <typename T>
__global__ void GammaBeta1dBackwardCUDAKernel1(
int64_t N,
int64_t C,
int64_t group,
const T* dY,
const T* X,
const T* mean,
const T* rstd,
T* dgamma,
T* dbeta) {
using T_ACC = acc_type<T, true>;
const int64_t c = blockIdx.x * blockDim.x + threadIdx.x;
if (c < C) {
const int64_t G = group;
const int64_t D = C / G;
T_ACC sum1 = 0;
T_ACC sum2 = 0;
for (int64_t n = 0; n < N; ++n) {
const int64_t nc = n * C + c;
const int64_t ng = n * G + c / D;
const T_ACC dy_acc = static_cast<T_ACC>(dY[nc]);
const T_ACC x_acc = static_cast<T_ACC>(X[nc]);
sum1 += (dgamma == nullptr)
? T_ACC(0)
: ((dy_acc * x_acc - dy_acc * static_cast<T_ACC>(mean[ng])) *
static_cast<T_ACC>(rstd[ng]));
sum2 += (dbeta == nullptr) ? T_ACC(0) : dy_acc;
}
if (dgamma != nullptr) {
dgamma[c] = sum1;
}
if (dbeta != nullptr) {
dbeta[c] = sum2;
}
}
}
template <typename T>
__global__ void GammaBeta1dBackwardCUDAKernel2(
int64_t N,
int64_t C,
int64_t group,
const T* dY,
const T* X,
const T* mean,
const T* rstd,
T* dgamma,
T* dbeta) {
using T_ACC = acc_type<T, true>;
__shared__ T_ACC g_shared[kReduceTileSize][kReduceTileSize + 1];
__shared__ T_ACC b_shared[kReduceTileSize][kReduceTileSize + 1];
const int64_t c = blockIdx.x * blockDim.x + threadIdx.x;
T_ACC dg_sum1 = 0;
T_ACC dg_sum2 = 0;
T_ACC db_sum1 = 0;
T_ACC db_sum2 = 0;
if (c < C) {
const int64_t G = group;
const int64_t D = C / G;
// Accumulate each 32 cols into a 32 * 32 tile.
// Since the blockDim is (32, 16), each thread accumulates two of the tile's
// 32 rows: one from the 1st 16 rows and one from the 2nd 16 rows.
for (int64_t n = threadIdx.y; n < N; n += blockDim.y * 2) {
const int64_t n1 = n;
const int64_t n2 = n + blockDim.y;
const int64_t nc1 = n1 * C + c;
const int64_t nc2 = n2 * C + c;
const int64_t ng1 = n1 * G + c / D;
const int64_t ng2 = n2 * G + c / D;
const T_ACC dy1_acc = static_cast<T_ACC>(dY[nc1]);
const T_ACC x1_acc = static_cast<T_ACC>(X[nc1]);
dg_sum1 += dgamma == nullptr
? T_ACC(0)
: ((dy1_acc * x1_acc - dy1_acc * static_cast<T_ACC>(mean[ng1])) *
static_cast<T_ACC>(rstd[ng1]));
db_sum1 += dbeta == nullptr ? T_ACC(0) : dy1_acc;
if (n2 < N) {
const T_ACC dy2_acc = static_cast<T_ACC>(dY[nc2]);
const T_ACC x2_acc = static_cast<T_ACC>(X[nc2]);
dg_sum2 += dgamma == nullptr
? T_ACC(0)
: ((dy2_acc * x2_acc - dy2_acc * static_cast<T_ACC>(mean[ng2])) *
static_cast<T_ACC>(rstd[ng2]));
db_sum2 += dbeta == nullptr ? T_ACC(0) : dy2_acc;
}
}
}
// Write accumulated tile to shared memory.
g_shared[threadIdx.y][threadIdx.x] = dg_sum1;
g_shared[threadIdx.y + blockDim.y][threadIdx.x] = dg_sum2;
b_shared[threadIdx.y][threadIdx.x] = db_sum1;
b_shared[threadIdx.y + blockDim.y][threadIdx.x] = db_sum2;
__syncthreads();
// Do warp reduce for the 1st 16 cols in the tile.
T_ACC sum1 = g_shared[threadIdx.x][threadIdx.y];
T_ACC sum2 = b_shared[threadIdx.x][threadIdx.y];
sum1 = cuda_utils::WarpReduceSum<T_ACC>(sum1);
sum2 = cuda_utils::WarpReduceSum<T_ACC>(sum2);
if (threadIdx.x == 0) {
const int64_t c = blockIdx.x * blockDim.x + threadIdx.y;
if (c < C) {
if (dgamma != nullptr) {
dgamma[c] = sum1;
}
if (dbeta != nullptr) {
dbeta[c] = sum2;
}
}
}
// Do warp reduce for the 2nd 16 cols in the tile.
sum1 = g_shared[threadIdx.x][threadIdx.y + blockDim.y];
sum2 = b_shared[threadIdx.x][threadIdx.y + blockDim.y];
sum1 = cuda_utils::WarpReduceSum<T_ACC>(sum1);
sum2 = cuda_utils::WarpReduceSum<T_ACC>(sum2);
if (threadIdx.x == 0) {
const int64_t c = blockIdx.x * blockDim.x + threadIdx.y + blockDim.y;
if (c < C) {
if (dgamma != nullptr) {
dgamma[c] = sum1;
}
if (dbeta != nullptr) {
dbeta[c] = sum2;
}
}
}
}
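// General (HxW > 1) backward, step one: one block per (n, c) plane reduces
// ds = sum(dY * X) and db = sum(dY) over the spatial positions; these feed the
// fused dX coefficients and the dgamma/dbeta reductions below.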
template <typename T>
__global__ void ComputeInternalGradientsCUDAKernel(
int64_t HxW,
const T* dY,
const T* X,
acc_type<T, true>* ds,
acc_type<T, true>* db) {
using T_ACC = acc_type<T, true>;
const int64_t nc = blockIdx.x;
T_ACC sum1 = 0;
T_ACC sum2 = 0;
for (int64_t hw = threadIdx.x; hw < HxW; hw += blockDim.x) {
const int64_t index = nc * HxW + hw;
sum1 += static_cast<T_ACC>(dY[index]) * static_cast<T_ACC>(X[index]);
sum2 += static_cast<T_ACC>(dY[index]);
}
if (blockDim.x <= C10_WARP_SIZE) {
sum1 = cuda_utils::WarpReduceSum<T_ACC>(sum1);
sum2 = cuda_utils::WarpReduceSum<T_ACC>(sum2);
} else {
__shared__ T_ACC ds_shared[C10_WARP_SIZE];
__shared__ T_ACC db_shared[C10_WARP_SIZE];
sum1 = cuda_utils::BlockReduceSum<T_ACC>(sum1, ds_shared);
sum2 = cuda_utils::BlockReduceSum<T_ACC>(sum2, db_shared);
}
if (threadIdx.x == 0) {
ds[nc] = sum1;
db[nc] = sum2;
}
}
template <typename T>
__global__ void ComputeBackwardFusedParamsCUDAKernel(
int64_t C,
int64_t HxW,
int64_t group,
const T* mean,
const T* rstd,
const T* gamma,
const acc_type<T, true>* ds,
const acc_type<T, true>* db,
acc_type<T, true>* c2,
acc_type<T, true>* c3) {
using T_ACC = acc_type<T, true>;
const int64_t G = group;
const int64_t D = C / G;
const int64_t n = blockIdx.x;
const int64_t g = blockIdx.y;
const int64_t ng = n * G + g;
T_ACC sum1 = 0;
T_ACC sum2 = 0;
for (int64_t i = threadIdx.x; i < D; i += blockDim.x) {
const int64_t index = ng * D + i;
const int64_t c = g * D + i;
const T_ACC gamma_v =
gamma == nullptr ? T_ACC(1) : static_cast<T_ACC>(gamma[c]);
sum1 += ds[index] * gamma_v;
sum2 += db[index] * gamma_v;
}
if (blockDim.x <= C10_WARP_SIZE) {
sum1 = cuda_utils::WarpReduceSum<T_ACC>(sum1);
sum2 = cuda_utils::WarpReduceSum<T_ACC>(sum2);
} else {
__shared__ T_ACC ds_shared[C10_WARP_SIZE];
__shared__ T_ACC db_shared[C10_WARP_SIZE];
sum1 = cuda_utils::BlockReduceSum<T_ACC>(sum1, ds_shared);
sum2 = cuda_utils::BlockReduceSum<T_ACC>(sum2, db_shared);
}
if (threadIdx.x == 0) {
const T_ACC s = T_ACC(1) / static_cast<T_ACC>(D * HxW);
const T_ACC x = (sum2 * static_cast<T_ACC>(mean[ng]) - sum1) *
static_cast<T_ACC>(rstd[ng]) * static_cast<T_ACC>(rstd[ng]) *
static_cast<T_ACC>(rstd[ng]) * s;
c2[ng] = x;
c3[ng] = -x * static_cast<T_ACC>(mean[ng]) -
sum2 * static_cast<T_ACC>(rstd[ng]) * s;
}
}
template <typename T>
__global__ void GammaBetaBackwardCUDAKernel1(
int64_t N,
int64_t C,
int64_t group,
const T* mean,
const T* rstd,
const acc_type<T, true>* ds,
const acc_type<T, true>* db,
T* dgamma,
T* dbeta) {
using T_ACC = acc_type<T, true>;
const int64_t c = blockIdx.x * blockDim.x + threadIdx.x;
if (c < C) {
const int64_t G = group;
const int64_t D = C / G;
T_ACC sum1 = 0;
T_ACC sum2 = 0;
for (int64_t n = 0; n < N; ++n) {
const int64_t nc = n * C + c;
const int64_t ng = n * G + c / D;
sum1 += (dgamma == nullptr)
? T_ACC(0)
: ((ds[nc] - db[nc] * static_cast<T_ACC>(mean[ng])) *
static_cast<T_ACC>(rstd[ng]));
sum2 += (dbeta == nullptr) ? T_ACC(0) : db[nc];
}
if (dgamma != nullptr) {
dgamma[c] = sum1;
}
if (dbeta != nullptr) {
dbeta[c] = sum2;
}
}
}
template <typename T>
__global__ void GammaBetaBackwardCUDAKernel2(
int64_t N,
int64_t C,
int64_t group,
const T* mean,
const T* rstd,
const acc_type<T, true>* ds,
const acc_type<T, true>* db,
T* dgamma,
T* dbeta) {
using T_ACC = acc_type<T, true>;
__shared__ T_ACC g_shared[kReduceTileSize][kReduceTileSize + 1];
__shared__ T_ACC b_shared[kReduceTileSize][kReduceTileSize + 1];
const int64_t c = blockIdx.x * blockDim.x + threadIdx.x;
T_ACC dg_sum1 = 0;
T_ACC dg_sum2 = 0;
T_ACC db_sum1 = 0;
T_ACC db_sum2 = 0;
if (c < C) {
const int64_t G = group;
const int64_t D = C / G;
// Accumulate each 32 cols into a 32 * 32 tile.
// Since the blockDim is (32, 16), each thread accumulates two of the tile's
// 32 rows: one from the 1st 16 rows and one from the 2nd 16 rows.
for (int64_t n = threadIdx.y; n < N; n += blockDim.y * 2) {
const int64_t n1 = n;
const int64_t n2 = n + blockDim.y;
const int64_t nc1 = n1 * C + c;
const int64_t nc2 = n2 * C + c;
const int64_t ng1 = n1 * G + c / D;
const int64_t ng2 = n2 * G + c / D;
dg_sum1 += dgamma == nullptr
? T_ACC(0)
: ((ds[nc1] - db[nc1] * static_cast<T_ACC>(mean[ng1])) *
static_cast<T_ACC>(rstd[ng1]));
db_sum1 += dbeta == nullptr ? T_ACC(0) : db[nc1];
if (n2 < N) {
dg_sum2 += dgamma == nullptr
? T_ACC(0)
: ((ds[nc2] - db[nc2] * static_cast<T_ACC>(mean[ng2])) *
static_cast<T_ACC>(rstd[ng2]));
db_sum2 += dbeta == nullptr ? T_ACC(0) : db[nc2];
}
}
}
// Write accumulated tile to shared memory.
g_shared[threadIdx.y][threadIdx.x] = dg_sum1;
g_shared[threadIdx.y + blockDim.y][threadIdx.x] = dg_sum2;
b_shared[threadIdx.y][threadIdx.x] = db_sum1;
b_shared[threadIdx.y + blockDim.y][threadIdx.x] = db_sum2;
__syncthreads();
// Do warp reduce for the 1st 16 cols in the tile.
T_ACC sum1 = g_shared[threadIdx.x][threadIdx.y];
T_ACC sum2 = b_shared[threadIdx.x][threadIdx.y];
sum1 = cuda_utils::WarpReduceSum<T_ACC>(sum1);
sum2 = cuda_utils::WarpReduceSum<T_ACC>(sum2);
if (threadIdx.x == 0) {
const int64_t c = blockIdx.x * blockDim.x + threadIdx.y;
if (c < C) {
if (dgamma != nullptr) {
dgamma[c] = sum1;
}
if (dbeta != nullptr) {
dbeta[c] = sum2;
}
}
}
// Do warp reduce for the 2nd 16 cols in the tile.
sum1 = g_shared[threadIdx.x][threadIdx.y + blockDim.y];
sum2 = b_shared[threadIdx.x][threadIdx.y + blockDim.y];
sum1 = cuda_utils::WarpReduceSum<T_ACC>(sum1);
sum2 = cuda_utils::WarpReduceSum<T_ACC>(sum2);
if (threadIdx.x == 0) {
const int64_t c = blockIdx.x * blockDim.x + threadIdx.y + blockDim.y;
if (c < C) {
if (dgamma != nullptr) {
dgamma[c] = sum1;
}
if (dbeta != nullptr) {
dbeta[c] = sum2;
}
}
}
}
template <typename T>
void GroupNorm1dForward(
const Tensor& X,
const Tensor& mean,
const Tensor& rstd,
const Tensor& gamma,
const Tensor& beta,
int64_t N,
int64_t C,
int64_t group,
Tensor& Y) {
using T_ACC = acc_type<T, true>;
const int64_t G = group;
const int64_t D = C / G;
if (gamma.defined() && beta.defined()) {
auto iter = TensorIteratorConfig()
.resize_outputs(false)
.add_output(Y.view({N, G, D}))
.add_input(X.view({N, G, D}))
.add_input(mean.view({N, G, 1}))
.add_input(rstd.view({N, G, 1}))
.add_input(gamma.view({1, G, D}))
.add_input(beta.view({1, G, D}))
.build();
gpu_kernel(iter, [] GPU_LAMBDA(T x, T mean, T rstd, T gamma, T beta) -> T {
return (static_cast<T_ACC>(x) - static_cast<T_ACC>(mean)) *
static_cast<T_ACC>(rstd) * static_cast<T_ACC>(gamma) +
static_cast<T_ACC>(beta);
});
} else if (gamma.defined()) {
auto iter = TensorIteratorConfig()
.resize_outputs(false)
.add_output(Y.view({N, G, D}))
.add_input(X.view({N, G, D}))
.add_input(mean.view({N, G, 1}))
.add_input(rstd.view({N, G, 1}))
.add_input(gamma.view({1, G, D}))
.build();
gpu_kernel(iter, [] GPU_LAMBDA(T x, T mean, T rstd, T gamma) -> T {
return (static_cast<T_ACC>(x) - static_cast<T_ACC>(mean)) *
static_cast<T_ACC>(rstd) * static_cast<T_ACC>(gamma);
});
} else if (beta.defined()) {
auto iter = TensorIteratorConfig()
.resize_outputs(false)
.add_output(Y.view({N, G, D}))
.add_input(X.view({N, G, D}))
.add_input(mean.view({N, G, 1}))
.add_input(rstd.view({N, G, 1}))
.add_input(beta.view({1, G, D}))
.build();
gpu_kernel(iter, [] GPU_LAMBDA(T x, T mean, T rstd, T beta) -> T {
return (static_cast<T_ACC>(x) - static_cast<T_ACC>(mean)) *
static_cast<T_ACC>(rstd) +
static_cast<T_ACC>(beta);
});
} else {
auto iter = TensorIteratorConfig()
.resize_outputs(false)
.add_output(Y.view({N * G, D}))
.add_input(X.view({N * G, D}))
.add_input(mean.view({N * G, 1}))
.add_input(rstd.view({N * G, 1}))
.build();
gpu_kernel(iter, [] GPU_LAMBDA(T x, T mean, T rstd) -> T {
return (static_cast<T_ACC>(x) - static_cast<T_ACC>(mean)) *
static_cast<T_ACC>(rstd);
});
}
AT_CUDA_CHECK(hipGetLastError());
}
template <typename T>
void GroupNormKernelImplInternal(
const Tensor& X,
const Tensor& gamma,
const Tensor& beta,
int64_t N,
int64_t C,
int64_t HxW,
int64_t group,
T eps,
Tensor& Y,
Tensor& mean,
Tensor& rstd) {
using T_ACC = acc_type<T, true>;
TORCH_CHECK(X.numel() == N * C * HxW);
TORCH_CHECK(!gamma.defined() || gamma.numel() == C);
TORCH_CHECK(!beta.defined() || beta.numel() == C);
if (N == 0) {
return;
}
const int64_t G = group;
const int64_t D = C / G;
const T* X_data = X.data_ptr<T>();
T* Y_data = Y.data_ptr<T>();
T* mean_data = mean.data_ptr<T>();
T* rstd_data = rstd.data_ptr<T>();
hipStream_t cuda_stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
const int64_t num_threads = D * HxW < cuda_utils::kCUDABlockReduceNumThreads
? C10_WARP_SIZE
: cuda_utils::kCUDABlockReduceNumThreads;
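// A single warp suffices when each (n, g) row is shorter than the block-reduce
// threshold; otherwise launch a full block so the kernel takes the
// BlockReduceSum path instead of the warp-only path.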
hipLaunchKernelGGL(( RowwiseMomentsCUDAKernel<T>), dim3(N * G), dim3(num_threads), 0, cuda_stream,
D * HxW, eps, X_data, mean_data, rstd_data);
TORCH_CUDA_KERNEL_LAUNCH_CHECK();
if (HxW == 1) {
GroupNorm1dForward<T>(X, mean, rstd, gamma, beta, N, C, G, Y);
} else if (!gamma.defined() && !beta.defined()) {
auto iter = TensorIteratorConfig()
.resize_outputs(false)
.add_output(Y.view({N * G, D * HxW}))
.add_input(X.view({N * G, D * HxW}))
.add_input(mean.view({N * G, 1}))
.add_input(rstd.view({N * G, 1}))
.build();
gpu_kernel(iter, [] GPU_LAMBDA(T x, T mean, T rstd) -> T {
return (static_cast<T_ACC>(x) - static_cast<T_ACC>(mean)) *
static_cast<T_ACC>(rstd);
});
} else {
const auto kAccType =
(X.scalar_type() == kHalf || X.scalar_type() == kBFloat16)
? kFloat
: X.scalar_type();
Tensor a = at::empty({N, C}, X.options().dtype(kAccType));
Tensor b = at::empty({N, C}, X.options().dtype(kAccType));
const T* gamma_data = gamma.defined() ? gamma.data_ptr<T>() : nullptr;
const T* beta_data = beta.defined() ? beta.data_ptr<T>() : nullptr;
T_ACC* a_data = a.data_ptr<T_ACC>();
T_ACC* b_data = b.data_ptr<T_ACC>();
// TODO: Since there are some issues in gpu_kernel_multiple_outputs, we are
// using a manual kernel here. Switch to gpu_kernel_multiple_outputs once
// the issue is fixed.
const int64_t B = (N * C + kCUDANumThreads - 1) / kCUDANumThreads;
hipLaunchKernelGGL(( ComputeFusedParamsCUDAKernel<T>), dim3(B), dim3(kCUDANumThreads), 0, cuda_stream,
N, C, G, mean_data, rstd_data, gamma_data, beta_data, a_data, b_data);
TORCH_CUDA_KERNEL_LAUNCH_CHECK();
auto iter = TensorIteratorConfig()
.check_all_same_dtype(std::is_same<T, T_ACC>::value)
.resize_outputs(false)
.add_output(Y.view({N * C, HxW}))
.add_input(X.view({N * C, HxW}))
.add_input(a.view({N * C, 1}))
.add_input(b.view({N * C, 1}))
.build();
gpu_kernel(iter, [] GPU_LAMBDA(T x, T_ACC a, T_ACC b) -> T {
return a * static_cast<T_ACC>(x) + b;
});
}
}
void GroupNormKernelImpl(
const Tensor& X,
const Tensor& gamma,
const Tensor& beta,
int64_t N,
int64_t C,
int64_t HxW,
int64_t group,
double eps,
Tensor& Y,
Tensor& mean,
Tensor& rstd) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
X.scalar_type(),
"GroupNormKernelImpl",
[&]() {
GroupNormKernelImplInternal<scalar_t>(
X,
gamma,
beta,
N,
C,
HxW,
group,
static_cast<scalar_t>(eps),
Y,
mean,
rstd);
});
}
template <typename T>
void GroupNorm1dBackward(
const Tensor dY,
const Tensor X,
const Tensor mean,
const Tensor rstd,
const Tensor gamma,
int64_t N,
int64_t C,
int64_t group,
Tensor& dX,
Tensor& dgamma,
Tensor& dbeta) {
using T_ACC = acc_type<T, true>;
const int64_t G = group;
const int64_t D = C / G;
const T* dY_data = dY.data_ptr<T>();
const T* X_data = X.data_ptr<T>();
const T* mean_data = mean.data_ptr<T>();
const T* rstd_data = rstd.data_ptr<T>();
hipStream_t cuda_stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
if (dX.defined()) {
const T* gamma_data = gamma.defined() ? gamma.data_ptr<T>() : nullptr;
const auto kAccType =
(X.scalar_type() == kHalf || X.scalar_type() == kBFloat16)
? kFloat
: X.scalar_type();
Tensor c2 = at::empty({N, G}, X.options().dtype(kAccType));
Tensor c3 = at::empty({N, G}, X.options().dtype(kAccType));
T_ACC* c2_data = c2.data_ptr<T_ACC>();
T_ACC* c3_data = c3.data_ptr<T_ACC>();
const int64_t num_threads = (C / G) < cuda_utils::kCUDABlockReduceNumThreads
? C10_WARP_SIZE
: cuda_utils::kCUDABlockReduceNumThreads;
hipLaunchKernelGGL(( Compute1dBackwardFusedParamsCUDAKernel<T>)
, dim3(dim3(N, G)), dim3(num_threads), 0, cuda_stream,
C,
G,
dY_data,
X_data,
mean_data,
rstd_data,
gamma_data,
c2_data,
c3_data);
TORCH_CUDA_KERNEL_LAUNCH_CHECK();
if (gamma.defined()) {
auto iter = TensorIteratorConfig()
.check_all_same_dtype(std::is_same<T, T_ACC>::value)
.resize_outputs(false)
.add_output(dX.view({N, G, D}))
.add_input(dY.view({N, G, D}))
.add_input(X.view({N, G, D}))
.add_input(rstd.view({N, G, 1}))
.add_input(gamma.view({1, G, D}))
.add_input(c2.view({N, G, 1}))
.add_input(c3.view({N, G, 1}))
.build();
gpu_kernel(
iter,
[] GPU_LAMBDA(T dy, T x, T rstd, T gamma, T_ACC c2, T_ACC c3) -> T {
const T_ACC c1 =
static_cast<T_ACC>(rstd) * static_cast<T_ACC>(gamma);
return c1 * static_cast<T_ACC>(dy) + c2 * static_cast<T_ACC>(x) +
c3;
});
} else {
auto iter = TensorIteratorConfig()
.check_all_same_dtype(std::is_same<T, T_ACC>::value)
.resize_outputs(false)
.add_output(dX.view({N * G, D}))
.add_input(dY.view({N * G, D}))
.add_input(X.view({N * G, D}))
.add_input(rstd.view({N * G, 1}))
.add_input(c2.view({N * G, 1}))
.add_input(c3.view({N * G, 1}))
.build();
gpu_kernel(
iter, [] GPU_LAMBDA(T dy, T x, T rstd, T_ACC c2, T_ACC c3) -> T {
const T_ACC c1 = static_cast<T_ACC>(rstd);
return c1 * static_cast<T_ACC>(dy) + c2 * static_cast<T_ACC>(x) +
c3;
});
}
}
if (dgamma.defined() || dbeta.defined()) {
T* dgamma_data = dgamma.defined() ? dgamma.data_ptr<T>() : nullptr;
T* dbeta_data = dbeta.defined() ? dbeta.data_ptr<T>() : nullptr;
if (N <= 128) {
const int64_t B = (C + kCUDANumThreads - 1) / kCUDANumThreads;
hipLaunchKernelGGL(( GammaBeta1dBackwardCUDAKernel1<T>), dim3(B), dim3(kCUDANumThreads), 0, cuda_stream,
N,
C,
G,
dY_data,
X_data,
mean_data,
rstd_data,
dgamma_data,
dbeta_data);
TORCH_CUDA_KERNEL_LAUNCH_CHECK();
} else {
const int64_t B = (C + kReduceTileSize - 1) / kReduceTileSize;
// The algorithm for colwise reduction here is to accumulate each 32 cols
// to a 32 * 32 tile and write the tile to shared memory. Then do warp
// reduce for each col in the tile. So here the blockDim must be (32, 16).
constexpr int kThreadX = kReduceTileSize;
constexpr int kThreadY = kReduceTileSize / 2;
hipLaunchKernelGGL(( GammaBeta1dBackwardCUDAKernel2<T>)
, dim3(B), dim3(dim3(kThreadX, kThreadY)), 0, cuda_stream,
N,
C,
G,
dY_data,
X_data,
mean_data,
rstd_data,
dgamma_data,
dbeta_data);
TORCH_CUDA_KERNEL_LAUNCH_CHECK();
}
}
}
template <typename T>
void GroupNormBackwardKernelImplInternal(
const Tensor& dY,
const Tensor& X,
const Tensor& mean,
const Tensor& rstd,
const Tensor& gamma,
int64_t N,
int64_t C,
int64_t HxW,
int64_t group,
Tensor& dX,
Tensor& dgamma,
Tensor& dbeta) {
using T_ACC = acc_type<T, true>;
const int64_t G = group;
const int64_t D = C / G;
TORCH_CHECK(dY.numel() == N * C * HxW);
TORCH_CHECK(X.numel() == N * C * HxW);
TORCH_CHECK(mean.numel() == N * G);
TORCH_CHECK(rstd.numel() == N * G);
TORCH_CHECK(!gamma.defined() || gamma.numel() == C);
hipStream_t cuda_stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
if (N == 0) {
if (dgamma.defined()) {
dgamma.fill_(T(0));
}
if (dbeta.defined()) {
dbeta.fill_(T(0));
}
return;
}
const T* dY_data = dY.data_ptr<T>();
const T* X_data = X.data_ptr<T>();
const T* mean_data = mean.data_ptr<T>();
const T* rstd_data = rstd.data_ptr<T>();
const T* gamma_data = gamma.defined() ? gamma.data_ptr<T>() : nullptr;
const auto kAccType =
(X.scalar_type() == kHalf || X.scalar_type() == kBFloat16)
? kFloat
: X.scalar_type();
Tensor ds = at::empty({N, C}, X.options().dtype(kAccType));
Tensor db = at::empty({N, C}, X.options().dtype(kAccType));
T_ACC* ds_data = ds.data_ptr<T_ACC>();
T_ACC* db_data = db.data_ptr<T_ACC>();
if (HxW == 1) {
GroupNorm1dBackward<T>(
dY, X, mean, rstd, gamma, N, C, G, dX, dgamma, dbeta);
return;
}
int64_t num_threads = HxW < cuda_utils::kCUDABlockReduceNumThreads
? C10_WARP_SIZE
: cuda_utils::kCUDABlockReduceNumThreads;
hipLaunchKernelGGL(( ComputeInternalGradientsCUDAKernel<T>), dim3(N * C), dim3(num_threads), 0, cuda_stream,
HxW, dY_data, X_data, ds_data, db_data);
TORCH_CUDA_KERNEL_LAUNCH_CHECK();
if (dX.defined()) {
Tensor c1 = at::empty({0}, X.options().dtype(kAccType));
Tensor c2 = at::empty({N, G}, X.options().dtype(kAccType));
Tensor c3 = at::empty({N, G}, X.options().dtype(kAccType));
T_ACC* c2_data = c2.data_ptr<T_ACC>();
T_ACC* c3_data = c3.data_ptr<T_ACC>();
if (gamma.defined()) {
auto iter = TensorIteratorConfig()
.check_all_same_dtype(std::is_same<T, T_ACC>::value)
.add_output(c1)
.add_input(rstd.view({N, G, 1}))
.add_input(gamma.view({1, G, D}))
.build();
gpu_kernel(iter, [] GPU_LAMBDA(T rstd, T gamma) -> T_ACC {
return static_cast<T_ACC>(rstd) * static_cast<T_ACC>(gamma);
});
}
num_threads = (C / G) < cuda_utils::kCUDABlockReduceNumThreads
? C10_WARP_SIZE
: cuda_utils::kCUDABlockReduceNumThreads;
hipLaunchKernelGGL(( ComputeBackwardFusedParamsCUDAKernel<T>)
, dim3(dim3(N, G)), dim3(num_threads), 0, cuda_stream,
C,
HxW,
G,
mean_data,
rstd_data,
gamma_data,
ds_data,
db_data,
c2_data,
c3_data);
TORCH_CUDA_KERNEL_LAUNCH_CHECK();
if (gamma.defined()) {
auto iter = TensorIteratorConfig()
.check_all_same_dtype(std::is_same<T, T_ACC>::value)
.resize_outputs(false)
.add_output(dX.view({N * G, D, HxW}))
.add_input(dY.view({N * G, D, HxW}))
.add_input(X.view({N * G, D, HxW}))
.add_input(c1.view({N * G, D, 1}))
.add_input(c2.view({N * G, 1, 1}))
.add_input(c3.view({N * G, 1, 1}))
.build();
gpu_kernel(
iter, [] GPU_LAMBDA(T dy, T x, T_ACC c1, T_ACC c2, T_ACC c3) -> T {
return c1 * static_cast<T_ACC>(dy) + c2 * static_cast<T_ACC>(x) +
c3;
});
} else {
auto iter = TensorIteratorConfig()
.check_all_same_dtype(std::is_same<T, T_ACC>::value)
.resize_outputs(false)
.add_output(dX.view({N * G, D * HxW}))
.add_input(dY.view({N * G, D * HxW}))
.add_input(X.view({N * G, D * HxW}))
.add_input(rstd.view({N * G, 1}))
.add_input(c2.view({N * G, 1}))
.add_input(c3.view({N * G, 1}))
.build();
gpu_kernel(
iter, [] GPU_LAMBDA(T dy, T x, T_ACC c1, T_ACC c2, T_ACC c3) -> T {
return c1 * static_cast<T_ACC>(dy) + c2 * static_cast<T_ACC>(x) +
c3;
});
}
}
if (dgamma.defined() || dbeta.defined()) {
T* dgamma_data = dgamma.defined() ? dgamma.data_ptr<T>() : nullptr;
T* dbeta_data = dbeta.defined() ? dbeta.data_ptr<T>() : nullptr;
if (N <= 128) {
// For small batch size, do colwise reduce directly.
const int64_t B = (C + kCUDANumThreads - 1) / kCUDANumThreads;
hipLaunchKernelGGL(( GammaBetaBackwardCUDAKernel1<T>), dim3(B), dim3(kCUDANumThreads), 0, cuda_stream,
N,
C,
G,
mean_data,
rstd_data,
ds_data,
db_data,
dgamma_data,
dbeta_data);
TORCH_CUDA_KERNEL_LAUNCH_CHECK();
} else {
const int64_t B = (C + kReduceTileSize - 1) / kReduceTileSize;
// The algorithm for colwise reduction here is to accumulate each 32 cols
// to a 32 * 32 tile and write the tile to shared memory. Then do warp
// reduce for each col in the tile. So here the blockDim must be (32, 16).
constexpr int kThreadX = kReduceTileSize;
constexpr int kThreadY = kReduceTileSize / 2;
hipLaunchKernelGGL(( GammaBetaBackwardCUDAKernel2<T>)
, dim3(B), dim3(dim3(kThreadX, kThreadY)), 0, cuda_stream,
N,
C,
G,
mean_data,
rstd_data,
ds_data,
db_data,
dgamma_data,
dbeta_data);
TORCH_CUDA_KERNEL_LAUNCH_CHECK();
}
}
}
void GroupNormBackwardKernelImpl(
const Tensor& dY,
const Tensor& X,
const Tensor& mean,
const Tensor& rstd,
const Tensor& gamma,
int64_t N,
int64_t C,
int64_t HxW,
int64_t group,
Tensor& dX,
Tensor& dgamma,
Tensor& dbeta) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
X.scalar_type(),
"GroupNormBackwardKernelImpl",
[&]() {
GroupNormBackwardKernelImplInternal<scalar_t>(
dY, X, mean, rstd, gamma, N, C, HxW, group, dX, dgamma, dbeta);
});
}
} // namespace
REGISTER_DISPATCH(GroupNormKernel, &GroupNormKernelImpl);
REGISTER_DISPATCH(GroupNormBackwardKernel, &GroupNormBackwardKernelImpl);
} // namespace native
} // namespace at
| e64492f86b9c359ca9a11d0d0a1b4bfce6c621b7.cu | #include <ATen/native/group_norm.h>
#include <type_traits>
#include <thrust/tuple.h>
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/block_reduce.cuh>
#include <c10/cuda/CUDAMathCompat.h>
namespace at {
namespace native {
namespace {
constexpr int kCUDANumThreads = 256;
constexpr int kReduceTileSize = 32;
template <typename T>
__global__ void RowwiseMomentsCUDAKernel(
int64_t N,
T eps,
const T* X,
T* mean,
T* rstd) {
using T_ACC = acc_type<T, true>;
const int64_t i = blockIdx.x;
T_ACC sum1 = 0;
T_ACC sum2 = 0;
for (int64_t j = threadIdx.x; j < N; j += blockDim.x) {
const int64_t index = i * N + j;
sum1 += static_cast<T_ACC>(X[index]);
sum2 += static_cast<T_ACC>(X[index]) * static_cast<T_ACC>(X[index]);
}
if (blockDim.x <= C10_WARP_SIZE) {
sum1 = cuda_utils::WarpReduceSum<T_ACC>(sum1);
sum2 = cuda_utils::WarpReduceSum<T_ACC>(sum2);
} else {
__shared__ T_ACC m_shared[C10_WARP_SIZE];
__shared__ T_ACC v_shared[C10_WARP_SIZE];
sum1 = cuda_utils::BlockReduceSum<T_ACC>(sum1, m_shared);
sum2 = cuda_utils::BlockReduceSum<T_ACC>(sum2, v_shared);
}
if (threadIdx.x == 0) {
const T_ACC scale = T_ACC(1) / static_cast<T_ACC>(N);
sum1 *= scale;
sum2 = c10::cuda::compat::max(sum2 * scale - sum1 * sum1, T_ACC(0));
mean[i] = sum1;
rstd[i] = c10::cuda::compat::rsqrt(sum2 + static_cast<T_ACC>(eps));
}
}
template <typename T>
__global__ void ComputeFusedParamsCUDAKernel(
int64_t N,
int64_t C,
int64_t group,
const T* mean,
const T* rstd,
const T* gamma,
const T* beta,
acc_type<T, true>* a,
acc_type<T, true>* b) {
using T_ACC = acc_type<T, true>;
const int64_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < N * C) {
const int64_t ng = index / (C / group);
const int64_t c = index % C;
const T_ACC scale = (gamma == nullptr)
? static_cast<T_ACC>(rstd[ng])
: static_cast<T_ACC>(rstd[ng]) * static_cast<T_ACC>(gamma[c]);
a[index] = scale;
b[index] = -scale * static_cast<T_ACC>(mean[ng]) +
((beta == nullptr) ? 0 : static_cast<T_ACC>(beta[c]));
}
}
template <typename T>
__global__ void Compute1dBackwardFusedParamsCUDAKernel(
int64_t C,
int64_t group,
const T* dY,
const T* X,
const T* mean,
const T* rstd,
const T* gamma,
acc_type<T, true>* c2,
acc_type<T, true>* c3) {
using T_ACC = acc_type<T, true>;
const int64_t G = group;
const int64_t D = C / G;
const int64_t n = blockIdx.x;
const int64_t g = blockIdx.y;
const int64_t ng = n * G + g;
T_ACC sum1 = 0;
T_ACC sum2 = 0;
for (int64_t i = threadIdx.x; i < D; i += blockDim.x) {
const int64_t index = ng * D + i;
const int64_t c = g * D + i;
const T_ACC gamma_v =
gamma == nullptr ? T_ACC(1) : static_cast<T_ACC>(gamma[c]);
sum1 += dY[index] * X[index] * gamma_v;
sum2 += dY[index] * gamma_v;
}
if (blockDim.x <= C10_WARP_SIZE) {
sum1 = cuda_utils::WarpReduceSum<T_ACC>(sum1);
sum2 = cuda_utils::WarpReduceSum<T_ACC>(sum2);
} else {
__shared__ T_ACC ds_shared[C10_WARP_SIZE];
__shared__ T_ACC db_shared[C10_WARP_SIZE];
sum1 = cuda_utils::BlockReduceSum<T_ACC>(sum1, ds_shared);
sum2 = cuda_utils::BlockReduceSum<T_ACC>(sum2, db_shared);
}
if (threadIdx.x == 0) {
const T_ACC s = T_ACC(1) / static_cast<T_ACC>(D);
const T_ACC x = (sum2 * static_cast<T_ACC>(mean[ng]) - sum1) *
static_cast<T_ACC>(rstd[ng]) * static_cast<T_ACC>(rstd[ng]) *
static_cast<T_ACC>(rstd[ng]) * s;
c2[ng] = x;
c3[ng] = -x * static_cast<T_ACC>(mean[ng]) -
sum2 * static_cast<T_ACC>(rstd[ng]) * s;
}
}
template <typename T>
__global__ void GammaBeta1dBackwardCUDAKernel1(
int64_t N,
int64_t C,
int64_t group,
const T* dY,
const T* X,
const T* mean,
const T* rstd,
T* dgamma,
T* dbeta) {
using T_ACC = acc_type<T, true>;
const int64_t c = blockIdx.x * blockDim.x + threadIdx.x;
if (c < C) {
const int64_t G = group;
const int64_t D = C / G;
T_ACC sum1 = 0;
T_ACC sum2 = 0;
for (int64_t n = 0; n < N; ++n) {
const int64_t nc = n * C + c;
const int64_t ng = n * G + c / D;
const T_ACC dy_acc = static_cast<T_ACC>(dY[nc]);
const T_ACC x_acc = static_cast<T_ACC>(X[nc]);
sum1 += (dgamma == nullptr)
? T_ACC(0)
: ((dy_acc * x_acc - dy_acc * static_cast<T_ACC>(mean[ng])) *
static_cast<T_ACC>(rstd[ng]));
sum2 += (dbeta == nullptr) ? T_ACC(0) : dy_acc;
}
if (dgamma != nullptr) {
dgamma[c] = sum1;
}
if (dbeta != nullptr) {
dbeta[c] = sum2;
}
}
}
template <typename T>
__global__ void GammaBeta1dBackwardCUDAKernel2(
int64_t N,
int64_t C,
int64_t group,
const T* dY,
const T* X,
const T* mean,
const T* rstd,
T* dgamma,
T* dbeta) {
using T_ACC = acc_type<T, true>;
__shared__ T_ACC g_shared[kReduceTileSize][kReduceTileSize + 1];
__shared__ T_ACC b_shared[kReduceTileSize][kReduceTileSize + 1];
const int64_t c = blockIdx.x * blockDim.x + threadIdx.x;
T_ACC dg_sum1 = 0;
T_ACC dg_sum2 = 0;
T_ACC db_sum1 = 0;
T_ACC db_sum2 = 0;
if (c < C) {
const int64_t G = group;
const int64_t D = C / G;
// Accumulate each 32 cols into a 32 * 32 tile.
// Since the blockDim is (32, 16), accumulate twice, for the 1st and 2nd 16
// rows of each 32-row chunk.
for (int64_t n = threadIdx.y; n < N; n += blockDim.y * 2) {
const int64_t n1 = n;
const int64_t n2 = n + blockDim.y;
const int64_t nc1 = n1 * C + c;
const int64_t nc2 = n2 * C + c;
const int64_t ng1 = n1 * G + c / D;
const int64_t ng2 = n2 * G + c / D;
const T_ACC dy1_acc = static_cast<T_ACC>(dY[nc1]);
const T_ACC x1_acc = static_cast<T_ACC>(X[nc1]);
dg_sum1 += dgamma == nullptr
? T_ACC(0)
: ((dy1_acc * x1_acc - dy1_acc * static_cast<T_ACC>(mean[ng1])) *
static_cast<T_ACC>(rstd[ng1]));
db_sum1 += dbeta == nullptr ? T_ACC(0) : dy1_acc;
if (n2 < N) {
const T_ACC dy2_acc = static_cast<T_ACC>(dY[nc2]);
const T_ACC x2_acc = static_cast<T_ACC>(X[nc2]);
dg_sum2 += dgamma == nullptr
? T_ACC(0)
: ((dy2_acc * x2_acc - dy2_acc * static_cast<T_ACC>(mean[ng2])) *
static_cast<T_ACC>(rstd[ng2]));
db_sum2 += dbeta == nullptr ? T_ACC(0) : dy2_acc;
}
}
}
// Write accumulated tile to shared memory.
g_shared[threadIdx.y][threadIdx.x] = dg_sum1;
g_shared[threadIdx.y + blockDim.y][threadIdx.x] = dg_sum2;
b_shared[threadIdx.y][threadIdx.x] = db_sum1;
b_shared[threadIdx.y + blockDim.y][threadIdx.x] = db_sum2;
__syncthreads();
// Do warp reduce for the 1st 16 cols in the tile.
T_ACC sum1 = g_shared[threadIdx.x][threadIdx.y];
T_ACC sum2 = b_shared[threadIdx.x][threadIdx.y];
sum1 = cuda_utils::WarpReduceSum<T_ACC>(sum1);
sum2 = cuda_utils::WarpReduceSum<T_ACC>(sum2);
if (threadIdx.x == 0) {
const int64_t c = blockIdx.x * blockDim.x + threadIdx.y;
if (c < C) {
if (dgamma != nullptr) {
dgamma[c] = sum1;
}
if (dbeta != nullptr) {
dbeta[c] = sum2;
}
}
}
// Do warp reduce for the 2nd 16 cols in the tile.
sum1 = g_shared[threadIdx.x][threadIdx.y + blockDim.y];
sum2 = b_shared[threadIdx.x][threadIdx.y + blockDim.y];
sum1 = cuda_utils::WarpReduceSum<T_ACC>(sum1);
sum2 = cuda_utils::WarpReduceSum<T_ACC>(sum2);
if (threadIdx.x == 0) {
const int64_t c = blockIdx.x * blockDim.x + threadIdx.y + blockDim.y;
if (c < C) {
if (dgamma != nullptr) {
dgamma[c] = sum1;
}
if (dbeta != nullptr) {
dbeta[c] = sum2;
}
}
}
}
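// Per-(n, c) spatial reductions used by the backward pass: ds = sum(dY * X)
// and db = sum(dY) over HxW, one block per (n, c) row.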
template <typename T>
__global__ void ComputeInternalGradientsCUDAKernel(
int64_t HxW,
const T* dY,
const T* X,
acc_type<T, true>* ds,
acc_type<T, true>* db) {
using T_ACC = acc_type<T, true>;
const int64_t nc = blockIdx.x;
T_ACC sum1 = 0;
T_ACC sum2 = 0;
for (int64_t hw = threadIdx.x; hw < HxW; hw += blockDim.x) {
const int64_t index = nc * HxW + hw;
sum1 += static_cast<T_ACC>(dY[index]) * static_cast<T_ACC>(X[index]);
sum2 += static_cast<T_ACC>(dY[index]);
}
if (blockDim.x <= C10_WARP_SIZE) {
sum1 = cuda_utils::WarpReduceSum<T_ACC>(sum1);
sum2 = cuda_utils::WarpReduceSum<T_ACC>(sum2);
} else {
__shared__ T_ACC ds_shared[C10_WARP_SIZE];
__shared__ T_ACC db_shared[C10_WARP_SIZE];
sum1 = cuda_utils::BlockReduceSum<T_ACC>(sum1, ds_shared);
sum2 = cuda_utils::BlockReduceSum<T_ACC>(sum2, db_shared);
}
if (threadIdx.x == 0) {
ds[nc] = sum1;
db[nc] = sum2;
}
}
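// Folds the per-(n, c) ds/db sums and gamma into the per-(n, g) coefficients
// c2 and c3 of the elementwise backward formula dX = c1 * dY + c2 * X + c3,
// normalizing by D * HxW.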
template <typename T>
__global__ void ComputeBackwardFusedParamsCUDAKernel(
int64_t C,
int64_t HxW,
int64_t group,
const T* mean,
const T* rstd,
const T* gamma,
const acc_type<T, true>* ds,
const acc_type<T, true>* db,
acc_type<T, true>* c2,
acc_type<T, true>* c3) {
using T_ACC = acc_type<T, true>;
const int64_t G = group;
const int64_t D = C / G;
const int64_t n = blockIdx.x;
const int64_t g = blockIdx.y;
const int64_t ng = n * G + g;
T_ACC sum1 = 0;
T_ACC sum2 = 0;
for (int64_t i = threadIdx.x; i < D; i += blockDim.x) {
const int64_t index = ng * D + i;
const int64_t c = g * D + i;
const T_ACC gamma_v =
gamma == nullptr ? T_ACC(1) : static_cast<T_ACC>(gamma[c]);
sum1 += ds[index] * gamma_v;
sum2 += db[index] * gamma_v;
}
if (blockDim.x <= C10_WARP_SIZE) {
sum1 = cuda_utils::WarpReduceSum<T_ACC>(sum1);
sum2 = cuda_utils::WarpReduceSum<T_ACC>(sum2);
} else {
__shared__ T_ACC ds_shared[C10_WARP_SIZE];
__shared__ T_ACC db_shared[C10_WARP_SIZE];
sum1 = cuda_utils::BlockReduceSum<T_ACC>(sum1, ds_shared);
sum2 = cuda_utils::BlockReduceSum<T_ACC>(sum2, db_shared);
}
if (threadIdx.x == 0) {
const T_ACC s = T_ACC(1) / static_cast<T_ACC>(D * HxW);
const T_ACC x = (sum2 * static_cast<T_ACC>(mean[ng]) - sum1) *
static_cast<T_ACC>(rstd[ng]) * static_cast<T_ACC>(rstd[ng]) *
static_cast<T_ACC>(rstd[ng]) * s;
c2[ng] = x;
c3[ng] = -x * static_cast<T_ACC>(mean[ng]) -
sum2 * static_cast<T_ACC>(rstd[ng]) * s;
}
}
template <typename T>
__global__ void GammaBetaBackwardCUDAKernel1(
int64_t N,
int64_t C,
int64_t group,
const T* mean,
const T* rstd,
const acc_type<T, true>* ds,
const acc_type<T, true>* db,
T* dgamma,
T* dbeta) {
using T_ACC = acc_type<T, true>;
const int64_t c = blockIdx.x * blockDim.x + threadIdx.x;
if (c < C) {
const int64_t G = group;
const int64_t D = C / G;
T_ACC sum1 = 0;
T_ACC sum2 = 0;
for (int64_t n = 0; n < N; ++n) {
const int64_t nc = n * C + c;
const int64_t ng = n * G + c / D;
sum1 += (dgamma == nullptr)
? T_ACC(0)
: ((ds[nc] - db[nc] * static_cast<T_ACC>(mean[ng])) *
static_cast<T_ACC>(rstd[ng]));
sum2 += (dbeta == nullptr) ? T_ACC(0) : db[nc];
}
if (dgamma != nullptr) {
dgamma[c] = sum1;
}
if (dbeta != nullptr) {
dbeta[c] = sum2;
}
}
}
template <typename T>
__global__ void GammaBetaBackwardCUDAKernel2(
int64_t N,
int64_t C,
int64_t group,
const T* mean,
const T* rstd,
const acc_type<T, true>* ds,
const acc_type<T, true>* db,
T* dgamma,
T* dbeta) {
using T_ACC = acc_type<T, true>;
__shared__ T_ACC g_shared[kReduceTileSize][kReduceTileSize + 1];
__shared__ T_ACC b_shared[kReduceTileSize][kReduceTileSize + 1];
const int64_t c = blockIdx.x * blockDim.x + threadIdx.x;
T_ACC dg_sum1 = 0;
T_ACC dg_sum2 = 0;
T_ACC db_sum1 = 0;
T_ACC db_sum2 = 0;
if (c < C) {
const int64_t G = group;
const int64_t D = C / G;
// Accumulate each 32 cols into a 32 * 32 tile.
// Since the blockDim is (32, 16), accumulate twice, for the 1st and 2nd 16
// rows of each 32-row chunk.
for (int64_t n = threadIdx.y; n < N; n += blockDim.y * 2) {
const int64_t n1 = n;
const int64_t n2 = n + blockDim.y;
const int64_t nc1 = n1 * C + c;
const int64_t nc2 = n2 * C + c;
const int64_t ng1 = n1 * G + c / D;
const int64_t ng2 = n2 * G + c / D;
dg_sum1 += dgamma == nullptr
? T_ACC(0)
: ((ds[nc1] - db[nc1] * static_cast<T_ACC>(mean[ng1])) *
static_cast<T_ACC>(rstd[ng1]));
db_sum1 += dbeta == nullptr ? T_ACC(0) : db[nc1];
if (n2 < N) {
dg_sum2 += dgamma == nullptr
? T_ACC(0)
: ((ds[nc2] - db[nc2] * static_cast<T_ACC>(mean[ng2])) *
static_cast<T_ACC>(rstd[ng2]));
db_sum2 += dbeta == nullptr ? T_ACC(0) : db[nc2];
}
}
}
// Write accumulated tile to shared memory.
g_shared[threadIdx.y][threadIdx.x] = dg_sum1;
g_shared[threadIdx.y + blockDim.y][threadIdx.x] = dg_sum2;
b_shared[threadIdx.y][threadIdx.x] = db_sum1;
b_shared[threadIdx.y + blockDim.y][threadIdx.x] = db_sum2;
__syncthreads();
// Do warp reduce for the 1st 16 cols in the tile.
T_ACC sum1 = g_shared[threadIdx.x][threadIdx.y];
T_ACC sum2 = b_shared[threadIdx.x][threadIdx.y];
sum1 = cuda_utils::WarpReduceSum<T_ACC>(sum1);
sum2 = cuda_utils::WarpReduceSum<T_ACC>(sum2);
if (threadIdx.x == 0) {
const int64_t c = blockIdx.x * blockDim.x + threadIdx.y;
if (c < C) {
if (dgamma != nullptr) {
dgamma[c] = sum1;
}
if (dbeta != nullptr) {
dbeta[c] = sum2;
}
}
}
// Do warp reduce for the 2nd 16 cols in the tile.
sum1 = g_shared[threadIdx.x][threadIdx.y + blockDim.y];
sum2 = b_shared[threadIdx.x][threadIdx.y + blockDim.y];
sum1 = cuda_utils::WarpReduceSum<T_ACC>(sum1);
sum2 = cuda_utils::WarpReduceSum<T_ACC>(sum2);
if (threadIdx.x == 0) {
const int64_t c = blockIdx.x * blockDim.x + threadIdx.y + blockDim.y;
if (c < C) {
if (dgamma != nullptr) {
dgamma[c] = sum1;
}
if (dbeta != nullptr) {
dbeta[c] = sum2;
}
}
}
}
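// Forward path specialized for HxW == 1: the normalization reduces to a
// broadcasted elementwise expression over (N, G, D), so it is written with
// TensorIterator/gpu_kernel for each gamma/beta combination instead of a
// custom kernel.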
template <typename T>
void GroupNorm1dForward(
const Tensor& X,
const Tensor& mean,
const Tensor& rstd,
const Tensor& gamma,
const Tensor& beta,
int64_t N,
int64_t C,
int64_t group,
Tensor& Y) {
using T_ACC = acc_type<T, true>;
const int64_t G = group;
const int64_t D = C / G;
if (gamma.defined() && beta.defined()) {
auto iter = TensorIteratorConfig()
.resize_outputs(false)
.add_output(Y.view({N, G, D}))
.add_input(X.view({N, G, D}))
.add_input(mean.view({N, G, 1}))
.add_input(rstd.view({N, G, 1}))
.add_input(gamma.view({1, G, D}))
.add_input(beta.view({1, G, D}))
.build();
gpu_kernel(iter, [] GPU_LAMBDA(T x, T mean, T rstd, T gamma, T beta) -> T {
return (static_cast<T_ACC>(x) - static_cast<T_ACC>(mean)) *
static_cast<T_ACC>(rstd) * static_cast<T_ACC>(gamma) +
static_cast<T_ACC>(beta);
});
} else if (gamma.defined()) {
auto iter = TensorIteratorConfig()
.resize_outputs(false)
.add_output(Y.view({N, G, D}))
.add_input(X.view({N, G, D}))
.add_input(mean.view({N, G, 1}))
.add_input(rstd.view({N, G, 1}))
.add_input(gamma.view({1, G, D}))
.build();
gpu_kernel(iter, [] GPU_LAMBDA(T x, T mean, T rstd, T gamma) -> T {
return (static_cast<T_ACC>(x) - static_cast<T_ACC>(mean)) *
static_cast<T_ACC>(rstd) * static_cast<T_ACC>(gamma);
});
} else if (beta.defined()) {
auto iter = TensorIteratorConfig()
.resize_outputs(false)
.add_output(Y.view({N, G, D}))
.add_input(X.view({N, G, D}))
.add_input(mean.view({N, G, 1}))
.add_input(rstd.view({N, G, 1}))
.add_input(beta.view({1, G, D}))
.build();
gpu_kernel(iter, [] GPU_LAMBDA(T x, T mean, T rstd, T beta) -> T {
return (static_cast<T_ACC>(x) - static_cast<T_ACC>(mean)) *
static_cast<T_ACC>(rstd) +
static_cast<T_ACC>(beta);
});
} else {
auto iter = TensorIteratorConfig()
.resize_outputs(false)
.add_output(Y.view({N * G, D}))
.add_input(X.view({N * G, D}))
.add_input(mean.view({N * G, 1}))
.add_input(rstd.view({N * G, 1}))
.build();
gpu_kernel(iter, [] GPU_LAMBDA(T x, T mean, T rstd) -> T {
return (static_cast<T_ACC>(x) - static_cast<T_ACC>(mean)) *
static_cast<T_ACC>(rstd);
});
}
AT_CUDA_CHECK(cudaGetLastError());
}
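// Forward entry point: launches RowwiseMomentsCUDAKernel to get mean/rstd per
// (n, g), then applies the normalization via the 1d path (HxW == 1), a plain
// TensorIterator expression (no gamma/beta), or the fused a/b coefficients.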
template <typename T>
void GroupNormKernelImplInternal(
const Tensor& X,
const Tensor& gamma,
const Tensor& beta,
int64_t N,
int64_t C,
int64_t HxW,
int64_t group,
T eps,
Tensor& Y,
Tensor& mean,
Tensor& rstd) {
using T_ACC = acc_type<T, true>;
TORCH_CHECK(X.numel() == N * C * HxW);
TORCH_CHECK(!gamma.defined() || gamma.numel() == C);
TORCH_CHECK(!beta.defined() || beta.numel() == C);
if (N == 0) {
return;
}
const int64_t G = group;
const int64_t D = C / G;
const T* X_data = X.data_ptr<T>();
T* Y_data = Y.data_ptr<T>();
T* mean_data = mean.data_ptr<T>();
T* rstd_data = rstd.data_ptr<T>();
cudaStream_t cuda_stream = at::cuda::getCurrentCUDAStream();
const int64_t num_threads = D * HxW < cuda_utils::kCUDABlockReduceNumThreads
? C10_WARP_SIZE
: cuda_utils::kCUDABlockReduceNumThreads;
RowwiseMomentsCUDAKernel<T><<<N * G, num_threads, 0, cuda_stream>>>(
D * HxW, eps, X_data, mean_data, rstd_data);
TORCH_CUDA_KERNEL_LAUNCH_CHECK();
if (HxW == 1) {
GroupNorm1dForward<T>(X, mean, rstd, gamma, beta, N, C, G, Y);
} else if (!gamma.defined() && !beta.defined()) {
auto iter = TensorIteratorConfig()
.resize_outputs(false)
.add_output(Y.view({N * G, D * HxW}))
.add_input(X.view({N * G, D * HxW}))
.add_input(mean.view({N * G, 1}))
.add_input(rstd.view({N * G, 1}))
.build();
gpu_kernel(iter, [] GPU_LAMBDA(T x, T mean, T rstd) -> T {
return (static_cast<T_ACC>(x) - static_cast<T_ACC>(mean)) *
static_cast<T_ACC>(rstd);
});
} else {
const auto kAccType =
(X.scalar_type() == kHalf || X.scalar_type() == kBFloat16)
? kFloat
: X.scalar_type();
Tensor a = at::empty({N, C}, X.options().dtype(kAccType));
Tensor b = at::empty({N, C}, X.options().dtype(kAccType));
const T* gamma_data = gamma.defined() ? gamma.data_ptr<T>() : nullptr;
const T* beta_data = beta.defined() ? beta.data_ptr<T>() : nullptr;
T_ACC* a_data = a.data_ptr<T_ACC>();
T_ACC* b_data = b.data_ptr<T_ACC>();
// TODO: Since there are some issues in gpu_kernel_multiple_outputs, we are
// using a manual kernel here. Switch to gpu_kernel_multiple_outputs once the
// issue is fixed.
const int64_t B = (N * C + kCUDANumThreads - 1) / kCUDANumThreads;
ComputeFusedParamsCUDAKernel<T><<<B, kCUDANumThreads, 0, cuda_stream>>>(
N, C, G, mean_data, rstd_data, gamma_data, beta_data, a_data, b_data);
TORCH_CUDA_KERNEL_LAUNCH_CHECK();
auto iter = TensorIteratorConfig()
.check_all_same_dtype(std::is_same<T, T_ACC>::value)
.resize_outputs(false)
.add_output(Y.view({N * C, HxW}))
.add_input(X.view({N * C, HxW}))
.add_input(a.view({N * C, 1}))
.add_input(b.view({N * C, 1}))
.build();
gpu_kernel(iter, [] GPU_LAMBDA(T x, T_ACC a, T_ACC b) -> T {
return a * static_cast<T_ACC>(x) + b;
});
}
}
void GroupNormKernelImpl(
const Tensor& X,
const Tensor& gamma,
const Tensor& beta,
int64_t N,
int64_t C,
int64_t HxW,
int64_t group,
double eps,
Tensor& Y,
Tensor& mean,
Tensor& rstd) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
X.scalar_type(),
"GroupNormKernelImpl",
[&]() {
GroupNormKernelImplInternal<scalar_t>(
X,
gamma,
beta,
N,
C,
HxW,
group,
static_cast<scalar_t>(eps),
Y,
mean,
rstd);
});
}
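// Backward path for HxW == 1: builds the per-(n, g) c2/c3 coefficients,
// applies dX = c1 * dY + c2 * X + c3 via TensorIterator, and reduces
// dgamma/dbeta column-wise for small N or with the 32x16 tiled kernel
// for larger N.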
template <typename T>
void GroupNorm1dBackward(
const Tensor dY,
const Tensor X,
const Tensor mean,
const Tensor rstd,
const Tensor gamma,
int64_t N,
int64_t C,
int64_t group,
Tensor& dX,
Tensor& dgamma,
Tensor& dbeta) {
using T_ACC = acc_type<T, true>;
const int64_t G = group;
const int64_t D = C / G;
const T* dY_data = dY.data_ptr<T>();
const T* X_data = X.data_ptr<T>();
const T* mean_data = mean.data_ptr<T>();
const T* rstd_data = rstd.data_ptr<T>();
cudaStream_t cuda_stream = at::cuda::getCurrentCUDAStream();
if (dX.defined()) {
const T* gamma_data = gamma.defined() ? gamma.data_ptr<T>() : nullptr;
const auto kAccType =
(X.scalar_type() == kHalf || X.scalar_type() == kBFloat16)
? kFloat
: X.scalar_type();
Tensor c2 = at::empty({N, G}, X.options().dtype(kAccType));
Tensor c3 = at::empty({N, G}, X.options().dtype(kAccType));
T_ACC* c2_data = c2.data_ptr<T_ACC>();
T_ACC* c3_data = c3.data_ptr<T_ACC>();
const int64_t num_threads = (C / G) < cuda_utils::kCUDABlockReduceNumThreads
? C10_WARP_SIZE
: cuda_utils::kCUDABlockReduceNumThreads;
Compute1dBackwardFusedParamsCUDAKernel<T>
<<<dim3(N, G), num_threads, 0, cuda_stream>>>(
C,
G,
dY_data,
X_data,
mean_data,
rstd_data,
gamma_data,
c2_data,
c3_data);
TORCH_CUDA_KERNEL_LAUNCH_CHECK();
if (gamma.defined()) {
auto iter = TensorIteratorConfig()
.check_all_same_dtype(std::is_same<T, T_ACC>::value)
.resize_outputs(false)
.add_output(dX.view({N, G, D}))
.add_input(dY.view({N, G, D}))
.add_input(X.view({N, G, D}))
.add_input(rstd.view({N, G, 1}))
.add_input(gamma.view({1, G, D}))
.add_input(c2.view({N, G, 1}))
.add_input(c3.view({N, G, 1}))
.build();
gpu_kernel(
iter,
[] GPU_LAMBDA(T dy, T x, T rstd, T gamma, T_ACC c2, T_ACC c3) -> T {
const T_ACC c1 =
static_cast<T_ACC>(rstd) * static_cast<T_ACC>(gamma);
return c1 * static_cast<T_ACC>(dy) + c2 * static_cast<T_ACC>(x) +
c3;
});
} else {
auto iter = TensorIteratorConfig()
.check_all_same_dtype(std::is_same<T, T_ACC>::value)
.resize_outputs(false)
.add_output(dX.view({N * G, D}))
.add_input(dY.view({N * G, D}))
.add_input(X.view({N * G, D}))
.add_input(rstd.view({N * G, 1}))
.add_input(c2.view({N * G, 1}))
.add_input(c3.view({N * G, 1}))
.build();
gpu_kernel(
iter, [] GPU_LAMBDA(T dy, T x, T rstd, T_ACC c2, T_ACC c3) -> T {
const T_ACC c1 = static_cast<T_ACC>(rstd);
return c1 * static_cast<T_ACC>(dy) + c2 * static_cast<T_ACC>(x) +
c3;
});
}
}
if (dgamma.defined() || dbeta.defined()) {
T* dgamma_data = dgamma.defined() ? dgamma.data_ptr<T>() : nullptr;
T* dbeta_data = dbeta.defined() ? dbeta.data_ptr<T>() : nullptr;
if (N <= 128) {
const int64_t B = (C + kCUDANumThreads - 1) / kCUDANumThreads;
GammaBeta1dBackwardCUDAKernel1<T><<<B, kCUDANumThreads, 0, cuda_stream>>>(
N,
C,
G,
dY_data,
X_data,
mean_data,
rstd_data,
dgamma_data,
dbeta_data);
TORCH_CUDA_KERNEL_LAUNCH_CHECK();
} else {
const int64_t B = (C + kReduceTileSize - 1) / kReduceTileSize;
// The algorithm for colwise reduction here is to accumulate each 32 cols
// to a 32 * 32 tile and write the tile to shared memory. Then do warp
// reduce for each col in the tile. So here the blockDim must be (32, 16).
constexpr int kThreadX = kReduceTileSize;
constexpr int kThreadY = kReduceTileSize / 2;
GammaBeta1dBackwardCUDAKernel2<T>
<<<B, dim3(kThreadX, kThreadY), 0, cuda_stream>>>(
N,
C,
G,
dY_data,
X_data,
mean_data,
rstd_data,
dgamma_data,
dbeta_data);
TORCH_CUDA_KERNEL_LAUNCH_CHECK();
}
}
}
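// General backward entry point: takes the 1d path when HxW == 1; otherwise
// reduces ds/db per (n, c), folds them into the c2/c3 coefficients (and
// c1 = rstd * gamma when gamma is defined), applies the elementwise dX
// formula, and reduces dgamma/dbeta column-wise (small N) or tiled (large N).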
template <typename T>
void GroupNormBackwardKernelImplInternal(
const Tensor& dY,
const Tensor& X,
const Tensor& mean,
const Tensor& rstd,
const Tensor& gamma,
int64_t N,
int64_t C,
int64_t HxW,
int64_t group,
Tensor& dX,
Tensor& dgamma,
Tensor& dbeta) {
using T_ACC = acc_type<T, true>;
const int64_t G = group;
const int64_t D = C / G;
TORCH_CHECK(dY.numel() == N * C * HxW);
TORCH_CHECK(X.numel() == N * C * HxW);
TORCH_CHECK(mean.numel() == N * G);
TORCH_CHECK(rstd.numel() == N * G);
TORCH_CHECK(!gamma.defined() || gamma.numel() == C);
cudaStream_t cuda_stream = at::cuda::getCurrentCUDAStream();
if (N == 0) {
if (dgamma.defined()) {
dgamma.fill_(T(0));
}
if (dbeta.defined()) {
dbeta.fill_(T(0));
}
return;
}
const T* dY_data = dY.data_ptr<T>();
const T* X_data = X.data_ptr<T>();
const T* mean_data = mean.data_ptr<T>();
const T* rstd_data = rstd.data_ptr<T>();
const T* gamma_data = gamma.defined() ? gamma.data_ptr<T>() : nullptr;
const auto kAccType =
(X.scalar_type() == kHalf || X.scalar_type() == kBFloat16)
? kFloat
: X.scalar_type();
Tensor ds = at::empty({N, C}, X.options().dtype(kAccType));
Tensor db = at::empty({N, C}, X.options().dtype(kAccType));
T_ACC* ds_data = ds.data_ptr<T_ACC>();
T_ACC* db_data = db.data_ptr<T_ACC>();
if (HxW == 1) {
GroupNorm1dBackward<T>(
dY, X, mean, rstd, gamma, N, C, G, dX, dgamma, dbeta);
return;
}
int64_t num_threads = HxW < cuda_utils::kCUDABlockReduceNumThreads
? C10_WARP_SIZE
: cuda_utils::kCUDABlockReduceNumThreads;
ComputeInternalGradientsCUDAKernel<T><<<N * C, num_threads, 0, cuda_stream>>>(
HxW, dY_data, X_data, ds_data, db_data);
TORCH_CUDA_KERNEL_LAUNCH_CHECK();
if (dX.defined()) {
Tensor c1 = at::empty({0}, X.options().dtype(kAccType));
Tensor c2 = at::empty({N, G}, X.options().dtype(kAccType));
Tensor c3 = at::empty({N, G}, X.options().dtype(kAccType));
T_ACC* c2_data = c2.data_ptr<T_ACC>();
T_ACC* c3_data = c3.data_ptr<T_ACC>();
if (gamma.defined()) {
auto iter = TensorIteratorConfig()
.check_all_same_dtype(std::is_same<T, T_ACC>::value)
.add_output(c1)
.add_input(rstd.view({N, G, 1}))
.add_input(gamma.view({1, G, D}))
.build();
gpu_kernel(iter, [] GPU_LAMBDA(T rstd, T gamma) -> T_ACC {
return static_cast<T_ACC>(rstd) * static_cast<T_ACC>(gamma);
});
}
num_threads = (C / G) < cuda_utils::kCUDABlockReduceNumThreads
? C10_WARP_SIZE
: cuda_utils::kCUDABlockReduceNumThreads;
ComputeBackwardFusedParamsCUDAKernel<T>
<<<dim3(N, G), num_threads, 0, cuda_stream>>>(
C,
HxW,
G,
mean_data,
rstd_data,
gamma_data,
ds_data,
db_data,
c2_data,
c3_data);
TORCH_CUDA_KERNEL_LAUNCH_CHECK();
if (gamma.defined()) {
auto iter = TensorIteratorConfig()
.check_all_same_dtype(std::is_same<T, T_ACC>::value)
.resize_outputs(false)
.add_output(dX.view({N * G, D, HxW}))
.add_input(dY.view({N * G, D, HxW}))
.add_input(X.view({N * G, D, HxW}))
.add_input(c1.view({N * G, D, 1}))
.add_input(c2.view({N * G, 1, 1}))
.add_input(c3.view({N * G, 1, 1}))
.build();
gpu_kernel(
iter, [] GPU_LAMBDA(T dy, T x, T_ACC c1, T_ACC c2, T_ACC c3) -> T {
return c1 * static_cast<T_ACC>(dy) + c2 * static_cast<T_ACC>(x) +
c3;
});
} else {
auto iter = TensorIteratorConfig()
.check_all_same_dtype(std::is_same<T, T_ACC>::value)
.resize_outputs(false)
.add_output(dX.view({N * G, D * HxW}))
.add_input(dY.view({N * G, D * HxW}))
.add_input(X.view({N * G, D * HxW}))
.add_input(rstd.view({N * G, 1}))
.add_input(c2.view({N * G, 1}))
.add_input(c3.view({N * G, 1}))
.build();
gpu_kernel(
iter, [] GPU_LAMBDA(T dy, T x, T rstd, T_ACC c2, T_ACC c3) -> T {
const T_ACC c1 = static_cast<T_ACC>(rstd);
return c1 * static_cast<T_ACC>(dy) + c2 * static_cast<T_ACC>(x) +
c3;
});
}
}
if (dgamma.defined() || dbeta.defined()) {
T* dgamma_data = dgamma.defined() ? dgamma.data_ptr<T>() : nullptr;
T* dbeta_data = dbeta.defined() ? dbeta.data_ptr<T>() : nullptr;
if (N <= 128) {
// For small batch size, do colwise reduce directly.
const int64_t B = (C + kCUDANumThreads - 1) / kCUDANumThreads;
GammaBetaBackwardCUDAKernel1<T><<<B, kCUDANumThreads, 0, cuda_stream>>>(
N,
C,
G,
mean_data,
rstd_data,
ds_data,
db_data,
dgamma_data,
dbeta_data);
TORCH_CUDA_KERNEL_LAUNCH_CHECK();
} else {
const int64_t B = (C + kReduceTileSize - 1) / kReduceTileSize;
// The algorithm for colwise reduction here is to accumulate each 32 cols
// to a 32 * 32 tile and write the tile to shared memory. Then do warp
// reduce for each col in the tile. So here the blockDim must be (32, 16).
constexpr int kThreadX = kReduceTileSize;
constexpr int kThreadY = kReduceTileSize / 2;
GammaBetaBackwardCUDAKernel2<T>
<<<B, dim3(kThreadX, kThreadY), 0, cuda_stream>>>(
N,
C,
G,
mean_data,
rstd_data,
ds_data,
db_data,
dgamma_data,
dbeta_data);
TORCH_CUDA_KERNEL_LAUNCH_CHECK();
}
}
}
void GroupNormBackwardKernelImpl(
const Tensor& dY,
const Tensor& X,
const Tensor& mean,
const Tensor& rstd,
const Tensor& gamma,
int64_t N,
int64_t C,
int64_t HxW,
int64_t group,
Tensor& dX,
Tensor& dgamma,
Tensor& dbeta) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
X.scalar_type(),
"GroupNormBackwardKernelImpl",
[&]() {
GroupNormBackwardKernelImplInternal<scalar_t>(
dY, X, mean, rstd, gamma, N, C, HxW, group, dX, dgamma, dbeta);
});
}
} // namespace
REGISTER_DISPATCH(GroupNormKernel, &GroupNormKernelImpl);
REGISTER_DISPATCH(GroupNormBackwardKernel, &GroupNormBackwardKernelImpl);
} // namespace native
} // namespace at
|
cb164b277f468da1ab53ddd026ceb1c156e51a50.hip | // !!! This is a file automatically generated by hipify!!!
#include "darknet.h"
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <rocblas.h>
#include <float.h>
#include "activations.h"
#include "dark_cuda.h"
__device__ float lhtan_activate_kernel(float x)
{
if(x < 0) return .001*x;
if(x > 1) return .001*(x-1) + 1;
return x;
}
__device__ float lhtan_gradient_kernel(float x)
{
if(x > 0 && x < 1) return 1;
return .001;
}
__device__ float hardtan_activate_kernel(float x)
{
if (x < -1) return -1;
if (x > 1) return 1;
return x;
}
__device__ float linear_activate_kernel(float x){return x;}
__device__ float logistic_activate_kernel(float x){return 1.f/(1.f + expf(-x));}
__device__ float loggy_activate_kernel(float x){return 2.f/(1.f + expf(-x)) - 1;}
__device__ float relu_activate_kernel(float x){return x*(x>0);}
__device__ float relu6_activate_kernel(float x) { return min_val_cmp(max_val_cmp(x, 0), 6); }
__device__ float elu_activate_kernel(float x){return (x >= 0)*x + (x < 0)*(expf(x)-1);}
__device__ float selu_activate_kernel(float x) { return (x >= 0)*1.0507f*x + (x < 0)*1.0507f*1.6732f*(expf(x) - 1); }
__device__ float relie_activate_kernel(float x){return (x>0) ? x : .01f*x;}
__device__ float ramp_activate_kernel(float x){return x*(x>0)+.1f*x;}
__device__ float leaky_activate_kernel(float x){return (x>0) ? x : .1f*x;}
__device__ float tanh_activate_kernel(float x){return (2/(1 + expf(-2*x)) - 1);}
__device__ float gelu_activate_kernel(float x){return (0.5*x*(1 + tanhf(0.797885*x + 0.035677*powf(x, 3))));}
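// Numerically stable softplus: returns x for large x (log(1 + e^x) ~ x),
// e^x for very negative x, and log1p(exp(x)) otherwise.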
__device__ float softplus_kernel(float x, float threshold = 20) {
if (x > threshold) return x; // too large
else if (x < -threshold) return expf(x); // too small
return log1pf(expf(x));
//return logf(expf(x) + 1);
}
__device__ float plse_activate_kernel(float x)
{
if(x < -4) return .01f * (x + 4);
if(x > 4) return .01f * (x - 4) + 1;
return .125f*x + .5f;
}
__device__ float stair_activate_kernel(float x)
{
int n = floorf(x);
if (n%2 == 0) return floorf(x/2.f);
else return (x - n) + floorf(x/2.f);
}
__device__ float hardtan_gradient_kernel(float x)
{
if (x > -1 && x < 1) return 1;
return 0;
}
__device__ float linear_gradient_kernel(float x){return 1;}
__device__ float logistic_gradient_kernel(float x){return (1-x)*x;}
__device__ float loggy_gradient_kernel(float x)
{
float y = (x+1.F)/2.F;
return 2*(1-y)*y;
}
__device__ float relu_gradient_kernel(float x){return (x>0);}
__device__ float relu6_gradient_kernel(float x) { return (x > 0 && x < 6); }
__device__ float elu_gradient_kernel(float x){return (x >= 0) + (x < 0)*(x + 1);}
__device__ float selu_gradient_kernel(float x) { return (x >= 0)*1.0507f + (x < 0)*(x + 1.0507f*1.6732f); }
__device__ float relie_gradient_kernel(float x){return (x>0) ? 1 : .01f;}
__device__ float ramp_gradient_kernel(float x){return (x>0)+.1f;}
__device__ float leaky_gradient_kernel(float x){return (x>0) ? 1 : .1f;}
__device__ float tanh_gradient_kernel(float x){return 1-x*x;}
__device__ float sech_gpu(float x) { return 2 / (expf(x) + expf(-x)); }
__device__ float gelu_gradient_kernel(float x) {
const float x3 = powf(x, 3);
return 0.5*tanhf(0.0356774*x3 + 0.797885*x) + (0.0535161*x3 + 0.398942*x) * powf(sech_gpu(0.0356774*x3 + 0.797885*x), 2) + 0.5;
}
__device__ float plse_gradient_kernel(float x){return (x < 0 || x > 1) ? .01f : .125f;}
__device__ float stair_gradient_kernel(float x)
{
if (floorf(x) == x) return 0;
return 1;
}
__device__ float activate_kernel(float x, ACTIVATION a)
{
switch(a){
case LINEAR:
return linear_activate_kernel(x);
case LOGISTIC:
return logistic_activate_kernel(x);
case LOGGY:
return loggy_activate_kernel(x);
case RELU:
return relu_activate_kernel(x);
case RELU6:
return relu6_activate_kernel(x);
case ELU:
return elu_activate_kernel(x);
case SELU:
return selu_activate_kernel(x);
case GELU:
return gelu_activate_kernel(x);
case RELIE:
return relie_activate_kernel(x);
case RAMP:
return ramp_activate_kernel(x);
case LEAKY:
return leaky_activate_kernel(x);
case TANH:
return tanh_activate_kernel(x);
case PLSE:
return plse_activate_kernel(x);
case STAIR:
return stair_activate_kernel(x);
case HARDTAN:
return hardtan_activate_kernel(x);
case LHTAN:
return lhtan_activate_kernel(x);
}
return 0;
}
__device__ float gradient_kernel(float x, ACTIVATION a)
{
switch (a) {
case LINEAR:
return linear_gradient_kernel(x);
case LOGISTIC:
return logistic_gradient_kernel(x);
case LOGGY:
return loggy_gradient_kernel(x);
case RELU:
return relu_gradient_kernel(x);
case RELU6:
return relu6_gradient_kernel(x);
case NORM_CHAN:
return relu_gradient_kernel(x);
case ELU:
return elu_gradient_kernel(x);
case SELU:
return selu_gradient_kernel(x);
case GELU:
return gelu_gradient_kernel(x);
case RELIE:
return relie_gradient_kernel(x);
case RAMP:
return ramp_gradient_kernel(x);
case LEAKY:
return leaky_gradient_kernel(x);
case TANH:
return tanh_gradient_kernel(x);
case PLSE:
return plse_gradient_kernel(x);
case STAIR:
return stair_gradient_kernel(x);
case HARDTAN:
return hardtan_gradient_kernel(x);
case LHTAN:
return lhtan_gradient_kernel(x);
}
return 0;
}
__global__ void binary_gradient_array_kernel(float *x, float *dy, int n, int s, BINARY_ACTIVATION a, float *dx)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
int i = id % s;
int b = id / s;
float x1 = x[b*s + i];
float x2 = x[b*s + s / 2 + i];
if (id < n) {
float de = dy[id];
dx[b*s + i] = x2*de;
dx[b*s + s / 2 + i] = x1*de;
}
}
extern "C" void binary_gradient_array_gpu(float *x, float *dx, int n, int size, BINARY_ACTIVATION a, float *y)
{
binary_gradient_array_kernel << <cuda_gridsize(n / 2), BLOCK, 0, get_cuda_stream() >> >(x, dx, n / 2, size, a, y);
CHECK_CUDA(hipPeekAtLastError());
}
__global__ void binary_activate_array_kernel(float *x, int n, int s, BINARY_ACTIVATION a, float *y)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
int i = id % s;
int b = id / s;
float x1 = x[b*s + i];
float x2 = x[b*s + s / 2 + i];
if (id < n) y[id] = x1*x2;
}
extern "C" void binary_activate_array_gpu(float *x, int n, int size, BINARY_ACTIVATION a, float *y)
{
binary_activate_array_kernel << <cuda_gridsize(n / 2), BLOCK, 0, get_cuda_stream() >> >(x, n / 2, size, a, y);
CHECK_CUDA(hipPeekAtLastError());
}
__global__ void activate_array_kernel(float *x, int n, ACTIVATION a)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n) x[i] = activate_kernel(x[i], a);
}
__global__ void activate_array_swish_kernel(float *x, int n, float *output_sigmoid_gpu, float *output_gpu)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i < n) {
float x_val = x[i];
float sigmoid = logistic_activate_kernel(x_val);
if (output_sigmoid_gpu) output_sigmoid_gpu[i] = sigmoid;
output_gpu[i] = x_val * sigmoid;
}
}
__device__ float mish_njuffa(float x)
{
float r;
float e = expf(x);
r = 1.0f / fmaf(fmaf(-0.5f, e, -1.0f), e, -1.0f);
r = fmaf(r, x, x);
return r;
}
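// mish(x) = x * tanh(softplus(x)). With n = e^(2x) + 2*e^x this equals
// x * n / (n + 2) = x - 2*x / (n + 2), which is what mish_yashas and
// mish_yashas2 evaluate, with extra branches to keep the expression stable
// for large negative x.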
__device__ float mish_yashas(float x)
{
float e = __expf(x);
if (x <= -18.0f)
return x * e;
float n = e * e + 2 * e;
if (x <= -5.0f)
return x * __fdividef(n, n + 2);
return x - 2 * __fdividef(x, n + 2);
}
__device__ float mish_yashas2(float x)
{
float e = __expf(x);
float n = e * e + 2 * e;
if (x <= -0.6f)
return x * __fdividef(n, n + 2);
return x - 2 * __fdividef(x, n + 2);
}
// https://github.com/digantamisra98/Mish
__global__ void activate_array_mish_kernel(float *x, int n, float *activation_input, float *output_gpu)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i < n) {
const float MISH_THRESHOLD = 20;
float x_val = x[i];
if (activation_input) activation_input[i] = x_val; // store value before activation
//output_gpu[i] = x_val * tanh_activate_kernel(logf(1 + expf(x_val)));
// Pytorch: https://github.com/thomasbrandon/mish-cuda/blob/master/csrc/mish.h#L17-L20
// TF: https://github.com/tensorflow/addons/blob/093cdfa85d334cbe19a37624c33198f3140109ed/tensorflow_addons/custom_ops/activations/cc/kernels/mish_op.h#L40-L49
// log1p(x) == log(x + 1)
//output_gpu[i] = x_val * tanh_activate_kernel( softplus_kernel(x_val, MISH_THRESHOLD) );
output_gpu[i] = mish_yashas2(x_val);
//output_gpu[i] = mish_njuffa(x_val);
}
}
__device__ float hard_mish_yashas(float x)
{
if (x > 0)
return x;
if (x > -2)
return x * x / 2 + x;
return 0;
}
__global__ void activate_array_hard_mish_kernel(float *x, int n, float *activation_input, float *output_gpu)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i < n) {
float x_val = x[i];
if (activation_input) activation_input[i] = x_val; // store value before activation
output_gpu[i] = hard_mish_yashas(x_val);
}
}
__global__ void activate_array_leaky_kernel(float *x, int n)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
x[index] = leaky_activate_kernel(x[index]);
}
}
__global__ void activate_array_selu_kernel(float *x, int n)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
x[index] = selu_activate_kernel(x[index]);
}
}
__global__ void activate_array_gelu_kernel(float *x, int n)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
x[index] = gelu_activate_kernel(x[index]);
}
}
__global__ void activate_array_logistic_kernel(float *x, int n)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
x[index] = logistic_activate_kernel(x[index]);
}
}
__global__ void activate_array_tanh_kernel(float *x, int n)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
x[index] = tanh_activate_kernel(x[index]);
}
}
__global__ void activate_array_hardtan_kernel(float *x, int n)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
x[index] = hardtan_activate_kernel(x[index]);
}
}
__global__ void activate_array_relu_kernel(float *x, int n)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
x[index] = relu_activate_kernel(x[index]);
}
}
__global__ void activate_array_relu6_kernel(float *x, int n)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
x[index] = relu6_activate_kernel(x[index]);
}
}
__global__ void gradient_array_kernel(float *x, int n, ACTIVATION a, float *delta)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n) delta[i] *= gradient_kernel(x[i], a);
}
// https://github.com/BVLC/caffe/blob/04ab089db018a292ae48d51732dd6c66766b36b6/src/caffe/layers/swish_layer.cu#L28-L30
__global__ void gradient_array_swish_kernel(float *x, int n, float *sigmoid_gpu, float *delta)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i < n) {
float swish = x[i];
delta[i] *= swish + sigmoid_gpu[i] * (1 - swish); // gradient_kernel(x[i], a);
}
}
// https://github.com/digantamisra98/Mish
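// Gradient of mish: d/dx [x * tanh(sp(x))] = tanh(sp) + x * (1 - tanh(sp)^2) * sigmoid(x),
// where sp = softplus(x) and d(sp)/dx = sigmoid(x) = 1 - exp(-sp).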
__global__ void gradient_array_mish_kernel(int n, float *activation_input_gpu, float *delta)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i < n) {
const float MISH_THRESHOLD = 20.0f;
// implementation from TensorFlow: https://github.com/tensorflow/addons/blob/093cdfa85d334cbe19a37624c33198f3140109ed/tensorflow_addons/custom_ops/activations/cc/kernels/mish_op.h#L66-L80
// implementation from Pytorch: https://github.com/thomasbrandon/mish-cuda/blob/master/csrc/mish.h#L26-L31
// log1p(x) == log(x + 1)
const float inp = activation_input_gpu[i];
const float sp = softplus_kernel(inp, MISH_THRESHOLD);
const float grad_sp = -expm1f(-sp);
//const float grad_sp = 1 - expf(-sp);
const float tsp = tanh(sp);
const float grad_tsp = (1 - tsp*tsp) * grad_sp;
const float grad = inp * grad_tsp + tsp;
delta[i] *= grad;
//float x = activation_input[i];
//float d = 2 * expf(x) + expf(2 * x) + 2;
//float w = 4 * (x + 1) + 4 * expf(2 * x) + expf(3 * x) + expf(x)*(4 * x + 6);
//float derivative = expf(x) * w / (d * d);
//delta[i] *= derivative;
}
}
__device__ float hard_mish_yashas_grad(float x)
{
if (x > 0)
return 1;
if (x > -2)
return x + 1;
return 0;
}
__global__ void gradient_array_hard_mish_kernel(int n, float *activation_input_gpu, float *delta)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i < n) {
const float x = activation_input_gpu[i];
delta[i] *= hard_mish_yashas_grad(x);
}
}
__global__ void gradient_array_leaky_kernel(float *x, int n, float *delta)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
delta[index] *= leaky_gradient_kernel(x[index]);
}
}
__global__ void gradient_array_revleaky_kernel(float *x, int n, float *delta)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
delta[index] /= leaky_gradient_kernel(x[index]);
}
}
__global__ void gradient_array_selu_kernel(float *x, int n, float *delta)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
delta[index] *= selu_gradient_kernel(x[index]);
}
}
__global__ void gradient_array_gelu_kernel(float *x, int n, float *delta)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
delta[index] *= gelu_gradient_kernel(x[index]);
}
}
__global__ void gradient_array_logistic_kernel(float *x, int n, float *delta)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
delta[index] *= logistic_gradient_kernel(x[index]);
}
}
__global__ void gradient_array_tanh_kernel(float *x, int n, float *delta)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
delta[index] *= tanh_gradient_kernel(x[index]);
}
}
__global__ void gradient_array_hardtan_kernel(float *x, int n, float *delta)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
delta[index] *= hardtan_gradient_kernel(x[index]);
}
}
__global__ void gradient_array_relu_kernel(float *x, int n, float *delta)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
delta[index] *= relu_gradient_kernel(x[index]);
}
}
__global__ void gradient_array_relu6_kernel(float *x, int n, float *delta)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
delta[index] *= relu6_gradient_kernel(x[index]);
}
}
extern "C" void activate_array_ongpu(float *x, int n, ACTIVATION a)
{
const int num_blocks = get_number_of_blocks(n, BLOCK);
if (a == LINEAR) return;
else if (a == LEAKY || a == REVLEAKY) activate_array_leaky_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n);
else if (a == LOGISTIC) activate_array_logistic_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n);
else if (a == TANH) activate_array_tanh_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n);
else if (a == HARDTAN) activate_array_hardtan_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n);
else if (a == RELU) activate_array_relu_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n);
else if (a == RELU6) activate_array_relu6_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n);
else if (a == SELU) activate_array_selu_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n);
else if (a == GELU) activate_array_gelu_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n);
else
hipLaunchKernelGGL(( activate_array_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, get_cuda_stream(), x, n, a);
CHECK_CUDA(hipPeekAtLastError());
}
extern "C" void activate_array_swish_ongpu(float *x, int n, float *output_sigmoid_gpu, float *output_gpu)
{
const int num_blocks = get_number_of_blocks(n, BLOCK);
activate_array_swish_kernel << <cuda_gridsize(n), BLOCK, 0, get_cuda_stream() >> >(x, n, output_sigmoid_gpu, output_gpu);
CHECK_CUDA(hipPeekAtLastError());
}
extern "C" void activate_array_mish_ongpu(float *x, int n, float *activation_input_gpu, float *output_gpu)
{
const int num_blocks = get_number_of_blocks(n, BLOCK);
activate_array_mish_kernel << <cuda_gridsize(n), BLOCK, 0, get_cuda_stream() >> >(x, n, activation_input_gpu, output_gpu);
CHECK_CUDA(hipPeekAtLastError());
}
extern "C" void activate_array_hard_mish_ongpu(float *x, int n, float *activation_input_gpu, float *output_gpu)
{
const int num_blocks = get_number_of_blocks(n, BLOCK);
activate_array_hard_mish_kernel << <cuda_gridsize(n), BLOCK, 0, get_cuda_stream() >> >(x, n, activation_input_gpu, output_gpu);
CHECK_CUDA(hipPeekAtLastError());
}
extern "C" void gradient_array_ongpu(float *x, int n, ACTIVATION a, float *delta)
{
const int num_blocks = get_number_of_blocks(n, BLOCK);
if (a == LINEAR) return;
else if (a == LEAKY) gradient_array_leaky_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n, delta);
else if (a == REVLEAKY) gradient_array_revleaky_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n, delta);
else if (a == LOGISTIC) gradient_array_logistic_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n, delta);
else if (a == TANH) gradient_array_tanh_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n, delta);
else if (a == HARDTAN) gradient_array_hardtan_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n, delta);
else if (a == RELU) gradient_array_relu_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n, delta);
else if (a == RELU6) gradient_array_relu6_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n, delta);
//else if (a == NORM_CHAN) gradient_array_relu_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n, delta);
else if (a == NORM_CHAN_SOFTMAX || a == NORM_CHAN) {
printf(" Error: should be used custom NORM_CHAN_SOFTMAX-function for gradient \n");
exit(0);
}
else if (a == SELU) gradient_array_selu_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n, delta);
else if (a == GELU) gradient_array_gelu_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n, delta);
else
gradient_array_kernel << <cuda_gridsize(n), BLOCK, 0, get_cuda_stream() >> > (x, n, a, delta);
CHECK_CUDA(hipPeekAtLastError());
}
extern "C" void gradient_array_swish_ongpu(float *x, int n, float *sigmoid_gpu, float *delta)
{
const int num_blocks = get_number_of_blocks(n, BLOCK);
gradient_array_swish_kernel << <cuda_gridsize(n), BLOCK, 0, get_cuda_stream() >> > (x, n, sigmoid_gpu, delta);
CHECK_CUDA(hipPeekAtLastError());
}
extern "C" void gradient_array_mish_ongpu(int n, float *activation_input_gpu, float *delta)
{
const int num_blocks = get_number_of_blocks(n, BLOCK);
gradient_array_mish_kernel << <cuda_gridsize(n), BLOCK, 0, get_cuda_stream() >> > (n, activation_input_gpu, delta);
CHECK_CUDA(hipPeekAtLastError());
}
extern "C" void gradient_array_hard_mish_ongpu(int n, float *activation_input_gpu, float *delta)
{
const int num_blocks = get_number_of_blocks(n, BLOCK);
gradient_array_hard_mish_kernel << <cuda_gridsize(n), BLOCK, 0, get_cuda_stream() >> > (n, activation_input_gpu, delta);
CHECK_CUDA(hipPeekAtLastError());
}
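// NORM_CHAN activation: at each spatial position, zero out negative values and
// divide the positive channel values by their sum (plus eps) across channels.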
__global__ void activate_array_normalize_channels_kernel(float *x, int size, int batch, int channels, int wh_step, float *output_gpu)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int wh_i = i % wh_step;
int b = i / wh_step;
const float eps = 0.0001;
if (i < size) {
float sum = eps;
int k;
for (k = 0; k < channels; ++k) {
float val = x[wh_i + k * wh_step + b*wh_step*channels];
if (val > 0) sum += val;
}
for (k = 0; k < channels; ++k) {
float val = x[wh_i + k * wh_step + b*wh_step*channels];
if (val > 0) val = val / sum;
else val = 0;
output_gpu[wh_i + k * wh_step + b*wh_step*channels] = val;
}
}
}
extern "C" void activate_array_normalize_channels_ongpu(float *x, int n, int batch, int channels, int wh_step, float *output_gpu)
{
// n = w*h*c*batch
// size = w*h*batch
int size = n / channels;
const int num_blocks = get_number_of_blocks(size, BLOCK);
activate_array_normalize_channels_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> > (x, size, batch, channels, wh_step, output_gpu);
CHECK_CUDA(hipPeekAtLastError());
}
__global__ void activate_array_normalize_channels_softmax_kernel(float *x, int size, int batch, int channels, int wh_step, float *output_gpu, int use_max_val)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int wh_i = i % wh_step;
int b = i / wh_step;
const float eps = 0.0001;
if (i < size) {
float sum = eps;
float max_val = -FLT_MAX;
int k;
if (use_max_val) {
for (k = 0; k < channels; ++k) {
float val = x[wh_i + k * wh_step + b*wh_step*channels];
if (val > max_val || k == 0) max_val = val;
}
}
else
max_val = 0;
for (k = 0; k < channels; ++k) {
float val = x[wh_i + k * wh_step + b*wh_step*channels];
sum += expf(val - max_val);
}
for (k = 0; k < channels; ++k) {
float val = x[wh_i + k * wh_step + b*wh_step*channels];
val = expf(val - max_val) / sum;
if (isnan(val) || isinf(val)) val = 0;
output_gpu[wh_i + k * wh_step + b*wh_step*channels] = val;
}
}
}
extern "C" void activate_array_normalize_channels_softmax_ongpu(float *x, int n, int batch, int channels, int wh_step, float *output_gpu, int use_max_val)
{
// n = w*h*c*batch
// size = w*h*batch
int size = n / channels;
const int num_blocks = get_number_of_blocks(size, BLOCK);
activate_array_normalize_channels_softmax_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> > (x, size, batch, channels, wh_step, output_gpu, use_max_val);
CHECK_CUDA(hipPeekAtLastError());
}
__global__ void gradient_array_normalize_channels_softmax_kernel(float *x, int size, int batch, int channels, int wh_step, float *delta_gpu)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int wh_i = i % wh_step;
int b = i / wh_step;
if (i < size) {
int k;
/*
float grad = 0;
for (k = 0; k < channels; ++k) {
const int index = wh_i + k * wh_step + b*wh_step*channels;
float out = x[index];
float delta = delta_gpu[index];
grad += out*fabs(delta);
}
*/
for (k = 0; k < channels; ++k) {
const int index = wh_i + k * wh_step + b*wh_step*channels;
float delta = delta_gpu[index];
float grad = x[index] * (1 - x[index]);
delta = delta * grad;
if (isnan(delta) || isinf(delta)) delta = 0;
delta_gpu[index] = delta;
}
}
}
extern "C" void gradient_array_normalize_channels_softmax_ongpu(float *output_gpu, int n, int batch, int channels, int wh_step, float *delta_gpu)
{
// n = w*h*c*batch
// size = w*h*batch
int size = n / channels;
const int num_blocks = get_number_of_blocks(size, BLOCK);
gradient_array_normalize_channels_softmax_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> > (output_gpu, size, batch, channels, wh_step, delta_gpu);
CHECK_CUDA(hipPeekAtLastError());
}
__global__ void gradient_array_normalize_channels_kernel(float *x, int size, int batch, int channels, int wh_step, float *delta_gpu)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int wh_i = i % wh_step;
int b = i / wh_step;
if (i < size) {
int k;
/*
float grad = 0;
for (k = 0; k < channels; ++k) {
const int index = wh_i + k * wh_step + b*wh_step*channels;
float out = x[index];
float delta = delta_gpu[index];
grad += out*fabs(delta);
}
*/
for (k = 0; k < channels; ++k) {
const int index = wh_i + k * wh_step + b*wh_step*channels;
if (x[index] > 0) {
float delta = delta_gpu[index];
float grad = x[index];
delta = delta * grad;
delta_gpu[index] = delta;
}
}
}
}
extern "C" void gradient_array_normalize_channels_ongpu(float *output_gpu, int n, int batch, int channels, int wh_step, float *delta_gpu)
{
// n = w*h*c*batch
// size = w*h*batch
int size = n / channels;
const int num_blocks = get_number_of_blocks(size, BLOCK);
gradient_array_normalize_channels_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> > (output_gpu, size, batch, channels, wh_step, delta_gpu);
CHECK_CUDA(hipPeekAtLastError());
}
| cb164b277f468da1ab53ddd026ceb1c156e51a50.cu | #include "darknet.h"
#include <cuda_runtime.h>
#include <curand.h>
#include <cublas_v2.h>
#include <float.h>
#include "activations.h"
#include "dark_cuda.h"
__device__ float lhtan_activate_kernel(float x)
{
if(x < 0) return .001*x;
if(x > 1) return .001*(x-1) + 1;
return x;
}
__device__ float lhtan_gradient_kernel(float x)
{
if(x > 0 && x < 1) return 1;
return .001;
}
__device__ float hardtan_activate_kernel(float x)
{
if (x < -1) return -1;
if (x > 1) return 1;
return x;
}
__device__ float linear_activate_kernel(float x){return x;}
__device__ float logistic_activate_kernel(float x){return 1.f/(1.f + expf(-x));}
__device__ float loggy_activate_kernel(float x){return 2.f/(1.f + expf(-x)) - 1;}
__device__ float relu_activate_kernel(float x){return x*(x>0);}
__device__ float relu6_activate_kernel(float x) { return min_val_cmp(max_val_cmp(x, 0), 6); }
__device__ float elu_activate_kernel(float x){return (x >= 0)*x + (x < 0)*(expf(x)-1);}
__device__ float selu_activate_kernel(float x) { return (x >= 0)*1.0507f*x + (x < 0)*1.0507f*1.6732f*(expf(x) - 1); }
__device__ float relie_activate_kernel(float x){return (x>0) ? x : .01f*x;}
__device__ float ramp_activate_kernel(float x){return x*(x>0)+.1f*x;}
__device__ float leaky_activate_kernel(float x){return (x>0) ? x : .1f*x;}
__device__ float tanh_activate_kernel(float x){return (2/(1 + expf(-2*x)) - 1);}
__device__ float gelu_activate_kernel(float x){return (0.5*x*(1 + tanhf(0.797885*x + 0.035677*powf(x, 3))));}
__device__ float softplus_kernel(float x, float threshold = 20) {
if (x > threshold) return x; // too large
else if (x < -threshold) return expf(x); // too small
return log1pf(expf(x));
//return logf(expf(x) + 1);
}
__device__ float plse_activate_kernel(float x)
{
if(x < -4) return .01f * (x + 4);
if(x > 4) return .01f * (x - 4) + 1;
return .125f*x + .5f;
}
__device__ float stair_activate_kernel(float x)
{
int n = floorf(x);
if (n%2 == 0) return floorf(x/2.f);
else return (x - n) + floorf(x/2.f);
}
__device__ float hardtan_gradient_kernel(float x)
{
if (x > -1 && x < 1) return 1;
return 0;
}
__device__ float linear_gradient_kernel(float x){return 1;}
__device__ float logistic_gradient_kernel(float x){return (1-x)*x;}
__device__ float loggy_gradient_kernel(float x)
{
float y = (x+1.F)/2.F;
return 2*(1-y)*y;
}
__device__ float relu_gradient_kernel(float x){return (x>0);}
__device__ float relu6_gradient_kernel(float x) { return (x > 0 && x < 6); }
__device__ float elu_gradient_kernel(float x){return (x >= 0) + (x < 0)*(x + 1);}
__device__ float selu_gradient_kernel(float x) { return (x >= 0)*1.0507f + (x < 0)*(x + 1.0507f*1.6732f); }
__device__ float relie_gradient_kernel(float x){return (x>0) ? 1 : .01f;}
__device__ float ramp_gradient_kernel(float x){return (x>0)+.1f;}
__device__ float leaky_gradient_kernel(float x){return (x>0) ? 1 : .1f;}
__device__ float tanh_gradient_kernel(float x){return 1-x*x;}
__device__ float sech_gpu(float x) { return 2 / (expf(x) + expf(-x)); }
__device__ float gelu_gradient_kernel(float x) {
const float x3 = powf(x, 3);
return 0.5*tanhf(0.0356774*x3 + 0.797885*x) + (0.0535161*x3 + 0.398942*x) * powf(sech_gpu(0.0356774*x3 + 0.797885*x), 2) + 0.5;
}
__device__ float plse_gradient_kernel(float x){return (x < 0 || x > 1) ? .01f : .125f;}
__device__ float stair_gradient_kernel(float x)
{
if (floorf(x) == x) return 0;
return 1;
}
__device__ float activate_kernel(float x, ACTIVATION a)
{
switch(a){
case LINEAR:
return linear_activate_kernel(x);
case LOGISTIC:
return logistic_activate_kernel(x);
case LOGGY:
return loggy_activate_kernel(x);
case RELU:
return relu_activate_kernel(x);
case RELU6:
return relu6_activate_kernel(x);
case ELU:
return elu_activate_kernel(x);
case SELU:
return selu_activate_kernel(x);
case GELU:
return gelu_activate_kernel(x);
case RELIE:
return relie_activate_kernel(x);
case RAMP:
return ramp_activate_kernel(x);
case LEAKY:
return leaky_activate_kernel(x);
case TANH:
return tanh_activate_kernel(x);
case PLSE:
return plse_activate_kernel(x);
case STAIR:
return stair_activate_kernel(x);
case HARDTAN:
return hardtan_activate_kernel(x);
case LHTAN:
return lhtan_activate_kernel(x);
}
return 0;
}
__device__ float gradient_kernel(float x, ACTIVATION a)
{
switch (a) {
case LINEAR:
return linear_gradient_kernel(x);
case LOGISTIC:
return logistic_gradient_kernel(x);
case LOGGY:
return loggy_gradient_kernel(x);
case RELU:
return relu_gradient_kernel(x);
case RELU6:
return relu6_gradient_kernel(x);
case NORM_CHAN:
return relu_gradient_kernel(x);
case ELU:
return elu_gradient_kernel(x);
case SELU:
return selu_gradient_kernel(x);
case GELU:
return gelu_gradient_kernel(x);
case RELIE:
return relie_gradient_kernel(x);
case RAMP:
return ramp_gradient_kernel(x);
case LEAKY:
return leaky_gradient_kernel(x);
case TANH:
return tanh_gradient_kernel(x);
case PLSE:
return plse_gradient_kernel(x);
case STAIR:
return stair_gradient_kernel(x);
case HARDTAN:
return hardtan_gradient_kernel(x);
case LHTAN:
return lhtan_gradient_kernel(x);
}
return 0;
}
__global__ void binary_gradient_array_kernel(float *x, float *dy, int n, int s, BINARY_ACTIVATION a, float *dx)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
int i = id % s;
int b = id / s;
float x1 = x[b*s + i];
float x2 = x[b*s + s / 2 + i];
if (id < n) {
float de = dy[id];
dx[b*s + i] = x2*de;
dx[b*s + s / 2 + i] = x1*de;
}
}
extern "C" void binary_gradient_array_gpu(float *x, float *dx, int n, int size, BINARY_ACTIVATION a, float *y)
{
binary_gradient_array_kernel << <cuda_gridsize(n / 2), BLOCK, 0, get_cuda_stream() >> >(x, dx, n / 2, size, a, y);
CHECK_CUDA(cudaPeekAtLastError());
}
__global__ void binary_activate_array_kernel(float *x, int n, int s, BINARY_ACTIVATION a, float *y)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
int i = id % s;
int b = id / s;
float x1 = x[b*s + i];
float x2 = x[b*s + s / 2 + i];
if (id < n) y[id] = x1*x2;
}
extern "C" void binary_activate_array_gpu(float *x, int n, int size, BINARY_ACTIVATION a, float *y)
{
binary_activate_array_kernel << <cuda_gridsize(n / 2), BLOCK, 0, get_cuda_stream() >> >(x, n / 2, size, a, y);
CHECK_CUDA(cudaPeekAtLastError());
}
__global__ void activate_array_kernel(float *x, int n, ACTIVATION a)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n) x[i] = activate_kernel(x[i], a);
}
__global__ void activate_array_swish_kernel(float *x, int n, float *output_sigmoid_gpu, float *output_gpu)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i < n) {
float x_val = x[i];
float sigmoid = logistic_activate_kernel(x_val);
if (output_sigmoid_gpu) output_sigmoid_gpu[i] = sigmoid;
output_gpu[i] = x_val * sigmoid;
}
}
__device__ float mish_njuffa(float x)
{
float r;
float e = expf(x);
r = 1.0f / fmaf(fmaf(-0.5f, e, -1.0f), e, -1.0f);
r = fmaf(r, x, x);
return r;
}
__device__ float mish_yashas(float x)
{
float e = __expf(x);
if (x <= -18.0f)
return x * e;
float n = e * e + 2 * e;
if (x <= -5.0f)
return x * __fdividef(n, n + 2);
return x - 2 * __fdividef(x, n + 2);
}
__device__ float mish_yashas2(float x)
{
float e = __expf(x);
float n = e * e + 2 * e;
if (x <= -0.6f)
return x * __fdividef(n, n + 2);
return x - 2 * __fdividef(x, n + 2);
}
// https://github.com/digantamisra98/Mish
__global__ void activate_array_mish_kernel(float *x, int n, float *activation_input, float *output_gpu)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i < n) {
const float MISH_THRESHOLD = 20;
float x_val = x[i];
if (activation_input) activation_input[i] = x_val; // store value before activation
//output_gpu[i] = x_val * tanh_activate_kernel(logf(1 + expf(x_val)));
// Pytorch: https://github.com/thomasbrandon/mish-cuda/blob/master/csrc/mish.h#L17-L20
// TF: https://github.com/tensorflow/addons/blob/093cdfa85d334cbe19a37624c33198f3140109ed/tensorflow_addons/custom_ops/activations/cc/kernels/mish_op.h#L40-L49
// log1p(x) == log(x + 1)
//output_gpu[i] = x_val * tanh_activate_kernel( softplus_kernel(x_val, MISH_THRESHOLD) );
output_gpu[i] = mish_yashas2(x_val);
//output_gpu[i] = mish_njuffa(x_val);
}
}
__device__ float hard_mish_yashas(float x)
{
if (x > 0)
return x;
if (x > -2)
return x * x / 2 + x;
return 0;
}
__global__ void activate_array_hard_mish_kernel(float *x, int n, float *activation_input, float *output_gpu)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i < n) {
float x_val = x[i];
if (activation_input) activation_input[i] = x_val; // store value before activation
output_gpu[i] = hard_mish_yashas(x_val);
}
}
__global__ void activate_array_leaky_kernel(float *x, int n)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
x[index] = leaky_activate_kernel(x[index]);
}
}
__global__ void activate_array_selu_kernel(float *x, int n)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
x[index] = selu_activate_kernel(x[index]);
}
}
__global__ void activate_array_gelu_kernel(float *x, int n)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
x[index] = gelu_activate_kernel(x[index]);
}
}
__global__ void activate_array_logistic_kernel(float *x, int n)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
x[index] = logistic_activate_kernel(x[index]);
}
}
__global__ void activate_array_tanh_kernel(float *x, int n)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
x[index] = tanh_activate_kernel(x[index]);
}
}
__global__ void activate_array_hardtan_kernel(float *x, int n)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
x[index] = hardtan_activate_kernel(x[index]);
}
}
__global__ void activate_array_relu_kernel(float *x, int n)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
x[index] = relu_activate_kernel(x[index]);
}
}
__global__ void activate_array_relu6_kernel(float *x, int n)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
x[index] = relu6_activate_kernel(x[index]);
}
}
__global__ void gradient_array_kernel(float *x, int n, ACTIVATION a, float *delta)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n) delta[i] *= gradient_kernel(x[i], a);
}
// https://github.com/BVLC/caffe/blob/04ab089db018a292ae48d51732dd6c66766b36b6/src/caffe/layers/swish_layer.cu#L28-L30
__global__ void gradient_array_swish_kernel(float *x, int n, float *sigmoid_gpu, float *delta)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i < n) {
float swish = x[i];
delta[i] *= swish + sigmoid_gpu[i] * (1 - swish); // gradient_kernel(x[i], a);
}
}
// https://github.com/digantamisra98/Mish
__global__ void gradient_array_mish_kernel(int n, float *activation_input_gpu, float *delta)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i < n) {
const float MISH_THRESHOLD = 20.0f;
// implementation from TensorFlow: https://github.com/tensorflow/addons/blob/093cdfa85d334cbe19a37624c33198f3140109ed/tensorflow_addons/custom_ops/activations/cc/kernels/mish_op.h#L66-L80
// implementation from Pytorch: https://github.com/thomasbrandon/mish-cuda/blob/master/csrc/mish.h#L26-L31
// log1p(x) == log(x + 1)
const float inp = activation_input_gpu[i];
const float sp = softplus_kernel(inp, MISH_THRESHOLD);
const float grad_sp = -expm1f(-sp);
//const float grad_sp = 1 - expf(-sp);
const float tsp = tanh(sp);
const float grad_tsp = (1 - tsp*tsp) * grad_sp;
const float grad = inp * grad_tsp + tsp;
delta[i] *= grad;
//float x = activation_input[i];
//float d = 2 * expf(x) + expf(2 * x) + 2;
//float w = 4 * (x + 1) + 4 * expf(2 * x) + expf(3 * x) + expf(x)*(4 * x + 6);
//float derivative = expf(x) * w / (d * d);
//delta[i] *= derivative;
}
}
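// Sketch of the derivative applied above, assuming mish(x) = x * tanh(sp(x))
// with sp(x) = softplus(x):
//   d/dx [x * tanh(sp)] = tanh(sp) + x * (1 - tanh(sp)^2) * sp'(x),
// and sp'(x) = sigmoid(x) = 1 - e^(-sp(x)), which is why grad_sp is computed as
// -expm1f(-sp) (numerically safer than 1 - expf(-sp) when sp is small).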
__device__ float hard_mish_yashas_grad(float x)
{
if (x > 0)
return 1;
if (x > -2)
return x + 1;
return 0;
}
__global__ void gradient_array_hard_mish_kernel(int n, float *activation_input_gpu, float *delta)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i < n) {
const float x = activation_input_gpu[i];
delta[i] *= hard_mish_yashas_grad(x);
}
}
__global__ void gradient_array_leaky_kernel(float *x, int n, float *delta)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
delta[index] *= leaky_gradient_kernel(x[index]);
}
}
__global__ void gradient_array_revleaky_kernel(float *x, int n, float *delta)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
delta[index] /= leaky_gradient_kernel(x[index]);
}
}
__global__ void gradient_array_selu_kernel(float *x, int n, float *delta)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
delta[index] *= selu_gradient_kernel(x[index]);
}
}
__global__ void gradient_array_gelu_kernel(float *x, int n, float *delta)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
delta[index] *= gelu_gradient_kernel(x[index]);
}
}
__global__ void gradient_array_logistic_kernel(float *x, int n, float *delta)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
delta[index] *= logistic_gradient_kernel(x[index]);
}
}
__global__ void gradient_array_tanh_kernel(float *x, int n, float *delta)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
delta[index] *= tanh_gradient_kernel(x[index]);
}
}
__global__ void gradient_array_hardtan_kernel(float *x, int n, float *delta)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
delta[index] *= hardtan_gradient_kernel(x[index]);
}
}
__global__ void gradient_array_relu_kernel(float *x, int n, float *delta)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
delta[index] *= relu_gradient_kernel(x[index]);
}
}
__global__ void gradient_array_relu6_kernel(float *x, int n, float *delta)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
delta[index] *= relu6_gradient_kernel(x[index]);
}
}
extern "C" void activate_array_ongpu(float *x, int n, ACTIVATION a)
{
const int num_blocks = get_number_of_blocks(n, BLOCK);
if (a == LINEAR) return;
else if (a == LEAKY || a == REVLEAKY) activate_array_leaky_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n);
else if (a == LOGISTIC) activate_array_logistic_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n);
else if (a == TANH) activate_array_tanh_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n);
else if (a == HARDTAN) activate_array_hardtan_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n);
else if (a == RELU) activate_array_relu_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n);
else if (a == RELU6) activate_array_relu6_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n);
else if (a == SELU) activate_array_selu_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n);
else if (a == GELU) activate_array_gelu_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n);
else
activate_array_kernel<<<cuda_gridsize(n), BLOCK, 0, get_cuda_stream()>>>(x, n, a);
CHECK_CUDA(cudaPeekAtLastError());
}
extern "C" void activate_array_swish_ongpu(float *x, int n, float *output_sigmoid_gpu, float *output_gpu)
{
const int num_blocks = get_number_of_blocks(n, BLOCK);
activate_array_swish_kernel << <cuda_gridsize(n), BLOCK, 0, get_cuda_stream() >> >(x, n, output_sigmoid_gpu, output_gpu);
CHECK_CUDA(cudaPeekAtLastError());
}
extern "C" void activate_array_mish_ongpu(float *x, int n, float *activation_input_gpu, float *output_gpu)
{
const int num_blocks = get_number_of_blocks(n, BLOCK);
activate_array_mish_kernel << <cuda_gridsize(n), BLOCK, 0, get_cuda_stream() >> >(x, n, activation_input_gpu, output_gpu);
CHECK_CUDA(cudaPeekAtLastError());
}
extern "C" void activate_array_hard_mish_ongpu(float *x, int n, float *activation_input_gpu, float *output_gpu)
{
const int num_blocks = get_number_of_blocks(n, BLOCK);
activate_array_hard_mish_kernel << <cuda_gridsize(n), BLOCK, 0, get_cuda_stream() >> >(x, n, activation_input_gpu, output_gpu);
CHECK_CUDA(cudaPeekAtLastError());
}
extern "C" void gradient_array_ongpu(float *x, int n, ACTIVATION a, float *delta)
{
const int num_blocks = get_number_of_blocks(n, BLOCK);
if (a == LINEAR) return;
else if (a == LEAKY) gradient_array_leaky_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n, delta);
else if (a == REVLEAKY) gradient_array_revleaky_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n, delta);
else if (a == LOGISTIC) gradient_array_logistic_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n, delta);
else if (a == TANH) gradient_array_tanh_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n, delta);
else if (a == HARDTAN) gradient_array_hardtan_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n, delta);
else if (a == RELU) gradient_array_relu_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n, delta);
else if (a == RELU6) gradient_array_relu6_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n, delta);
//else if (a == NORM_CHAN) gradient_array_relu_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n, delta);
else if (a == NORM_CHAN_SOFTMAX || a == NORM_CHAN) {
printf(" Error: should be used custom NORM_CHAN_SOFTMAX-function for gradient \n");
exit(0);
}
else if (a == SELU) gradient_array_selu_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n, delta);
else if (a == GELU) gradient_array_gelu_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n, delta);
else
gradient_array_kernel << <cuda_gridsize(n), BLOCK, 0, get_cuda_stream() >> > (x, n, a, delta);
CHECK_CUDA(cudaPeekAtLastError());
}
extern "C" void gradient_array_swish_ongpu(float *x, int n, float *sigmoid_gpu, float *delta)
{
const int num_blocks = get_number_of_blocks(n, BLOCK);
gradient_array_swish_kernel << <cuda_gridsize(n), BLOCK, 0, get_cuda_stream() >> > (x, n, sigmoid_gpu, delta);
CHECK_CUDA(cudaPeekAtLastError());
}
extern "C" void gradient_array_mish_ongpu(int n, float *activation_input_gpu, float *delta)
{
const int num_blocks = get_number_of_blocks(n, BLOCK);
gradient_array_mish_kernel << <cuda_gridsize(n), BLOCK, 0, get_cuda_stream() >> > (n, activation_input_gpu, delta);
CHECK_CUDA(cudaPeekAtLastError());
}
extern "C" void gradient_array_hard_mish_ongpu(int n, float *activation_input_gpu, float *delta)
{
const int num_blocks = get_number_of_blocks(n, BLOCK);
gradient_array_hard_mish_kernel << <cuda_gridsize(n), BLOCK, 0, get_cuda_stream() >> > (n, activation_input_gpu, delta);
CHECK_CUDA(cudaPeekAtLastError());
}
__global__ void activate_array_normalize_channels_kernel(float *x, int size, int batch, int channels, int wh_step, float *output_gpu)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int wh_i = i % wh_step;
int b = i / wh_step;
const float eps = 0.0001;
if (i < size) {
float sum = eps;
int k;
for (k = 0; k < channels; ++k) {
float val = x[wh_i + k * wh_step + b*wh_step*channels];
if (val > 0) sum += val;
}
for (k = 0; k < channels; ++k) {
float val = x[wh_i + k * wh_step + b*wh_step*channels];
if (val > 0) val = val / sum;
else val = 0;
output_gpu[wh_i + k * wh_step + b*wh_step*channels] = val;
}
}
}
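// Memory layout assumed by the kernel above: x is NCHW-like, i.e. element
// (batch b, channel k, spatial position wh_i) lives at
//   index = wh_i + k * wh_step + b * wh_step * channels,
// with wh_step = w*h. Each thread owns one (b, wh_i) pair and walks the channel
// dimension twice: once to accumulate the sum of positive values (seeded with
// eps to avoid division by zero), once to write the normalized outputs.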
extern "C" void activate_array_normalize_channels_ongpu(float *x, int n, int batch, int channels, int wh_step, float *output_gpu)
{
// n = w*h*c*batch
// size = w*h*batch
int size = n / channels;
const int num_blocks = get_number_of_blocks(size, BLOCK);
activate_array_normalize_channels_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> > (x, size, batch, channels, wh_step, output_gpu);
CHECK_CUDA(cudaPeekAtLastError());
}
__global__ void activate_array_normalize_channels_softmax_kernel(float *x, int size, int batch, int channels, int wh_step, float *output_gpu, int use_max_val)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int wh_i = i % wh_step;
int b = i / wh_step;
const float eps = 0.0001;
if (i < size) {
float sum = eps;
float max_val = -FLT_MAX;
int k;
if (use_max_val) {
for (k = 0; k < channels; ++k) {
float val = x[wh_i + k * wh_step + b*wh_step*channels];
if (val > max_val || k == 0) max_val = val;
}
}
else
max_val = 0;
for (k = 0; k < channels; ++k) {
float val = x[wh_i + k * wh_step + b*wh_step*channels];
sum += expf(val - max_val);
}
for (k = 0; k < channels; ++k) {
float val = x[wh_i + k * wh_step + b*wh_step*channels];
val = expf(val - max_val) / sum;
if (isnan(val) || isinf(val)) val = 0;
output_gpu[wh_i + k * wh_step + b*wh_step*channels] = val;
}
}
}
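// Numerical-stability note for the kernel above: when use_max_val is set, the
// per-position channel maximum is subtracted before exponentiation (the standard
// log-sum-exp shift), so expf() never sees large positive arguments; the result
// exp(v - max) / sum_k exp(v_k - max) is mathematically unchanged. The final
// isnan/isinf guard zeroes any value that still degenerates.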
extern "C" void activate_array_normalize_channels_softmax_ongpu(float *x, int n, int batch, int channels, int wh_step, float *output_gpu, int use_max_val)
{
// n = w*h*c*batch
// size = w*h*batch
int size = n / channels;
const int num_blocks = get_number_of_blocks(size, BLOCK);
activate_array_normalize_channels_softmax_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> > (x, size, batch, channels, wh_step, output_gpu, use_max_val);
CHECK_CUDA(cudaPeekAtLastError());
}
__global__ void gradient_array_normalize_channels_softmax_kernel(float *x, int size, int batch, int channels, int wh_step, float *delta_gpu)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int wh_i = i % wh_step;
int b = i / wh_step;
if (i < size) {
int k;
/*
float grad = 0;
for (k = 0; k < channels; ++k) {
const int index = wh_i + k * wh_step + b*wh_step*channels;
float out = x[index];
float delta = delta_gpu[index];
grad += out*fabs(delta);
}
*/
for (k = 0; k < channels; ++k) {
const int index = wh_i + k * wh_step + b*wh_step*channels;
float delta = delta_gpu[index];
float grad = x[index] * (1 - x[index]);
delta = delta * grad;
if (isnan(delta) || isinf(delta)) delta = 0;
delta_gpu[index] = delta;
}
}
}
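// Note on the kernel above: the incoming delta is scaled by out * (1 - out),
// i.e. only the diagonal entry of the softmax Jacobian for each channel; the
// cross-channel (off-diagonal) terms, sketched in the commented-out block, are
// not applied here.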
extern "C" void gradient_array_normalize_channels_softmax_ongpu(float *output_gpu, int n, int batch, int channels, int wh_step, float *delta_gpu)
{
// n = w*h*c*batch
// size = w*h*batch
int size = n / channels;
const int num_blocks = get_number_of_blocks(size, BLOCK);
gradient_array_normalize_channels_softmax_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> > (output_gpu, size, batch, channels, wh_step, delta_gpu);
CHECK_CUDA(cudaPeekAtLastError());
}
__global__ void gradient_array_normalize_channels_kernel(float *x, int size, int batch, int channels, int wh_step, float *delta_gpu)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int wh_i = i % wh_step;
int b = i / wh_step;
if (i < size) {
int k;
/*
float grad = 0;
for (k = 0; k < channels; ++k) {
const int index = wh_i + k * wh_step + b*wh_step*channels;
float out = x[index];
float delta = delta_gpu[index];
grad += out*fabs(delta);
}
*/
for (k = 0; k < channels; ++k) {
const int index = wh_i + k * wh_step + b*wh_step*channels;
if (x[index] > 0) {
float delta = delta_gpu[index];
float grad = x[index];
delta = delta * grad;
delta_gpu[index] = delta;
}
}
}
}
extern "C" void gradient_array_normalize_channels_ongpu(float *output_gpu, int n, int batch, int channels, int wh_step, float *delta_gpu)
{
// n = w*h*c*batch
// size = w*h*batch
int size = n / channels;
const int num_blocks = get_number_of_blocks(size, BLOCK);
gradient_array_normalize_channels_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> > (output_gpu, size, batch, channels, wh_step, delta_gpu);
CHECK_CUDA(cudaPeekAtLastError());
}
|
74baa507477eb784037b7490513e885050dc5336.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "operation/operation_interface.h"
#include "common_hip.cuh"
namespace SparseOperationKit {
__global__ void move_data(const int64_t* src_ptr,
int64_t* dst_ptr,
size_t size,
size_t* valid_nums,
size_t num_per_replica) {
size_t gid = blockIdx.x * blockDim.x + threadIdx.x;
size_t grid = blockDim.x * gridDim.x;
for (size_t i = gid; i < size; i += grid) {
size_t which_replica = i / num_per_replica;
size_t offset_in_replica = i % num_per_replica;
if (offset_in_replica >= valid_nums[which_replica]) continue;
size_t dst_offset = 0;
for (size_t j = which_replica; j > 0; j--) {
dst_offset += valid_nums[j-1];
}
dst_ptr[dst_offset + offset_in_replica] = src_ptr[i];
}
}
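// Worked example of the compaction done by move_data (hypothetical sizes): with
// 2 replicas, num_per_replica = 4 and valid_nums = {3, 2}, the gathered src
// buffer [a0 a1 a2 __ | b0 b1 __ __] is packed into dst = [a0 a1 a2 b0 b1];
// dst_offset for replica r is the prefix sum of valid_nums[0..r-1], computed by
// the small backwards loop above.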
__global__ void add_offset(int64_t* ptr,
size_t size,
size_t* valid_nums,
size_t num_per_replica,
size_t rows_num_per_replica) {
size_t gid = blockIdx.x * blockDim.x + threadIdx.x;
size_t grid = blockDim.x * gridDim.x;
for (size_t i = gid; i < size; i += grid) {
size_t which_replica = i / num_per_replica;
size_t offset_in_replica = i % num_per_replica;
if (offset_in_replica >= valid_nums[which_replica]) continue;
ptr[i] += (which_replica * rows_num_per_replica);
}
}
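// add_offset turns per-replica (local) row indices into global ones by adding
// which_replica * rows_num_per_replica, so rows gathered from different
// replicas no longer collide; only the valid entries of each replica are
// touched.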
class AllGatherDispatcher : public Dispatcher {
public:
explicit AllGatherDispatcher(ConstructionContext_t context)
: Dispatcher(context), resource_mgr_(context->get_resource_mgr()) {
}
void allocate_forward_spaces() override {
auto rows_num_per_sample = base_context()->get_slot_num();
auto max_nnz = base_context()->get_max_nnz();
auto max_feature_num = base_context()->get_max_feature_num();
const size_t global_batch_size = base_context()->get_global_batch_size();
for (size_t dev_id = 0; dev_id < resource_mgr_->get_local_gpu_count(); ++dev_id) {
auto &buffer = base_context()->get_buffer(dev_id);
auto &host_buffer = base_context()->get_host_buffer(dev_id);
// reserve spaces for values buffers
{
Tensor2<int64_t> values_buffer;
buffer->reserve({1, global_batch_size * rows_num_per_sample * max_nnz}, &values_buffer);
values_buffers_.push_back(values_buffer);
replica_num_elements_ = (global_batch_size * rows_num_per_sample * max_nnz) /
resource_mgr_->get_global_gpu_count();
}
// reserve spaces for indices buffers
{
Tensor2<int64_t> indices_buffer;
buffer->reserve({1, global_batch_size * rows_num_per_sample * max_nnz}, &indices_buffer);
indices_buffers_.push_back(indices_buffer);
}
{
Tensor2<int64_t> output_values;
buffer->reserve({1, global_batch_size * rows_num_per_sample * max_nnz}, &output_values);
output_values_.push_back(output_values);
}
// reserve spaces for num elements
{
Tensor2<size_t> host_num_element;
host_buffer->reserve({1, 1}, &host_num_element);
host_num_elements_.push_back(host_num_element);
Tensor2<size_t> num_element;
buffer->reserve({1, resource_mgr_->get_global_gpu_count()}, &num_element);
num_elements_.push_back(num_element);
}
{
Tensor2<size_t> total_valid_num;
host_buffer->reserve({1, 1}, &total_valid_num);
total_valid_num_.push_back(total_valid_num);
}
} // for dev_id
}
void allocate_backward_spaces() override {
}
void forward(const Context_t &replica_context, const bool training) override {
const size_t global_replica_id = replica_context->get_global_replica_id();
const size_t local_replica_id = resource_mgr_->cal_local_id_from_global_id(global_replica_id);
auto &local_gpu = resource_mgr_->get_local_gpu(local_replica_id);
auto &replica_values = replica_context->input("replica_values");
auto &replica_row_indices = replica_context->input("replica_indices");
CK_CUDA(hipMemcpyAsync(values_buffers_[local_replica_id].get_ptr() + (global_replica_id * replica_num_elements_),
replica_values->GetPtrWithType<void>(),
replica_values->get_size_in_bytes(),
hipMemcpyDeviceToDevice,
local_gpu->get_stream()));
CK_CUDA(hipMemcpyAsync(indices_buffers_[local_replica_id].get_ptr() + (global_replica_id * replica_num_elements_),
replica_row_indices->GetPtrWithType<void>(),
replica_row_indices->get_size_in_bytes(),
hipMemcpyDeviceToDevice,
local_gpu->get_stream()));
host_num_elements_[local_replica_id].get_ptr()[0] = replica_values->get_num_elements();
CK_CUDA(hipMemcpyAsync(num_elements_[local_replica_id].get_ptr() + global_replica_id,
host_num_elements_[local_replica_id].get_ptr(),
sizeof(size_t) * 1,
hipMemcpyHostToDevice,
local_gpu->get_stream()));
CK_NCCL(ncclGroupStart());
CK_NCCL(ncclAllGather(values_buffers_[local_replica_id].get_ptr() + (global_replica_id * replica_num_elements_),
values_buffers_[local_replica_id].get_ptr(),
replica_num_elements_,
ncclInt64,
local_gpu->get_nccl(),
local_gpu->get_stream()));
CK_NCCL(ncclAllGather(indices_buffers_[local_replica_id].get_ptr() + (global_replica_id * replica_num_elements_),
indices_buffers_[local_replica_id].get_ptr(),
replica_num_elements_,
ncclInt64,
local_gpu->get_nccl(),
local_gpu->get_stream()));
CK_NCCL(ncclAllGather(num_elements_[local_replica_id].get_ptr() + global_replica_id,
num_elements_[local_replica_id].get_ptr(),
1,
ncclUint64,
local_gpu->get_nccl(),
local_gpu->get_stream()));
CK_NCCL(ncclGroupEnd());
// make the memory successive
hipLaunchKernelGGL(( move_data), dim3(local_gpu->get_sm_count()), dim3(1024), 0, local_gpu->get_stream(),
values_buffers_[local_replica_id].get_ptr(),
output_values_[local_replica_id].get_ptr(),
values_buffers_[local_replica_id].get_num_elements(),
num_elements_[local_replica_id].get_ptr(),
replica_num_elements_);
// calculate the offset of row_indices
hipLaunchKernelGGL(( add_offset), dim3(local_gpu->get_sm_count()), dim3(1024), 0, local_gpu->get_stream(),
indices_buffers_[local_replica_id].get_ptr(),
indices_buffers_[local_replica_id].get_num_elements(),
num_elements_[local_replica_id].get_ptr(),
replica_num_elements_,
replica_num_elements_ / base_context()->get_max_nnz());
// values_buffers_ is no longer needed here, so reuse it as the indices output buffer.
hipLaunchKernelGGL(( move_data), dim3(local_gpu->get_sm_count()), dim3(1024), 0, local_gpu->get_stream(),
indices_buffers_[local_replica_id].get_ptr(),
values_buffers_[local_replica_id].get_ptr(),
indices_buffers_[local_replica_id].get_num_elements(),
num_elements_[local_replica_id].get_ptr(),
replica_num_elements_);
hipLaunchKernelGGL(( reduce_sum), dim3(1), dim3(1), 0, local_gpu->get_stream(), num_elements_[local_replica_id].get_ptr(),
num_elements_[local_replica_id].get_num_elements(),
total_valid_num_[local_replica_id].get_ptr());
// copy back to host
resource_mgr_->sync_gpu(local_replica_id);
CK_CUDA(hipMemcpyAsync(host_num_elements_[local_replica_id].get_ptr(),
total_valid_num_[local_replica_id].get_ptr(),
total_valid_num_[local_replica_id].get_size_in_bytes(),
hipMemcpyDeviceToHost,
local_gpu->get_stream()));
resource_mgr_->sync_gpu(local_replica_id);
// set output for this operation
replica_context->set_output("total_values", output_values_[local_replica_id]);
replica_context->set_output("total_row_indices", values_buffers_[local_replica_id]);
replica_context->set_output("dev_total_num_elements", num_elements_[local_replica_id]);
replica_context->set_output("host_total_num_elements", host_num_elements_[local_replica_id]);
}
void backward(const Context_t &replica_context) override {
// it does nothing
}
private:
std::shared_ptr<ResourcesManager> resource_mgr_;
size_t replica_num_elements_ = 0;
Tensors2<int64_t> values_buffers_;
Tensors2<int64_t> indices_buffers_;
Tensors2<int64_t> output_values_;
Tensors2<size_t> host_num_elements_;
Tensors2<size_t> num_elements_;
Tensors2<size_t> total_valid_num_;
};
REGISTER_INPUT_DISPATCHER_BUILDER("all_gather_dispatcher",
AllGatherDispatcher);
} // namespace SparseOperationKit | 74baa507477eb784037b7490513e885050dc5336.cu | /*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "operation/operation_interface.h"
#include "common.cuh"
namespace SparseOperationKit {
__global__ void move_data(const int64_t* src_ptr,
int64_t* dst_ptr,
size_t size,
size_t* valid_nums,
size_t num_per_replica) {
size_t gid = blockIdx.x * blockDim.x + threadIdx.x;
size_t grid = blockDim.x * gridDim.x;
for (size_t i = gid; i < size; i += grid) {
size_t which_replica = i / num_per_replica;
size_t offset_in_replica = i % num_per_replica;
if (offset_in_replica >= valid_nums[which_replica]) continue;
size_t dst_offset = 0;
for (size_t j = which_replica; j > 0; j--) {
dst_offset += valid_nums[j-1];
}
dst_ptr[dst_offset + offset_in_replica] = src_ptr[i];
}
}
__global__ void add_offset(int64_t* ptr,
size_t size,
size_t* valid_nums,
size_t num_per_replica,
size_t rows_num_per_replica) {
size_t gid = blockIdx.x * blockDim.x + threadIdx.x;
size_t grid = blockDim.x * gridDim.x;
for (size_t i = gid; i < size; i += grid) {
size_t which_replica = i / num_per_replica;
size_t offset_in_replica = i % num_per_replica;
if (offset_in_replica >= valid_nums[which_replica]) continue;
ptr[i] += (which_replica * rows_num_per_replica);
}
}
class AllGatherDispatcher : public Dispatcher {
public:
explicit AllGatherDispatcher(ConstructionContext_t context)
: Dispatcher(context), resource_mgr_(context->get_resource_mgr()) {
}
void allocate_forward_spaces() override {
auto rows_num_per_sample = base_context()->get_slot_num();
auto max_nnz = base_context()->get_max_nnz();
auto max_feature_num = base_context()->get_max_feature_num();
const size_t global_batch_size = base_context()->get_global_batch_size();
for (size_t dev_id = 0; dev_id < resource_mgr_->get_local_gpu_count(); ++dev_id) {
auto &buffer = base_context()->get_buffer(dev_id);
auto &host_buffer = base_context()->get_host_buffer(dev_id);
// reserve spaces for values buffers
{
Tensor2<int64_t> values_buffer;
buffer->reserve({1, global_batch_size * rows_num_per_sample * max_nnz}, &values_buffer);
values_buffers_.push_back(values_buffer);
replica_num_elements_ = (global_batch_size * rows_num_per_sample * max_nnz) /
resource_mgr_->get_global_gpu_count();
}
// reserve spaces for indices buffers
{
Tensor2<int64_t> indices_buffer;
buffer->reserve({1, global_batch_size * rows_num_per_sample * max_nnz}, &indices_buffer);
indices_buffers_.push_back(indices_buffer);
}
{
Tensor2<int64_t> output_values;
buffer->reserve({1, global_batch_size * rows_num_per_sample * max_nnz}, &output_values);
output_values_.push_back(output_values);
}
// reserve spaces for num elements
{
Tensor2<size_t> host_num_element;
host_buffer->reserve({1, 1}, &host_num_element);
host_num_elements_.push_back(host_num_element);
Tensor2<size_t> num_element;
buffer->reserve({1, resource_mgr_->get_global_gpu_count()}, &num_element);
num_elements_.push_back(num_element);
}
{
Tensor2<size_t> total_valid_num;
host_buffer->reserve({1, 1}, &total_valid_num);
total_valid_num_.push_back(total_valid_num);
}
} // for dev_id
}
void allocate_backward_spaces() override {
}
void forward(const Context_t &replica_context, const bool training) override {
const size_t global_replica_id = replica_context->get_global_replica_id();
const size_t local_replica_id = resource_mgr_->cal_local_id_from_global_id(global_replica_id);
auto &local_gpu = resource_mgr_->get_local_gpu(local_replica_id);
auto &replica_values = replica_context->input("replica_values");
auto &replica_row_indices = replica_context->input("replica_indices");
CK_CUDA(cudaMemcpyAsync(values_buffers_[local_replica_id].get_ptr() + (global_replica_id * replica_num_elements_),
replica_values->GetPtrWithType<void>(),
replica_values->get_size_in_bytes(),
cudaMemcpyDeviceToDevice,
local_gpu->get_stream()));
CK_CUDA(cudaMemcpyAsync(indices_buffers_[local_replica_id].get_ptr() + (global_replica_id * replica_num_elements_),
replica_row_indices->GetPtrWithType<void>(),
replica_row_indices->get_size_in_bytes(),
cudaMemcpyDeviceToDevice,
local_gpu->get_stream()));
host_num_elements_[local_replica_id].get_ptr()[0] = replica_values->get_num_elements();
CK_CUDA(cudaMemcpyAsync(num_elements_[local_replica_id].get_ptr() + global_replica_id,
host_num_elements_[local_replica_id].get_ptr(),
sizeof(size_t) * 1,
cudaMemcpyHostToDevice,
local_gpu->get_stream()));
CK_NCCL(ncclGroupStart());
CK_NCCL(ncclAllGather(values_buffers_[local_replica_id].get_ptr() + (global_replica_id * replica_num_elements_),
values_buffers_[local_replica_id].get_ptr(),
replica_num_elements_,
ncclInt64,
local_gpu->get_nccl(),
local_gpu->get_stream()));
CK_NCCL(ncclAllGather(indices_buffers_[local_replica_id].get_ptr() + (global_replica_id * replica_num_elements_),
indices_buffers_[local_replica_id].get_ptr(),
replica_num_elements_,
ncclInt64,
local_gpu->get_nccl(),
local_gpu->get_stream()));
CK_NCCL(ncclAllGather(num_elements_[local_replica_id].get_ptr() + global_replica_id,
num_elements_[local_replica_id].get_ptr(),
1,
ncclUint64,
local_gpu->get_nccl(),
local_gpu->get_stream()));
CK_NCCL(ncclGroupEnd());
// make the memory successive
move_data<<<local_gpu->get_sm_count(), 1024, 0, local_gpu->get_stream()>>>(
values_buffers_[local_replica_id].get_ptr(),
output_values_[local_replica_id].get_ptr(),
values_buffers_[local_replica_id].get_num_elements(),
num_elements_[local_replica_id].get_ptr(),
replica_num_elements_);
// calculate the offset of row_indices
add_offset<<<local_gpu->get_sm_count(), 1024, 0, local_gpu->get_stream()>>>(
indices_buffers_[local_replica_id].get_ptr(),
indices_buffers_[local_replica_id].get_num_elements(),
num_elements_[local_replica_id].get_ptr(),
replica_num_elements_,
replica_num_elements_ / base_context()->get_max_nnz());
// values_buffers_ is no longer needed here, so reuse it as the indices output buffer.
move_data<<<local_gpu->get_sm_count(), 1024, 0, local_gpu->get_stream()>>>(
indices_buffers_[local_replica_id].get_ptr(),
values_buffers_[local_replica_id].get_ptr(),
indices_buffers_[local_replica_id].get_num_elements(),
num_elements_[local_replica_id].get_ptr(),
replica_num_elements_);
reduce_sum<<<1, 1, 0, local_gpu->get_stream()>>>(num_elements_[local_replica_id].get_ptr(),
num_elements_[local_replica_id].get_num_elements(),
total_valid_num_[local_replica_id].get_ptr());
// copy back to host
resource_mgr_->sync_gpu(local_replica_id);
CK_CUDA(cudaMemcpyAsync(host_num_elements_[local_replica_id].get_ptr(),
total_valid_num_[local_replica_id].get_ptr(),
total_valid_num_[local_replica_id].get_size_in_bytes(),
cudaMemcpyDeviceToHost,
local_gpu->get_stream()));
resource_mgr_->sync_gpu(local_replica_id);
// set output for this operation
replica_context->set_output("total_values", output_values_[local_replica_id]);
replica_context->set_output("total_row_indices", values_buffers_[local_replica_id]);
replica_context->set_output("dev_total_num_elements", num_elements_[local_replica_id]);
replica_context->set_output("host_total_num_elements", host_num_elements_[local_replica_id]);
}
void backward(const Context_t &replica_context) override {
// it does nothing
}
private:
std::shared_ptr<ResourcesManager> resource_mgr_;
size_t replica_num_elements_ = 0;
Tensors2<int64_t> values_buffers_;
Tensors2<int64_t> indices_buffers_;
Tensors2<int64_t> output_values_;
Tensors2<size_t> host_num_elements_;
Tensors2<size_t> num_elements_;
Tensors2<size_t> total_valid_num_;
};
REGISTER_INPUT_DISPATCHER_BUILDER("all_gather_dispatcher",
AllGatherDispatcher);
} // namespace SparseOperationKit |
3391ee4eda8b2e44b68c0de2a78a3dcd13e1983d.hip | // !!! This is a file automatically generated by hipify!!!
extern "C"
{
#include "completion.h"
#include "ciss.h"
#include "base.h"
#include "matrixprocess.h"
#include <stdio.h>
#include <sys/time.h>
#include <stdlib.h>
}
#include "sgd.cuh"
#include "loss.cuh"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <omp.h>
/**
* @brief Update a three-mode model based on a given observation.
*
* @param DEFAULT_NFACTORS The rank.
* @param train The training data.
* @param nnz_index The index of the observation to update from.
* @param mats The model to update.
static inline void p_update_sgd(
sptensor_t * train,
idx_t nnz_index,
ordi_matrix ** mats,
double learning_rate,
double regularization_index
)
{
idx_t const x = nnz_index;
assert(train->nmodes == 3);
idx_t ** ind = train->ind;
double * arow = mats[0] + (ind[0][x] * DEFAULT_NFACTORS);
double * brow = mats[1] + (ind[1][x] * DEFAULT_NFACTORS);
double * crow = mats[2] + (ind[2][x] * DEFAULT_NFACTORS);
/* predict value
double predicted = 0;
for(idx_t f=0; f < DEFAULT_NFACTORS; ++f) {
predicted += arow[f] * brow[f] * crow[f];
}
double const loss = train->vals[x] - predicted;
double const rate = learning_rate;
double reg = regularization_index;
/* update rows
for(idx_t f=0; f < DEFAULT_NFACTORS; ++f) {
double const moda = (loss * brow[f] * crow[f]) - (reg[0] * arow[f]);
double const modb = (loss * arow[f] * crow[f]) - (reg[1] * brow[f]);
double const modc = (loss * arow[f] * brow[f]) - (reg[2] * crow[f]);
arow[f] += rate * moda;
brow[f] += rate * modb;
crow[f] += rate * modc;
}
}*/
//the gpu kernel
__global__ void p_update_sgd_gpu(cissbasic_t * d_traina,
ordi_matrix * d_factora,
ordi_matrix * d_factorb,
ordi_matrix * d_factorc,
double * d_value_ha,
double * d_value_hb,
double * d_value_hc,
double learning_rate,
double regularization_index,
idx_t tilebegin,
idx_t tileend)
{
//get thread and block index
idx_t bid = blockIdx.x;
idx_t tid = threadIdx.x;
idx_t tileid = bid * DEFAULT_BLOCKSIZE + tid + tilebegin;
//idx_t tileid = bid * DEFAULT_BLOCKSIZE + tid;
double * entries = d_traina->entries;
idx_t localtile = tileid*((DEFAULT_T_TILE_LENGTH + 1) * DEFAULT_T_TILE_WIDTH);
//buffer for matrices
double __align__(256) mabuffer[DEFAULT_NFACTORS];
double __align__(256) mbbuffer[DEFAULT_NFACTORS];
double __align__(256) mcbuffer[DEFAULT_NFACTORS];
double __align__(256) localtbuffer[6];
idx_t a,b,c, localcounter;
double localvalue;
#ifdef SGD_DEBUG
if(bid ==0 && tid == 0) {printf("my tilebegin is %ld, tileend is %ld\n", tilebegin, tileend);}
__syncthreads();
#endif
if(tileid < tileend)
{
#ifdef SGD_DEBUG
printf("now mytileid is %ld, localtile is %ld, the first element in entries is %lf\n", tileid, localtile, entries[localtile]);
#endif
//get the indices and value
idx_t f_id = (idx_t)(entries[localtile] * (-1));
#ifdef SGD_DEBUG
//printf("now in thread %ld my fid is %ld\n", tid, f_id);
#endif
idx_t l_id = (idx_t)(entries[localtile+1] * (-1));
idx_t bitmap = (idx_t)(entries[localtile+2]);
if(bitmap != 0)
{
bitmap = __brevll(bitmap);
while((bitmap & 1) == 0) {bitmap = (bitmap >> 1);}
bitmap = (bitmap >> 1);
localtile += DEFAULT_T_TILE_WIDTH;
for(idx_t j = 0; j < DEFAULT_T_TILE_LENGTH/2; j++)
{
//unroll loop and load
localtbuffer[0] = entries[localtile];
localtbuffer[1] = entries[localtile + 1];
localtbuffer[2] = entries[localtile + 2];
if(localtbuffer[0] == -1 && localtbuffer[1] == -1) break;
//for the first
f_id += (!(bitmap & 1));
#ifdef SGD_DEBUG
if(f_id >= 260208)printf("now in thread %ld my fid is %ld\n", tid, f_id);
#endif
bitmap = bitmap >> 1;
a = d_traina->directory[f_id] - 1;
localcounter = d_traina->dcounter[f_id + 1] - d_traina->dcounter[f_id];
//dcounter = d_traina->dcounter[f_id];
b = (idx_t)localtbuffer[0] - 1;
c = (idx_t)localtbuffer[1] - 1;
#ifdef SGD_DEBUG
if(localtbuffer[0] == 0 or localtbuffer[1] == 0)printf("now in thread %ld are zero indices\n", tid);
#endif
localvalue = localtbuffer[2];
#ifdef SGD_DEBUG
printf("now a b c in tile %ld are %ld %ld %ld\n", tileid, a, b, c);
#endif
//if(localtbuffer[0] == -1 && localtbuffer[1] == -1) break;
for(idx_t i = 0; i< DEFAULT_NFACTORS; i++)
{
mabuffer[i] = ((d_factora->values)[a * DEFAULT_NFACTORS + i] + d_value_ha[a * DEFAULT_NFACTORS + i])/2;
mbbuffer[i] = ((d_factorb->values)[b * DEFAULT_NFACTORS + i] + d_value_hb[b * DEFAULT_NFACTORS + i])/2;
mcbuffer[i] = ((d_factorc->values)[c * DEFAULT_NFACTORS + i] + d_value_hc[c * DEFAULT_NFACTORS + i])/2;
}
/* predict value */
double predicted = 0;
for(idx_t f=0; f < DEFAULT_NFACTORS; f++) {
predicted += mabuffer[f] * mbbuffer[f] * mcbuffer[f];
}
predicted = localvalue - predicted;
/* update rows */
for(idx_t f=0; f < DEFAULT_NFACTORS; f++) {
double moda = (predicted * mbbuffer[f] * mcbuffer[f]) - (regularization_index * mabuffer[f]);
double modb = (predicted * mabuffer[f] * mcbuffer[f]) - (regularization_index * mbbuffer[f]);
double modc = (predicted * mbbuffer[f] * mabuffer[f]) - (regularization_index * mcbuffer[f]);
atomicAdd(&(d_factora->values[a * DEFAULT_NFACTORS + f]), learning_rate*moda * (double)(SGD_MODIFICATIONA));
atomicAdd(&(d_factorb->values[b * DEFAULT_NFACTORS + f]), learning_rate*modb * (double)(SGD_MODIFICATIONB));
atomicAdd(&(d_factorc->values[c * DEFAULT_NFACTORS + f]), learning_rate*modc * (double)(SGD_MODIFICATIONC));
}
//for the second
localtbuffer[3] = entries[localtile + 3];
localtbuffer[4] = entries[localtile + 4];
localtbuffer[5] = entries[localtile + 5];
f_id += (!(bitmap & 1));
#ifdef SGD_DEBUG
if(f_id >= 260208)printf("now in thread %ld my fid is %ld\n", tid, f_id);
#endif
bitmap = bitmap >> 1;
a = d_traina->directory[f_id] - 1;
localcounter = d_traina->dcounter[f_id + 1] - d_traina->dcounter[f_id];
b = (idx_t)localtbuffer[3] - 1;
c = (idx_t)localtbuffer[4] - 1;
#ifdef SGD_DEBUG
if(localtbuffer[3] == 0 or localtbuffer[4] == 0)printf("now in thread %ld are zero indices\n", tid);
#endif
#ifdef SGD_DEBUG
printf("now a b c in tile %ld are %ld %ld %ld\n", tileid, a, b, c);
#endif
localvalue = localtbuffer[5];
if(localtbuffer[3] == -1 && localtbuffer[4] == -1) break;
for(idx_t i = 0; i< DEFAULT_NFACTORS; i++)
{
mabuffer[i] = (d_factora->values)[a * DEFAULT_NFACTORS + i] + d_value_ha[a * DEFAULT_NFACTORS + i];
mbbuffer[i] = (d_factorb->values)[b * DEFAULT_NFACTORS + i] + d_value_hb[b * DEFAULT_NFACTORS + i];
mcbuffer[i] = (d_factorc->values)[c * DEFAULT_NFACTORS + i] + d_value_hc[c * DEFAULT_NFACTORS + i];
}
/* predict value */
predicted = 0;
for(idx_t f=0; f < DEFAULT_NFACTORS; f++) {
predicted += mabuffer[f] * mbbuffer[f] * mcbuffer[f];
}
predicted = localvalue - predicted;
/* update rows */
for(idx_t f=0; f < DEFAULT_NFACTORS; f++) {
double moda = (predicted * mbbuffer[f] * mcbuffer[f]) - (regularization_index * mabuffer[f]);
double modb = (predicted * mabuffer[f] * mcbuffer[f]) - (regularization_index * mbbuffer[f]);
double modc = (predicted * mbbuffer[f] * mabuffer[f]) - (regularization_index * mcbuffer[f]);
atomicAdd(&(d_factora->values[a * DEFAULT_NFACTORS + f]), learning_rate*moda * (double)(SGD_MODIFICATIONA));
atomicAdd(&(d_factorb->values[b * DEFAULT_NFACTORS + f]), learning_rate*modb * (double)(SGD_MODIFICATIONB));
atomicAdd(&(d_factorc->values[c * DEFAULT_NFACTORS + f]), learning_rate*modc * (double)(SGD_MODIFICATIONC));
}
localtile += 2 * DEFAULT_T_TILE_WIDTH;
}
}
}
__syncthreads();
}
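// Reading of the tile layout consumed by p_update_sgd_gpu above (inferred from
// the indexing here, not from a format specification, and assuming
// DEFAULT_T_TILE_WIDTH == 3): each tile occupies
// (DEFAULT_T_TILE_LENGTH + 1) * DEFAULT_T_TILE_WIDTH doubles. The first row is a
// header holding the negated fiber id (-f_id), a negated local id (-l_id) and a
// bitmap; after bit-reversal the bitmap is shifted once per stored nonzero, and
// a 0 bit means "advance to the next mode-0 fiber" (f_id++). The remaining rows
// store one nonzero each as an (index_b, index_c, value) triple, processed two
// per loop iteration, with a (-1, -1) index pair marking the end of a partially
// filled tile.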
/**
* @brief The main function for tensor completion in sgd
* @param train The tensor for generating factor matrices
* @param validation The tensor for validation(RMSE)
* @param test The tensor for testing the quality
* @param regularization_index Lambda
*/
extern "C"{
void tc_sgd(sptensor_t * traina,
sptensor_t * trainb,
sptensor_t * trainc,
sptensor_t * validation,
sptensor_t * test,
ordi_matrix ** mats,
ordi_matrix ** best_mats,
ordi_matrix ** aux_mats,
int algorithm_index,
double regularization_index,
double learning_rate,
double * best_rmse,
double * tolerance,
idx_t * nbadepochs,
idx_t * bestepochs,
idx_t * max_badepochs)
{
//only in sgd
idx_t steps_size = 1000;
idx_t nmodes = traina->nmodes;
//initialize the devices
int deviceCount;
hipGetDeviceCount(&deviceCount);
int n;
//print the GPU status
for(n = 0; n < deviceCount; n++)
{
hipDeviceProp_t dprop;
hipGetDeviceProperties(&dprop, n);
printf(" %d: %s\n", n, dprop.name);
}
omp_set_num_threads(deviceCount);
//prepare the tensor in TB-COO
cissbasic_t * h_traina = cissbasic_alloc(traina, 0, traina->ind[0][0], (traina->ind[0][traina->nnz - 1] + 1));
#ifdef MCISS_DEBUG
fprintf(stdout, "the newtensor\n");
fprintf(stdout, "the lasti is %ld\n",traina->ind[0][traina->nnz - 1] + 1);
cissbasic_display(h_traina);
#endif
struct timeval start;
struct timeval end;
idx_t diff;
//copy the real and auxiliary factor matrices
cissbasic_t ** d_traina = (cissbasic_t**)malloc(deviceCount * sizeof(cissbasic_t*));
idx_t ** d_directory_a = (idx_t**)malloc(deviceCount * sizeof(idx_t*));
idx_t ** d_counter_a = (idx_t**)malloc(deviceCount * sizeof(idx_t*));
idx_t ** d_dims_a = (idx_t**)malloc(deviceCount * sizeof(idx_t*));
double ** d_entries_a = (double**)malloc(deviceCount * sizeof(double*));
double ** d_value_ha = (double**)malloc(deviceCount * sizeof(double*));
double ** d_value_hb = (double**)malloc(deviceCount * sizeof(double*));
double ** d_value_hc = (double**)malloc(deviceCount * sizeof(double*));
ordi_matrix ** d_factora = (ordi_matrix**)malloc(deviceCount * sizeof(ordi_matrix*));
ordi_matrix ** d_factorb = (ordi_matrix**)malloc(deviceCount * sizeof(ordi_matrix*));
ordi_matrix ** d_factorc = (ordi_matrix**)malloc(deviceCount * sizeof(ordi_matrix*));
double ** d_value_a = (double**)malloc(deviceCount * sizeof(double*));
double ** d_value_b = (double**)malloc(deviceCount * sizeof(double*));
double ** d_value_c = (double**)malloc(deviceCount * sizeof(double*));
#pragma omp parallel
{
//prepare the threads
//
unsigned int cpu_thread_id = omp_get_thread_num();
//unsigned int num_cpu_threads = omp_get_num_threads();
//set gpus
hipSetDevice(cpu_thread_id % deviceCount); // "% num_gpus" allows more CPU threads than GPU devices
idx_t * d_itemp1, *d_itemp2, * d_itemp3;
double * d_ftemp;
cissbasic_t * myh_traina = (cissbasic_t*)malloc(sizeof(cissbasic_t));
myh_traina->directory = h_traina->directory;
myh_traina->dcounter = h_traina->dcounter;
myh_traina->entries = h_traina->entries;
myh_traina->dims = h_traina->dims;
myh_traina->nnz = h_traina->nnz;
myh_traina->nmodes = h_traina->nmodes;
myh_traina->size = h_traina->size;
myh_traina->dlength = h_traina->dlength;
#ifdef MCISS_DEBUG
fprintf(stdout, "in cpu_thread %d, my tensor\n", cpu_thread_id);
cissbasic_display(myh_traina);
#endif
//copy tensor for mode-1
HANDLE_ERROR(hipMalloc((void**)&(d_traina[cpu_thread_id]), sizeof(cissbasic_t)));
HANDLE_ERROR(hipMalloc((void**)&(d_directory_a[cpu_thread_id]), myh_traina->dlength * sizeof(idx_t)));
HANDLE_ERROR(hipMalloc((void**)&(d_counter_a[cpu_thread_id]), (myh_traina->dlength + 1) * sizeof(idx_t)));
HANDLE_ERROR(hipMalloc((void**)&(d_entries_a[cpu_thread_id]), myh_traina->size * DEFAULT_T_TILE_WIDTH * sizeof(double)));
HANDLE_ERROR(hipMalloc((void**)&(d_dims_a[cpu_thread_id]), nmodes * sizeof(idx_t)));
HANDLE_ERROR(hipMemcpy(d_directory_a[cpu_thread_id], myh_traina->directory, myh_traina->dlength*sizeof(idx_t), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_counter_a[cpu_thread_id], myh_traina->dcounter, (myh_traina->dlength + 1)*sizeof(idx_t), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_entries_a[cpu_thread_id], myh_traina->entries, myh_traina->size * DEFAULT_T_TILE_WIDTH * sizeof(double), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_dims_a[cpu_thread_id], myh_traina->dims, nmodes*sizeof(idx_t), hipMemcpyHostToDevice));
d_itemp1 = myh_traina->directory;
d_itemp2 = myh_traina->dims;
d_itemp3 = myh_traina->dcounter;
d_ftemp = myh_traina->entries;
myh_traina->directory = d_directory_a[cpu_thread_id];
myh_traina->dcounter = d_counter_a[cpu_thread_id];
myh_traina->dims = d_dims_a[cpu_thread_id];
myh_traina->entries = d_entries_a[cpu_thread_id];
HANDLE_ERROR(hipMemcpy(d_traina[cpu_thread_id], myh_traina, sizeof(cissbasic_t), hipMemcpyHostToDevice));
myh_traina->directory = d_itemp1;
myh_traina->dims = d_itemp2;
myh_traina->dcounter = d_itemp3;
myh_traina->entries = d_ftemp;
HANDLE_ERROR(hipMalloc((void**)&(d_factora[cpu_thread_id]), sizeof(ordi_matrix)));
HANDLE_ERROR(hipMalloc((void**)&(d_value_a[cpu_thread_id]), mats[0]->I * DEFAULT_NFACTORS * sizeof(double)));
HANDLE_ERROR(hipMemcpy(d_value_a[cpu_thread_id], mats[0]->values, mats[0]->I * DEFAULT_NFACTORS * sizeof(double), hipMemcpyHostToDevice));
#pragma omp critical
{d_ftemp = mats[0]->values;
mats[0]->values = d_value_a[cpu_thread_id];
HANDLE_ERROR(hipMemcpy(d_factora[cpu_thread_id], mats[0], sizeof(ordi_matrix), hipMemcpyHostToDevice));
mats[0]->values = d_ftemp;
}
HANDLE_ERROR(hipDeviceSynchronize());
#pragma omp barrier
HANDLE_ERROR(hipMalloc((void**)&(d_factorb[cpu_thread_id]), sizeof(ordi_matrix)));
HANDLE_ERROR(hipMalloc((void**)&(d_value_b[cpu_thread_id]), mats[1]->I * DEFAULT_NFACTORS * sizeof(double)));
HANDLE_ERROR(hipMemcpy(d_value_b[cpu_thread_id], mats[1]->values, mats[1]->I * DEFAULT_NFACTORS * sizeof(double), hipMemcpyHostToDevice));
#pragma omp critical
{
d_ftemp = mats[1]->values;
mats[1]->values = d_value_b[cpu_thread_id];
HANDLE_ERROR(hipMemcpy(d_factorb[cpu_thread_id], mats[1], sizeof(ordi_matrix), hipMemcpyHostToDevice));
mats[1]->values = d_ftemp;
}
HANDLE_ERROR(hipDeviceSynchronize());
#pragma omp barrier
HANDLE_ERROR(hipMalloc((void**)&(d_factorc[cpu_thread_id]), sizeof(ordi_matrix)));
HANDLE_ERROR(hipMalloc((void**)&(d_value_c[cpu_thread_id]), mats[2]->I * DEFAULT_NFACTORS * sizeof(double)));
HANDLE_ERROR(hipMemcpy(d_value_c[cpu_thread_id], mats[2]->values, mats[2]->I * DEFAULT_NFACTORS * sizeof(double), hipMemcpyHostToDevice));
#pragma omp critical
{d_ftemp = mats[2]->values;
mats[2]->values = d_value_c[cpu_thread_id];
HANDLE_ERROR(hipMemcpy(d_factorc[cpu_thread_id], mats[2], sizeof(ordi_matrix), hipMemcpyHostToDevice));
mats[2]->values = d_ftemp;
}
#pragma omp barrier
//for auxiliary factor matrices
HANDLE_ERROR(hipMalloc((void**)&(d_value_ha[cpu_thread_id]), mats[0]->I * DEFAULT_NFACTORS * sizeof(double)));
HANDLE_ERROR(hipMalloc((void**)&(d_value_hb[cpu_thread_id]), mats[1]->I * DEFAULT_NFACTORS * sizeof(double)));
HANDLE_ERROR(hipMalloc((void**)&(d_value_hc[cpu_thread_id]), mats[2]->I * DEFAULT_NFACTORS * sizeof(double)));
HANDLE_ERROR(hipDeviceSynchronize());
HANDLE_ERROR(hipMemset(d_value_ha[cpu_thread_id], 0, mats[0]->I * DEFAULT_NFACTORS * sizeof(double)));
HANDLE_ERROR(hipMemset(d_value_hb[cpu_thread_id], 0, mats[1]->I * DEFAULT_NFACTORS * sizeof(double)));
HANDLE_ERROR(hipMemset(d_value_hc[cpu_thread_id], 0, mats[2]->I * DEFAULT_NFACTORS * sizeof(double)));
HANDLE_ERROR(hipDeviceSynchronize());
free(myh_traina);
}
#ifdef CUDA_LOSS
//to be done
#else
double loss = tc_loss_sq(traina, mats, algorithm_index);
double frobsq = tc_frob_sq(nmodes, regularization_index, mats);
tc_converge(traina, validation, mats, best_mats, algorithm_index, loss, frobsq, 0, nmodes, best_rmse, tolerance, nbadepochs, bestepochs, max_badepochs);
#endif
/* for bold driver */
double obj = loss + frobsq;
double prev_obj = obj;
//step into the kernel
idx_t nnz = traina->nnz;
idx_t tilenum = nnz/DEFAULT_T_TILE_LENGTH + 1;
idx_t* tileptr = (idx_t*)malloc((deviceCount + 1) * sizeof(idx_t));
tileptr[0] = 0;
tileptr[deviceCount] = tilenum;
for(n = 1; n < deviceCount; n++)
{
tileptr[n] = tilenum / deviceCount * (n);
}
#ifdef SGD_DEBUG
for(n = 0; n < deviceCount + 1; n++)
printf("now the tileptr[%ld] is %ld\n", n, tileptr[n]);
#endif
#ifdef SGD_DEBUG
printf("nnz %d tilenum %d\n", nnz, tilenum);
#endif
/* foreach epoch */
for(idx_t e=1; e < DEFAULT_MAX_ITERATE; ++e) {
/* update model from all training observations */
gettimeofday(&start,NULL);
#pragma omp parallel
{
//prepare the threads
unsigned int cpu_thread_id = omp_get_thread_num();
idx_t blocknum_m = (tileptr[cpu_thread_id + 1] - tileptr[cpu_thread_id] - 1)/DEFAULT_BLOCKSIZE + 1;
//idx_t blocknum_m = tilenum/DEFAULT_BLOCKSIZE + 1;
//set gpus
hipSetDevice(cpu_thread_id % deviceCount); // "% num_gpus" allows more CPU threads than GPU devices
#ifdef SGD_DEBUG
printf("now in thread %d, the sgd starts, tilebegin at %ld, tileend at %ld, blocknum is %ld\n", cpu_thread_id, tileptr[cpu_thread_id], tileptr[cpu_thread_id + 1], blocknum_m);
#endif
hipLaunchKernelGGL(( p_update_sgd_gpu), dim3(blocknum_m), dim3(DEFAULT_BLOCKSIZE), 0, 0, d_traina[cpu_thread_id], d_factora[cpu_thread_id], d_factorb[cpu_thread_id], d_factorc[cpu_thread_id], d_value_ha[cpu_thread_id], d_value_hb[cpu_thread_id], d_value_hc[cpu_thread_id], learning_rate, regularization_index, tileptr[cpu_thread_id], tileptr[cpu_thread_id + 1]);
HANDLE_ERROR(hipDeviceSynchronize());
#ifdef SGD_DEBUG
printf("now in thread %d, the sgd ends, tilebegin at %ld, tileend at %ld\n", cpu_thread_id, tileptr[cpu_thread_id], tileptr[cpu_thread_id + 1]);
#endif
#pragma omp barrier
if(!cpu_thread_id)
{
HANDLE_ERROR(hipMemcpy(mats[0]->values, d_value_a[cpu_thread_id], mats[0]->I * DEFAULT_NFACTORS * sizeof(double), hipMemcpyDeviceToHost));
HANDLE_ERROR(hipMemcpy(mats[1]->values, d_value_b[cpu_thread_id], mats[1]->I * DEFAULT_NFACTORS * sizeof(double), hipMemcpyDeviceToHost));
HANDLE_ERROR(hipMemcpy(mats[2]->values, d_value_c[cpu_thread_id], mats[2]->I * DEFAULT_NFACTORS * sizeof(double), hipMemcpyDeviceToHost));
}
else
{
HANDLE_ERROR(hipMemcpy(aux_mats[0]->values, d_value_a[cpu_thread_id], mats[0]->I * DEFAULT_NFACTORS * sizeof(double), hipMemcpyDeviceToHost));
HANDLE_ERROR(hipMemcpy(aux_mats[1]->values, d_value_b[cpu_thread_id], mats[1]->I * DEFAULT_NFACTORS * sizeof(double), hipMemcpyDeviceToHost));
HANDLE_ERROR(hipMemcpy(aux_mats[2]->values, d_value_c[cpu_thread_id], mats[2]->I * DEFAULT_NFACTORS * sizeof(double), hipMemcpyDeviceToHost));
}
HANDLE_ERROR(hipDeviceSynchronize());
#pragma omp barrier
if(!cpu_thread_id)
{
HANDLE_ERROR(hipMemcpy(d_value_ha[cpu_thread_id], aux_mats[0]->values, mats[0]->I * DEFAULT_NFACTORS * sizeof(double), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_value_hb[cpu_thread_id], aux_mats[1]->values, mats[1]->I * DEFAULT_NFACTORS * sizeof(double), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_value_hc[cpu_thread_id], aux_mats[2]->values, mats[2]->I * DEFAULT_NFACTORS * sizeof(double), hipMemcpyHostToDevice));
}
else
{
HANDLE_ERROR(hipMemcpy(d_value_ha[cpu_thread_id], mats[0]->values, mats[0]->I * DEFAULT_NFACTORS * sizeof(double), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_value_hb[cpu_thread_id], mats[1]->values, mats[1]->I * DEFAULT_NFACTORS * sizeof(double), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_value_hc[cpu_thread_id], mats[2]->values, mats[2]->I * DEFAULT_NFACTORS * sizeof(double), hipMemcpyHostToDevice));
}
HANDLE_ERROR(hipDeviceSynchronize());
}
gettimeofday(&end,NULL);
diff = 1000000*(end.tv_sec-start.tv_sec) + end.tv_usec - start.tv_usec;
printf("this time cost %ld\n",diff);
/* compute RMSE and adjust learning rate */
loss = tc_loss_sq_sgd(traina, mats, aux_mats, algorithm_index);
frobsq = tc_frob_sq_sgd(nmodes, regularization_index, mats, aux_mats);
obj = loss + frobsq;
if(tc_converge_sgd(traina, validation, mats, best_mats, aux_mats, algorithm_index, loss, frobsq, e, nmodes, best_rmse, tolerance, nbadepochs, bestepochs, max_badepochs)) {
break;
}
/* bold driver */
if(e > 1) {
if(obj < prev_obj) {
learning_rate *= 1.05;
} else {
learning_rate *= 0.50;
}
}
prev_obj = obj;
}
//free the cudabuffer
#pragma omp parallel
{
unsigned int cpu_thread_id = omp_get_thread_num();
//set gpus
hipSetDevice(cpu_thread_id % deviceCount); // "% num_gpus" allows more CPU threads than GPU devices
hipFree(d_directory_a[cpu_thread_id]);
hipFree(d_dims_a[cpu_thread_id]);
hipFree(d_entries_a[cpu_thread_id]);
hipFree(d_value_ha[cpu_thread_id]);
hipFree(d_value_hb[cpu_thread_id]);
hipFree(d_value_hc[cpu_thread_id]);
hipFree(d_value_a[cpu_thread_id]);
hipFree(d_value_b[cpu_thread_id]);
hipFree(d_value_c[cpu_thread_id]);
hipFree(d_traina[cpu_thread_id]);
hipFree(d_factora[cpu_thread_id]);
hipFree(d_factorb[cpu_thread_id]);
hipFree(d_factorc[cpu_thread_id]);
hipDeviceReset();
}
cissbasic_free(h_traina);
free(d_traina);
free(d_directory_a);
free(d_counter_a);
free(d_dims_a);
free(d_entries_a);
free(d_value_ha);
free(d_value_hb);
free(d_value_hc);
free(d_factora);
free(d_factorb);
free(d_factorc);
free(d_value_a);
free(d_value_b);
free(d_value_c);
free(tileptr);
}
} | 3391ee4eda8b2e44b68c0de2a78a3dcd13e1983d.cu |
extern "C"
{
#include "completion.h"
#include "ciss.h"
#include "base.h"
#include "matrixprocess.h"
#include <stdio.h>
#include <sys/time.h>
#include <stdlib.h>
}
#include "sgd.cuh"
#include "loss.cuh"
#include <cuda.h>
#include <cuda_runtime.h>
#include <omp.h>
/**
* @brief Update a three-mode model based on a given observation.
*
* @param DEFAULT_NFACTORS The rank.
* @param train The training data.
* @param nnz_index The index of the observation to update from.
* @param mats The model to update.
static inline void p_update_sgd(
sptensor_t * train,
idx_t nnz_index,
ordi_matrix ** mats,
double learning_rate,
double regularization_index
)
{
idx_t const x = nnz_index;
assert(train->nmodes == 3);
idx_t ** ind = train->ind;
double * arow = mats[0] + (ind[0][x] * DEFAULT_NFACTORS);
double * brow = mats[1] + (ind[1][x] * DEFAULT_NFACTORS);
double * crow = mats[2] + (ind[2][x] * DEFAULT_NFACTORS);
/* predict value
double predicted = 0;
for(idx_t f=0; f < DEFAULT_NFACTORS; ++f) {
predicted += arow[f] * brow[f] * crow[f];
}
double const loss = train->vals[x] - predicted;
double const rate = learning_rate;
double reg = regularization_index;
/* update rows
for(idx_t f=0; f < DEFAULT_NFACTORS; ++f) {
double const moda = (loss * brow[f] * crow[f]) - (reg[0] * arow[f]);
double const modb = (loss * arow[f] * crow[f]) - (reg[1] * brow[f]);
double const modc = (loss * arow[f] * brow[f]) - (reg[2] * crow[f]);
arow[f] += rate * moda;
brow[f] += rate * modb;
crow[f] += rate * modc;
}
}*/
//the gpu kernel
__global__ void p_update_sgd_gpu(cissbasic_t * d_traina,
ordi_matrix * d_factora,
ordi_matrix * d_factorb,
ordi_matrix * d_factorc,
double * d_value_ha,
double * d_value_hb,
double * d_value_hc,
double learning_rate,
double regularization_index,
idx_t tilebegin,
idx_t tileend)
{
//get thread and block index
idx_t bid = blockIdx.x;
idx_t tid = threadIdx.x;
idx_t tileid = bid * DEFAULT_BLOCKSIZE + tid + tilebegin;
//idx_t tileid = bid * DEFAULT_BLOCKSIZE + tid;
double * entries = d_traina->entries;
idx_t localtile = tileid*((DEFAULT_T_TILE_LENGTH + 1) * DEFAULT_T_TILE_WIDTH);
//buffer for matrices
double __align__(256) mabuffer[DEFAULT_NFACTORS];
double __align__(256) mbbuffer[DEFAULT_NFACTORS];
double __align__(256) mcbuffer[DEFAULT_NFACTORS];
double __align__(256) localtbuffer[6];
idx_t a,b,c, localcounter;
double localvalue;
#ifdef SGD_DEBUG
if(bid ==0 && tid == 0) {printf("my tilebegin is %ld, tileend is %ld\n", tilebegin, tileend);}
__syncthreads();
#endif
if(tileid < tileend)
{
#ifdef SGD_DEBUG
printf("now mytileid is %ld, localtile is %ld, the first element in entries is %lf\n", tileid, localtile, entries[localtile]);
#endif
//get the indices and value
idx_t f_id = (idx_t)(entries[localtile] * (-1));
#ifdef SGD_DEBUG
//printf("now in thread %ld my fid is %ld\n", tid, f_id);
#endif
idx_t l_id = (idx_t)(entries[localtile+1] * (-1));
idx_t bitmap = (idx_t)(entries[localtile+2]);
if(bitmap != 0)
{
bitmap = __brevll(bitmap);
while((bitmap & 1) == 0) {bitmap = (bitmap >> 1);}
bitmap = (bitmap >> 1);
localtile += DEFAULT_T_TILE_WIDTH;
for(idx_t j = 0; j < DEFAULT_T_TILE_LENGTH/2; j++)
{
//unroll loop and load
localtbuffer[0] = entries[localtile];
localtbuffer[1] = entries[localtile + 1];
localtbuffer[2] = entries[localtile + 2];
if(localtbuffer[0] == -1 && localtbuffer[1] == -1) break;
//for the first
f_id += (!(bitmap & 1));
#ifdef SGD_DEBUG
if(f_id >= 260208)printf("now in thread %ld my fid is %ld\n", tid, f_id);
#endif
bitmap = bitmap >> 1;
a = d_traina->directory[f_id] - 1;
localcounter = d_traina->dcounter[f_id + 1] - d_traina->dcounter[f_id];
//dcounter = d_traina->dcounter[f_id];
b = (idx_t)localtbuffer[0] - 1;
c = (idx_t)localtbuffer[1] - 1;
#ifdef SGD_DEBUG
if(localtbuffer[0] == 0 or localtbuffer[1] == 0)printf("now in thread %ld are zero indices\n", tid);
#endif
localvalue = localtbuffer[2];
#ifdef SGD_DEBUG
printf("now a b c in tile %ld are %ld %ld %ld\n", tileid, a, b, c);
#endif
//if(localtbuffer[0] == -1 && localtbuffer[1] == -1) break;
for(idx_t i = 0; i< DEFAULT_NFACTORS; i++)
{
mabuffer[i] = ((d_factora->values)[a * DEFAULT_NFACTORS + i] + d_value_ha[a * DEFAULT_NFACTORS + i])/2;
mbbuffer[i] = ((d_factorb->values)[b * DEFAULT_NFACTORS + i] + d_value_hb[b * DEFAULT_NFACTORS + i])/2;
mcbuffer[i] = ((d_factorc->values)[c * DEFAULT_NFACTORS + i] + d_value_hc[c * DEFAULT_NFACTORS + i])/2;
}
/* predict value */
double predicted = 0;
for(idx_t f=0; f < DEFAULT_NFACTORS; f++) {
predicted += mabuffer[f] * mbbuffer[f] * mcbuffer[f];
}
predicted = localvalue - predicted;
/* update rows */
for(idx_t f=0; f < DEFAULT_NFACTORS; f++) {
double moda = (predicted * mbbuffer[f] * mcbuffer[f]) - (regularization_index * mabuffer[f]);
double modb = (predicted * mabuffer[f] * mcbuffer[f]) - (regularization_index * mbbuffer[f]);
double modc = (predicted * mbbuffer[f] * mabuffer[f]) - (regularization_index * mcbuffer[f]);
atomicAdd(&(d_factora->values[a * DEFAULT_NFACTORS + f]), learning_rate*moda * (double)(SGD_MODIFICATIONA));
atomicAdd(&(d_factorb->values[b * DEFAULT_NFACTORS + f]), learning_rate*modb * (double)(SGD_MODIFICATIONB));
atomicAdd(&(d_factorc->values[c * DEFAULT_NFACTORS + f]), learning_rate*modc * (double)(SGD_MODIFICATIONC));
}
//for the second
localtbuffer[3] = entries[localtile + 3];
localtbuffer[4] = entries[localtile + 4];
localtbuffer[5] = entries[localtile + 5];
f_id += (!(bitmap & 1));
#ifdef SGD_DEBUG
if(f_id >= 260208)printf("now in thread %ld my fid is %ld\n", tid, f_id);
#endif
bitmap = bitmap >> 1;
a = d_traina->directory[f_id] - 1;
localcounter = d_traina->dcounter[f_id + 1] - d_traina->dcounter[f_id];
b = (idx_t)localtbuffer[3] - 1;
c = (idx_t)localtbuffer[4] - 1;
#ifdef SGD_DEBUG
if(localtbuffer[3] == 0 or localtbuffer[4] == 0)printf("now in thread %ld are zero indices\n", tid);
#endif
#ifdef SGD_DEBUG
printf("now a b c in tile %ld are %ld %ld %ld\n", tileid, a, b, c);
#endif
localvalue = localtbuffer[5];
if(localtbuffer[3] == -1 && localtbuffer[4] == -1) break;
for(idx_t i = 0; i< DEFAULT_NFACTORS; i++)
{
mabuffer[i] = (d_factora->values)[a * DEFAULT_NFACTORS + i] + d_value_ha[a * DEFAULT_NFACTORS + i];
mbbuffer[i] = (d_factorb->values)[b * DEFAULT_NFACTORS + i] + d_value_hb[b * DEFAULT_NFACTORS + i];
mcbuffer[i] = (d_factorc->values)[c * DEFAULT_NFACTORS + i] + d_value_hc[c * DEFAULT_NFACTORS + i];
}
/* predict value */
predicted = 0;
for(idx_t f=0; f < DEFAULT_NFACTORS; f++) {
predicted += mabuffer[f] * mbbuffer[f] * mcbuffer[f];
}
predicted = localvalue - predicted;
/* update rows */
for(idx_t f=0; f < DEFAULT_NFACTORS; f++) {
double moda = (predicted * mbbuffer[f] * mcbuffer[f]) - (regularization_index * mabuffer[f]);
double modb = (predicted * mabuffer[f] * mcbuffer[f]) - (regularization_index * mbbuffer[f]);
double modc = (predicted * mbbuffer[f] * mabuffer[f]) - (regularization_index * mcbuffer[f]);
atomicAdd(&(d_factora->values[a * DEFAULT_NFACTORS + f]), learning_rate*moda * (double)(SGD_MODIFICATIONA));
atomicAdd(&(d_factorb->values[b * DEFAULT_NFACTORS + f]), learning_rate*modb * (double)(SGD_MODIFICATIONB));
atomicAdd(&(d_factorc->values[c * DEFAULT_NFACTORS + f]), learning_rate*modc * (double)(SGD_MODIFICATIONC));
}
localtile += 2 * DEFAULT_T_TILE_WIDTH;
}
}
}
__syncthreads();
}
/**
 * @brief The main function for tensor completion in sgd
 * @param traina, trainb, trainc The training tensor (one copy per mode ordering) used to generate the factor matrices
 * @param validation The tensor for validation (RMSE)
 * @param test The tensor for testing the quality
 * @param mats The factor matrices being optimized
 * @param best_mats The best factor matrices found so far
 * @param aux_mats The auxiliary factor matrices exchanged between devices
 * @param algorithm_index The algorithm selector forwarded to the loss/convergence routines
 * @param regularization_index Lambda
 * @param learning_rate The SGD step size (adapted by the bold driver)
 * @param best_rmse, tolerance, nbadepochs, bestepochs, max_badepochs Convergence and early-stopping state forwarded to tc_converge_sgd
 */
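/*
 * Per-epoch flow, as implemented below: each OpenMP thread drives one GPU over its
 * tile range [tileptr[t], tileptr[t+1]) with p_update_sgd_gpu; the updated factor
 * values are copied back into mats[] (thread 0) and aux_mats[] (the remaining
 * thread(s)); each device then receives the partner's copy into its auxiliary
 * buffers; finally the loss, the Frobenius penalty and the validation RMSE are
 * recomputed to drive the bold-driver learning-rate update and the convergence check.
 */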
extern "C"{
void tc_sgd(sptensor_t * traina,
sptensor_t * trainb,
sptensor_t * trainc,
sptensor_t * validation,
sptensor_t * test,
ordi_matrix ** mats,
ordi_matrix ** best_mats,
ordi_matrix ** aux_mats,
int algorithm_index,
double regularization_index,
double learning_rate,
double * best_rmse,
double * tolerance,
idx_t * nbadepochs,
idx_t * bestepochs,
idx_t * max_badepochs)
{
//only in sgd
idx_t steps_size = 1000;
idx_t nmodes = traina->nmodes;
//initialize the devices
int deviceCount;
cudaGetDeviceCount(&deviceCount);
int n;
//print the GPU status
for(n = 0; n < deviceCount; n++)
{
cudaDeviceProp dprop;
cudaGetDeviceProperties(&dprop, n);
printf(" %d: %s\n", n, dprop.name);
}
omp_set_num_threads(deviceCount);
//prepare the tensor in TB-COO
cissbasic_t * h_traina = cissbasic_alloc(traina, 0, traina->ind[0][0], (traina->ind[0][traina->nnz - 1] + 1));
#ifdef MCISS_DEBUG
fprintf(stdout, "the newtensor\n");
fprintf(stdout, "the lasti is %ld\n",traina->ind[0][traina->nnz - 1] + 1);
cissbasic_display(h_traina);
#endif
struct timeval start;
struct timeval end;
idx_t diff;
//copy the real and auxiliary factor matrices
cissbasic_t ** d_traina = (cissbasic_t**)malloc(deviceCount * sizeof(cissbasic_t*));
idx_t ** d_directory_a = (idx_t**)malloc(deviceCount * sizeof(idx_t*));
idx_t ** d_counter_a = (idx_t**)malloc(deviceCount * sizeof(idx_t*));
idx_t ** d_dims_a = (idx_t**)malloc(deviceCount * sizeof(idx_t*));
double ** d_entries_a = (double**)malloc(deviceCount * sizeof(double*));
double ** d_value_ha = (double**)malloc(deviceCount * sizeof(double*));
double ** d_value_hb = (double**)malloc(deviceCount * sizeof(double*));
double ** d_value_hc = (double**)malloc(deviceCount * sizeof(double*));
ordi_matrix ** d_factora = (ordi_matrix**)malloc(deviceCount * sizeof(ordi_matrix*));
ordi_matrix ** d_factorb = (ordi_matrix**)malloc(deviceCount * sizeof(ordi_matrix*));
ordi_matrix ** d_factorc = (ordi_matrix**)malloc(deviceCount * sizeof(ordi_matrix*));
double ** d_value_a = (double**)malloc(deviceCount * sizeof(double*));
double ** d_value_b = (double**)malloc(deviceCount * sizeof(double*));
double ** d_value_c = (double**)malloc(deviceCount * sizeof(double*));
#pragma omp parallel
{
//prepare the threads
//
unsigned int cpu_thread_id = omp_get_thread_num();
//unsigned int num_cpu_threads = omp_get_num_threads();
//set gpus
cudaSetDevice(cpu_thread_id % deviceCount); // "% num_gpus" allows more CPU threads than GPU devices
idx_t * d_itemp1, *d_itemp2, * d_itemp3;
double * d_ftemp;
cissbasic_t * myh_traina = (cissbasic_t*)malloc(sizeof(cissbasic_t));
myh_traina->directory = h_traina->directory;
myh_traina->dcounter = h_traina->dcounter;
myh_traina->entries = h_traina->entries;
myh_traina->dims = h_traina->dims;
myh_traina->nnz = h_traina->nnz;
myh_traina->nmodes = h_traina->nmodes;
myh_traina->size = h_traina->size;
myh_traina->dlength = h_traina->dlength;
#ifdef MCISS_DEBUG
fprintf(stdout, "in cpu_thread %d, my tensor\n", cpu_thread_id);
cissbasic_display(myh_traina);
#endif
//copy tensor for mode-1
HANDLE_ERROR(cudaMalloc((void**)&(d_traina[cpu_thread_id]), sizeof(cissbasic_t)));
HANDLE_ERROR(cudaMalloc((void**)&(d_directory_a[cpu_thread_id]), myh_traina->dlength * sizeof(idx_t)));
HANDLE_ERROR(cudaMalloc((void**)&(d_counter_a[cpu_thread_id]), (myh_traina->dlength + 1) * sizeof(idx_t)));
HANDLE_ERROR(cudaMalloc((void**)&(d_entries_a[cpu_thread_id]), myh_traina->size * DEFAULT_T_TILE_WIDTH * sizeof(double)));
HANDLE_ERROR(cudaMalloc((void**)&(d_dims_a[cpu_thread_id]), nmodes * sizeof(idx_t)));
HANDLE_ERROR(cudaMemcpy(d_directory_a[cpu_thread_id], myh_traina->directory, myh_traina->dlength*sizeof(idx_t), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_counter_a[cpu_thread_id], myh_traina->dcounter, (myh_traina->dlength + 1)*sizeof(idx_t), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_entries_a[cpu_thread_id], myh_traina->entries, myh_traina->size * DEFAULT_T_TILE_WIDTH * sizeof(double), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_dims_a[cpu_thread_id], myh_traina->dims, nmodes*sizeof(idx_t), cudaMemcpyHostToDevice));
d_itemp1 = myh_traina->directory;
d_itemp2 = myh_traina->dims;
d_itemp3 = myh_traina->dcounter;
d_ftemp = myh_traina->entries;
myh_traina->directory = d_directory_a[cpu_thread_id];
myh_traina->dcounter = d_counter_a[cpu_thread_id];
myh_traina->dims = d_dims_a[cpu_thread_id];
myh_traina->entries = d_entries_a[cpu_thread_id];
HANDLE_ERROR(cudaMemcpy(d_traina[cpu_thread_id], myh_traina, sizeof(cissbasic_t), cudaMemcpyHostToDevice));
myh_traina->directory = d_itemp1;
myh_traina->dims = d_itemp2;
myh_traina->dcounter = d_itemp3;
myh_traina->entries = d_ftemp;
HANDLE_ERROR(cudaMalloc((void**)&(d_factora[cpu_thread_id]), sizeof(ordi_matrix)));
HANDLE_ERROR(cudaMalloc((void**)&(d_value_a[cpu_thread_id]), mats[0]->I * DEFAULT_NFACTORS * sizeof(double)));
HANDLE_ERROR(cudaMemcpy(d_value_a[cpu_thread_id], mats[0]->values, mats[0]->I * DEFAULT_NFACTORS * sizeof(double), cudaMemcpyHostToDevice));
#pragma omp critical
{d_ftemp = mats[0]->values;
mats[0]->values = d_value_a[cpu_thread_id];
HANDLE_ERROR(cudaMemcpy(d_factora[cpu_thread_id], mats[0], sizeof(ordi_matrix), cudaMemcpyHostToDevice));
mats[0]->values = d_ftemp;
}
HANDLE_ERROR(cudaDeviceSynchronize());
#pragma omp barrier
HANDLE_ERROR(cudaMalloc((void**)&(d_factorb[cpu_thread_id]), sizeof(ordi_matrix)));
HANDLE_ERROR(cudaMalloc((void**)&(d_value_b[cpu_thread_id]), mats[1]->I * DEFAULT_NFACTORS * sizeof(double)));
HANDLE_ERROR(cudaMemcpy(d_value_b[cpu_thread_id], mats[1]->values, mats[1]->I * DEFAULT_NFACTORS * sizeof(double), cudaMemcpyHostToDevice));
#pragma omp critical
{
d_ftemp = mats[1]->values;
mats[1]->values = d_value_b[cpu_thread_id];
HANDLE_ERROR(cudaMemcpy(d_factorb[cpu_thread_id], mats[1], sizeof(ordi_matrix), cudaMemcpyHostToDevice));
mats[1]->values = d_ftemp;
}
HANDLE_ERROR(cudaDeviceSynchronize());
#pragma omp barrier
HANDLE_ERROR(cudaMalloc((void**)&(d_factorc[cpu_thread_id]), sizeof(ordi_matrix)));
HANDLE_ERROR(cudaMalloc((void**)&(d_value_c[cpu_thread_id]), mats[2]->I * DEFAULT_NFACTORS * sizeof(double)));
HANDLE_ERROR(cudaMemcpy(d_value_c[cpu_thread_id], mats[2]->values, mats[2]->I * DEFAULT_NFACTORS * sizeof(double), cudaMemcpyHostToDevice));
#pragma omp critical
{d_ftemp = mats[2]->values;
mats[2]->values = d_value_c[cpu_thread_id];
HANDLE_ERROR(cudaMemcpy(d_factorc[cpu_thread_id], mats[2], sizeof(ordi_matrix), cudaMemcpyHostToDevice));
mats[2]->values = d_ftemp;
}
#pragma omp barrier
//for auxiliary factor matrices
HANDLE_ERROR(cudaMalloc((void**)&(d_value_ha[cpu_thread_id]), mats[0]->I * DEFAULT_NFACTORS * sizeof(double)));
HANDLE_ERROR(cudaMalloc((void**)&(d_value_hb[cpu_thread_id]), mats[1]->I * DEFAULT_NFACTORS * sizeof(double)));
HANDLE_ERROR(cudaMalloc((void**)&(d_value_hc[cpu_thread_id]), mats[2]->I * DEFAULT_NFACTORS * sizeof(double)));
HANDLE_ERROR(cudaDeviceSynchronize());
HANDLE_ERROR(cudaMemset(d_value_ha[cpu_thread_id], 0, mats[0]->I * DEFAULT_NFACTORS * sizeof(double)));
HANDLE_ERROR(cudaMemset(d_value_hb[cpu_thread_id], 0, mats[1]->I * DEFAULT_NFACTORS * sizeof(double)));
HANDLE_ERROR(cudaMemset(d_value_hc[cpu_thread_id], 0, mats[2]->I * DEFAULT_NFACTORS * sizeof(double)));
HANDLE_ERROR(cudaDeviceSynchronize());
free(myh_traina);
}
#ifdef CUDA_LOSS
//to be done
#else
double loss = tc_loss_sq(traina, mats, algorithm_index);
double frobsq = tc_frob_sq(nmodes, regularization_index, mats);
tc_converge(traina, validation, mats, best_mats, algorithm_index, loss, frobsq, 0, nmodes, best_rmse, tolerance, nbadepochs, bestepochs, max_badepochs);
#endif
/* for bold driver */
double obj = loss + frobsq;
double prev_obj = obj;
//step into the kernel
idx_t nnz = traina->nnz;
idx_t tilenum = nnz/DEFAULT_T_TILE_LENGTH + 1;
idx_t* tileptr = (idx_t*)malloc((deviceCount + 1) * sizeof(idx_t));
tileptr[0] = 0;
tileptr[deviceCount] = tilenum;
for(n = 1; n < deviceCount; n++)
{
tileptr[n] = tilenum / deviceCount * (n);
}
#ifdef SGD_DEBUG
for(n = 0; n < deviceCount + 1; n++)
    printf("now the tileptr[%d] is %ld\n", n, tileptr[n]);
  #endif
  #ifdef SGD_DEBUG
  printf("nnz %ld tilenum %ld\n", nnz, tilenum);
#endif
/* foreach epoch */
for(idx_t e=1; e < DEFAULT_MAX_ITERATE; ++e) {
/* update model from all training observations */
gettimeofday(&start,NULL);
#pragma omp parallel
{
//prepare the threads
unsigned int cpu_thread_id = omp_get_thread_num();
idx_t blocknum_m = (tileptr[cpu_thread_id + 1] - tileptr[cpu_thread_id] - 1)/DEFAULT_BLOCKSIZE + 1;
//idx_t blocknum_m = tilenum/DEFAULT_BLOCKSIZE + 1;
//set gpus
cudaSetDevice(cpu_thread_id % deviceCount); // "% num_gpus" allows more CPU threads than GPU devices
#ifdef SGD_DEBUG
printf("now in thread %d, the sgd starts, tilebegin at %ld, tileend at %ld, blocknum is %ld\n", cpu_thread_id, tileptr[cpu_thread_id], tileptr[cpu_thread_id + 1], blocknum_m);
#endif
p_update_sgd_gpu<<<blocknum_m, DEFAULT_BLOCKSIZE, 0>>>(d_traina[cpu_thread_id], d_factora[cpu_thread_id], d_factorb[cpu_thread_id], d_factorc[cpu_thread_id], d_value_ha[cpu_thread_id], d_value_hb[cpu_thread_id], d_value_hc[cpu_thread_id], learning_rate, regularization_index, tileptr[cpu_thread_id], tileptr[cpu_thread_id + 1]);
HANDLE_ERROR(cudaDeviceSynchronize());
#ifdef SGD_DEBUG
printf("now in thread %d, the sgd ends, tilebegin at %ld, tileend at %ld\n", cpu_thread_id, tileptr[cpu_thread_id], tileptr[cpu_thread_id + 1]);
#endif
#pragma omp barrier
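      /* Exchange factors across devices: thread 0 publishes its factors into mats[]
         while the other thread(s) publish into aux_mats[]; after the barrier each
         device pulls the partner's copy into its d_value_h* buffers, which the kernel
         combines with its own factors during the next epoch. */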
if(!cpu_thread_id)
{
HANDLE_ERROR(cudaMemcpy(mats[0]->values, d_value_a[cpu_thread_id], mats[0]->I * DEFAULT_NFACTORS * sizeof(double), cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaMemcpy(mats[1]->values, d_value_b[cpu_thread_id], mats[1]->I * DEFAULT_NFACTORS * sizeof(double), cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaMemcpy(mats[2]->values, d_value_c[cpu_thread_id], mats[2]->I * DEFAULT_NFACTORS * sizeof(double), cudaMemcpyDeviceToHost));
}
else
{
HANDLE_ERROR(cudaMemcpy(aux_mats[0]->values, d_value_a[cpu_thread_id], mats[0]->I * DEFAULT_NFACTORS * sizeof(double), cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaMemcpy(aux_mats[1]->values, d_value_b[cpu_thread_id], mats[1]->I * DEFAULT_NFACTORS * sizeof(double), cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaMemcpy(aux_mats[2]->values, d_value_c[cpu_thread_id], mats[2]->I * DEFAULT_NFACTORS * sizeof(double), cudaMemcpyDeviceToHost));
}
HANDLE_ERROR(cudaDeviceSynchronize());
#pragma omp barrier
if(!cpu_thread_id)
{
HANDLE_ERROR(cudaMemcpy(d_value_ha[cpu_thread_id], aux_mats[0]->values, mats[0]->I * DEFAULT_NFACTORS * sizeof(double), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_value_hb[cpu_thread_id], aux_mats[1]->values, mats[1]->I * DEFAULT_NFACTORS * sizeof(double), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_value_hc[cpu_thread_id], aux_mats[2]->values, mats[2]->I * DEFAULT_NFACTORS * sizeof(double), cudaMemcpyHostToDevice));
}
else
{
HANDLE_ERROR(cudaMemcpy(d_value_ha[cpu_thread_id], mats[0]->values, mats[0]->I * DEFAULT_NFACTORS * sizeof(double), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_value_hb[cpu_thread_id], mats[1]->values, mats[1]->I * DEFAULT_NFACTORS * sizeof(double), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_value_hc[cpu_thread_id], mats[2]->values, mats[2]->I * DEFAULT_NFACTORS * sizeof(double), cudaMemcpyHostToDevice));
}
HANDLE_ERROR(cudaDeviceSynchronize());
}
gettimeofday(&end,NULL);
diff = 1000000*(end.tv_sec-start.tv_sec) + end.tv_usec - start.tv_usec;
printf("this time cost %ld\n",diff);
/* compute RMSE and adjust learning rate */
loss = tc_loss_sq_sgd(traina, mats, aux_mats, algorithm_index);
frobsq = tc_frob_sq_sgd(nmodes, regularization_index, mats, aux_mats);
obj = loss + frobsq;
if(tc_converge_sgd(traina, validation, mats, best_mats, aux_mats, algorithm_index, loss, frobsq, e, nmodes, best_rmse, tolerance, nbadepochs, bestepochs, max_badepochs)) {
break;
}
/* bold driver */
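    /* Bold driver: if the regularized objective (loss + frobsq) improved over the
       previous epoch, grow the learning rate by 5%; otherwise halve it. */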
if(e > 1) {
if(obj < prev_obj) {
learning_rate *= 1.05;
} else {
learning_rate *= 0.50;
}
}
prev_obj = obj;
}
//free the cudabuffer
#pragma omp parallel
{
unsigned int cpu_thread_id = omp_get_thread_num();
//set gpus
cudaSetDevice(cpu_thread_id % deviceCount); // "% num_gpus" allows more CPU threads than GPU devices
    cudaFree(d_directory_a[cpu_thread_id]);
    cudaFree(d_counter_a[cpu_thread_id]);
cudaFree(d_dims_a[cpu_thread_id]);
cudaFree(d_entries_a[cpu_thread_id]);
cudaFree(d_value_ha[cpu_thread_id]);
cudaFree(d_value_hb[cpu_thread_id]);
cudaFree(d_value_hc[cpu_thread_id]);
cudaFree(d_value_a[cpu_thread_id]);
cudaFree(d_value_b[cpu_thread_id]);
cudaFree(d_value_c[cpu_thread_id]);
cudaFree(d_traina[cpu_thread_id]);
cudaFree(d_factora[cpu_thread_id]);
cudaFree(d_factorb[cpu_thread_id]);
cudaFree(d_factorc[cpu_thread_id]);
cudaDeviceReset();
}
cissbasic_free(h_traina);
free(d_traina);
free(d_directory_a);
free(d_counter_a);
free(d_dims_a);
free(d_entries_a);
free(d_value_ha);
free(d_value_hb);
free(d_value_hc);
free(d_factora);
free(d_factorb);
free(d_factorc);
free(d_value_a);
free(d_value_b);
free(d_value_c);
free(tileptr);
}
} |
random_horiz_flip.hip | // !!! This is a file automatically generated by hipify!!!
/******************************************************************************
*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
******************************************************************************/
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THHNumerics.cuh>
#include <THH/THH.h>
#include <hip/hip_runtime.h>
#include <torch/torch.h>
#include <torch/extension.h>
/**
* Each block will handle one channel of each image
**/
template <typename T>
__global__
void HorizFlipImagesAndBoxes(
const int N,
const int C,
const int H,
const int W,
const T* img_in,
float* bboxes,
const int* offsets,
const float p,
const float* flip,
T* img_out,
const bool nhwc) {
// early return if not flipping
if (flip[blockIdx.x] < p) return;
// pointer offset into images
const int img_offset = blockIdx.x * C * H * W;
const T* img = &img_in[img_offset];
T* img_o = &img_out[img_offset];
// flip bboxes
auto bbox_offset_begin = offsets[blockIdx.x];
auto bbox_offset_end = offsets[blockIdx.x + 1];
auto num_bboxes = bbox_offset_end - bbox_offset_begin;
const int thread_idx = threadIdx.y * blockDim.x + threadIdx.x;
// bboxes in ltrb format, scaled to [0, 1]
for (int i = thread_idx; i < num_bboxes; i += blockDim.x * blockDim.y) {
    float *bbox = &bboxes[(bbox_offset_begin + i) * 4];
// Could do this inplace, but not register constrained
auto bbox_0 = bbox[0];
auto bbox_2 = bbox[2];
bbox[0] = 1. - bbox_2;
bbox[2] = 1. - bbox_0;
}
if (nhwc) {
// loop over float3 pixels, handle 3 values / thread
for (int h = threadIdx.y; h < H; h += blockDim.y) {
for (int w = threadIdx.x; w < W; w += blockDim.x) {
const T* img_hw = &img[h * W * C + w * C];
T * img_out_hw = &img_o[h * W * C + (W - 1 - w) * C];
for (int c = 0; c < C; ++c) {
img_out_hw[c] = img_hw[c];
}
}
}
} else {
// loop over channels
for (int c = 0; c < C; ++c) {
const T* img_c = &img[c * H * W];
T *img_out_c = &img_o[c * H * W];
// handle tiles of (h, w) at a time
for (int h = threadIdx.y; h < H; h += blockDim.y) {
for (int w = threadIdx.x; w < W; w += blockDim.x) {
const int input_idx = h * W + w;
const int output_idx = h * W + (W - 1 - w);
img_out_c[output_idx] = img_c[input_idx];
}
}
}
}
}
/**
* Take images and their bboxes, randomly flip on horizontal axis
* In/Out: img: NCHW tensor of N, C-channel images of constant (H, W)
* In/Out: bboxes: [N_i, 4] tensor of original bboxes in ltrb format
 * In: bbox_offsets: [N+1] prefix offsets into bboxes (boxes of image i live in [bbox_offsets[i], bbox_offsets[i+1]))
* In: p \in [0, 1): probability of flipping each (img, bbox) pair
* In: nhwc: Tensor in NHWC format
* ----
* Note: allocate temp memory, but effectively do this inplace
*/
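// A minimal call sketch; the shapes and helper values below are illustrative
// assumptions, not taken from this file:
//   at::Tensor img = torch::randn({N, 3, H, W}, torch::kCUDA);        // NCHW input
//   at::Tensor boxes = torch::rand({total_boxes, 4}, torch::kCUDA);   // ltrb in [0, 1]
//   at::Tensor offsets = ...;  // int32 tensor of N+1 prefix offsets into `boxes`
//   auto out = random_horiz_flip(img, boxes, offsets, 0.5f, false);   // nhwc = false
//   // out[0]: batch with randomly mirrored images, out[1]: boxes (flipped in place)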
std::vector<at::Tensor> random_horiz_flip(
at::Tensor& img,
at::Tensor& bboxes,
const at::Tensor& bbox_offsets,
const float p,
const bool nhwc) {
// dimensions
const int N = img.size(0);
int C, H, W;
if (nhwc) {
C = img.size(3);
H = img.size(1);
W = img.size(2);
} else {
C = img.size(1);
H = img.size(2);
W = img.size(3);
}
assert(img.type().is_cuda());
assert(bboxes.type().is_cuda());
assert(bbox_offsets.type().is_cuda());
// printf("%d %d %d %d\n", N, C, H, W);
// Need temp storage of size img
at::Tensor tmp_img = img.clone();
at::Tensor flip = torch::zeros({N}, at::CUDA(at::kFloat)).uniform_(0., 1.);
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
img.type(),
"HorizFlipImagesAndBoxes",
[&] {
hipLaunchKernelGGL(( HorizFlipImagesAndBoxes<scalar_t>), dim3(N), dim3(dim3(16, 16)), 0, stream.stream(),
N,
C,
H,
W,
img.data_ptr<scalar_t>(),
bboxes.data_ptr<float>(),
bbox_offsets.data_ptr<int>(),
p,
flip.data_ptr<float>(),
tmp_img.data_ptr<scalar_t>(),
nhwc);
THCudaCheck(hipGetLastError());
});
// copy tmp_img -> img
// img = tmp_img;
return {tmp_img, bboxes};
}
| random_horiz_flip.cu | /******************************************************************************
*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
******************************************************************************/
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THCNumerics.cuh>
#include <THC/THC.h>
#include <cuda.h>
#include <torch/torch.h>
#include <torch/extension.h>
/**
* Each block will handle one channel of each image
**/
template <typename T>
__global__
void HorizFlipImagesAndBoxes(
const int N,
const int C,
const int H,
const int W,
const T* img_in,
float* bboxes,
const int* offsets,
const float p,
const float* flip,
T* img_out,
const bool nhwc) {
// early return if not flipping
if (flip[blockIdx.x] < p) return;
// pointer offset into images
const int img_offset = blockIdx.x * C * H * W;
const T* img = &img_in[img_offset];
T* img_o = &img_out[img_offset];
// flip bboxes
auto bbox_offset_begin = offsets[blockIdx.x];
auto bbox_offset_end = offsets[blockIdx.x + 1];
auto num_bboxes = bbox_offset_end - bbox_offset_begin;
const int thread_idx = threadIdx.y * blockDim.x + threadIdx.x;
// bboxes in ltrb format, scaled to [0, 1]
for (int i = thread_idx; i < num_bboxes; i += blockDim.x * blockDim.y) {
    float *bbox = &bboxes[(bbox_offset_begin + i) * 4];
// Could do this inplace, but not register constrained
auto bbox_0 = bbox[0];
auto bbox_2 = bbox[2];
bbox[0] = 1. - bbox_2;
bbox[2] = 1. - bbox_0;
}
if (nhwc) {
// loop over float3 pixels, handle 3 values / thread
for (int h = threadIdx.y; h < H; h += blockDim.y) {
for (int w = threadIdx.x; w < W; w += blockDim.x) {
const T* img_hw = &img[h * W * C + w * C];
T * img_out_hw = &img_o[h * W * C + (W - 1 - w) * C];
for (int c = 0; c < C; ++c) {
img_out_hw[c] = img_hw[c];
}
}
}
} else {
// loop over channels
for (int c = 0; c < C; ++c) {
const T* img_c = &img[c * H * W];
T *img_out_c = &img_o[c * H * W];
// handle tiles of (h, w) at a time
for (int h = threadIdx.y; h < H; h += blockDim.y) {
for (int w = threadIdx.x; w < W; w += blockDim.x) {
const int input_idx = h * W + w;
const int output_idx = h * W + (W - 1 - w);
img_out_c[output_idx] = img_c[input_idx];
}
}
}
}
}
/**
* Take images and their bboxes, randomly flip on horizontal axis
* In/Out: img: NCHW tensor of N, C-channel images of constant (H, W)
* In/Out: bboxes: [N_i, 4] tensor of original bboxes in ltrb format
 * In: bbox_offsets: [N+1] prefix offsets into bboxes (boxes of image i live in [bbox_offsets[i], bbox_offsets[i+1]))
* In: p \in [0, 1): probability of flipping each (img, bbox) pair
* In: nhwc: Tensor in NHWC format
* ----
* Note: allocate temp memory, but effectively do this inplace
*/
std::vector<at::Tensor> random_horiz_flip(
at::Tensor& img,
at::Tensor& bboxes,
const at::Tensor& bbox_offsets,
const float p,
const bool nhwc) {
// dimensions
const int N = img.size(0);
int C, H, W;
if (nhwc) {
C = img.size(3);
H = img.size(1);
W = img.size(2);
} else {
C = img.size(1);
H = img.size(2);
W = img.size(3);
}
assert(img.type().is_cuda());
assert(bboxes.type().is_cuda());
assert(bbox_offsets.type().is_cuda());
// printf("%d %d %d %d\n", N, C, H, W);
// Need temp storage of size img
at::Tensor tmp_img = img.clone();
at::Tensor flip = torch::zeros({N}, at::CUDA(at::kFloat)).uniform_(0., 1.);
auto stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
img.type(),
"HorizFlipImagesAndBoxes",
[&] {
HorizFlipImagesAndBoxes<scalar_t><<<N, dim3(16, 16), 0, stream.stream()>>>(
N,
C,
H,
W,
img.data_ptr<scalar_t>(),
bboxes.data_ptr<float>(),
bbox_offsets.data_ptr<int>(),
p,
flip.data_ptr<float>(),
tmp_img.data_ptr<scalar_t>(),
nhwc);
THCudaCheck(cudaGetLastError());
});
// copy tmp_img -> img
// img = tmp_img;
return {tmp_img, bboxes};
}
|
b2fc6b59248f239c2b5d008cc7541899d953d79e.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <thrust/device_ptr.h>
#include <thrust/functional.h>
#include <thrust/transform.h>
#include "implicit/gpu/random.h"
#include "implicit/gpu/utils.h"
namespace implicit {
namespace gpu {
RandomState::RandomState(long seed) {
CHECK_CURAND(hiprandCreateGenerator(&rng, HIPRAND_RNG_PSEUDO_DEFAULT));
CHECK_CURAND(hiprandSetPseudoRandomGeneratorSeed(rng, seed));
}
Matrix RandomState::uniform(size_t rows, size_t cols, float low, float high) {
Matrix ret(rows, cols, NULL);
CHECK_CURAND(hiprandGenerateUniform(rng, ret, rows * cols));
if ((low != 0.0) || (high != 1.0)) {
float *data = ret;
auto start = thrust::device_pointer_cast(data);
thrust::transform(start, start + rows * cols, start,
thrust::placeholders::_1 =
thrust::placeholders::_1 * (high - low) + low);
}
return ret;
}
Matrix RandomState::randn(size_t rows, size_t cols, float mean, float stddev) {
Matrix ret(rows, cols, NULL);
CHECK_CURAND(hiprandGenerateNormal(rng, ret, rows * cols, mean, stddev));
return ret;
}
RandomState::~RandomState() { hiprandDestroyGenerator(rng); }
} // namespace gpu
} // namespace implicit
| b2fc6b59248f239c2b5d008cc7541899d953d79e.cu | #include <cuda_runtime.h>
#include <curand.h>
#include <thrust/device_ptr.h>
#include <thrust/functional.h>
#include <thrust/transform.h>
#include "implicit/gpu/random.h"
#include "implicit/gpu/utils.h"
namespace implicit {
namespace gpu {
RandomState::RandomState(long seed) {
CHECK_CURAND(curandCreateGenerator(&rng, CURAND_RNG_PSEUDO_DEFAULT));
CHECK_CURAND(curandSetPseudoRandomGeneratorSeed(rng, seed));
}
Matrix RandomState::uniform(size_t rows, size_t cols, float low, float high) {
Matrix ret(rows, cols, NULL);
CHECK_CURAND(curandGenerateUniform(rng, ret, rows * cols));
if ((low != 0.0) || (high != 1.0)) {
float *data = ret;
auto start = thrust::device_pointer_cast(data);
thrust::transform(start, start + rows * cols, start,
thrust::placeholders::_1 =
thrust::placeholders::_1 * (high - low) + low);
}
return ret;
}
Matrix RandomState::randn(size_t rows, size_t cols, float mean, float stddev) {
Matrix ret(rows, cols, NULL);
CHECK_CURAND(curandGenerateNormal(rng, ret, rows * cols, mean, stddev));
return ret;
}
RandomState::~RandomState() { curandDestroyGenerator(rng); }
} // namespace gpu
} // namespace implicit
|
e39204f6185c9ca73c98bbec780f1c6fd1728794.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0-beta3) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date July 2014
@generated from zlobpcg_residuals.cu normal z -> s, Fri Jul 18 17:34:28 2014
*/
#include "common_magma.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define BLOCK_SIZEx 32
#define BLOCK_SIZEy 16
#define PRECISION_s
// copied from snrm2.cu in trunk/magmablas
// ----------------------------------------
// Does sum reduction of array x, leaving total in x[0].
// Contents of x are destroyed in the process.
// With k threads, can reduce array up to 2*k in size.
// Assumes number of threads <= 1024 (which is max number of threads up to CUDA capability 3.0)
// Having n as template parameter allows compiler to evaluate some conditions at compile time.
template< int n >
__device__ void sum_reduce( /*int n,*/ int i, float* x )
{
__syncthreads();
if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i] += x[i+1024]; } __syncthreads(); }
if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[i] += x[i+ 512]; } __syncthreads(); }
if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[i] += x[i+ 256]; } __syncthreads(); }
if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i] += x[i+ 128]; } __syncthreads(); }
if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i] += x[i+ 64]; } __syncthreads(); }
if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i] += x[i+ 32]; } __syncthreads(); }
// probably don't need __syncthreads for < 16 threads
// because of implicit warp level synchronization.
if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i] += x[i+ 16]; } __syncthreads(); }
if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i] += x[i+ 8]; } __syncthreads(); }
if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i] += x[i+ 4]; } __syncthreads(); }
if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i] += x[i+ 2]; } __syncthreads(); }
if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i] += x[i+ 1]; } __syncthreads(); }
}
// end sum_reduce
__global__ void
magma_slobpcg_res_kernel( magma_int_t num_rows,
magma_int_t num_vecs,
float *evals,
float *X,
float *R,
float *res){
int row = blockIdx.x * blockDim.x + threadIdx.x; // global row index
if( row<num_rows){
for( int i=0; i<num_vecs; i++ ){
R[row + i*num_rows] = R[row + i*num_rows]
+ MAGMA_S_MAKE( -evals[i], 0.0 )
* X[ row + i*num_rows ];
}
}
}
/*
magmablas_snrm2_kernel( int m, float *da, int ldda, float *dxnorm )
{
const int i = threadIdx.x;
float *dx = da + blockIdx.x * ldda;
__shared__ float sum[ BLOCK_SIZE ];
float re, lsum;
// get norm of dx
lsum = 0;
for( int j = i; j < m; j += BLOCK_SIZE ) {
#if (defined(PRECISION_s) || defined(PRECISION_d))
re = dx[j];
lsum += re*re;
#else
re = MAGMA_S_REAL( dx[j] );
float im = MAGMA_S_IMAG( dx[j] );
lsum += re*re + im*im;
#endif
}
sum[i] = lsum;
sum_reduce< BLOCK_SIZE >( i, sum );
if (i==0)
res[blockIdx.x] = sqrt(sum[0]);
}
*/
/**
Purpose
-------
This routine computes for Block-LOBPCG, the set of residuals.
R = Ax - x evalues
It replaces:
for(int i=0; i < n; i++){
magma_saxpy(m, MAGMA_S_MAKE(-evalues[i],0),blockX+i*m,1,blockR+i*m,1);
}
The memory layout of x is:
/ x1[0] x2[0] x3[0] \
| x1[1] x2[1] x3[1] |
x = | x1[2] x2[2] x3[2] | = x1[0] x1[1] x1[2] x1[3] x1[4] x2[0] x2[1] .
| x1[3] x2[3] x3[3] |
\ x1[4] x2[4] x3[4] /
Arguments
---------
@param
num_rows magma_int_t
number of rows
@param
num_vecs magma_int_t
number of vectors
@param
evalues float*
array of eigenvalues/approximations
@param
X float*
block of eigenvector approximations
@param
R float*
block of residuals
@param
res float*
array of residuals
@ingroup magmasparse_saux
********************************************************************/
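/*
    Implementation note: the kernel above assigns one thread per row and, for each of
    the num_vecs eigenpairs, computes R(row,i) <- R(row,i) - evals[i] * X(row,i),
    i.e. the column-wise saxpy loop shown above fused into a single kernel launch
    over the column-major storage of X and R.
*/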
extern "C" magma_int_t
magma_slobpcg_res( magma_int_t num_rows,
magma_int_t num_vecs,
float *evalues,
float *X,
float *R,
float *res ){
// every thread handles one row
magma_int_t block_size = BLOCK_SIZE;
dim3 block( block_size );
dim3 grid( (num_rows+block_size-1)/block_size );
hipLaunchKernelGGL(( magma_slobpcg_res_kernel), dim3(grid), dim3(block), 0, magma_stream ,
num_rows, num_vecs, evalues, X, R, res );
return MAGMA_SUCCESS;
}
| e39204f6185c9ca73c98bbec780f1c6fd1728794.cu | /*
-- MAGMA (version 1.5.0-beta3) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date July 2014
@generated from zlobpcg_residuals.cu normal z -> s, Fri Jul 18 17:34:28 2014
*/
#include "common_magma.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define BLOCK_SIZEx 32
#define BLOCK_SIZEy 16
#define PRECISION_s
// copied from snrm2.cu in trunk/magmablas
// ----------------------------------------
// Does sum reduction of array x, leaving total in x[0].
// Contents of x are destroyed in the process.
// With k threads, can reduce array up to 2*k in size.
// Assumes number of threads <= 1024 (which is max number of threads up to CUDA capability 3.0)
// Having n as template parameter allows compiler to evaluate some conditions at compile time.
template< int n >
__device__ void sum_reduce( /*int n,*/ int i, float* x )
{
__syncthreads();
if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i] += x[i+1024]; } __syncthreads(); }
if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[i] += x[i+ 512]; } __syncthreads(); }
if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[i] += x[i+ 256]; } __syncthreads(); }
if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i] += x[i+ 128]; } __syncthreads(); }
if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i] += x[i+ 64]; } __syncthreads(); }
if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i] += x[i+ 32]; } __syncthreads(); }
// probably don't need __syncthreads for < 16 threads
// because of implicit warp level synchronization.
if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i] += x[i+ 16]; } __syncthreads(); }
if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i] += x[i+ 8]; } __syncthreads(); }
if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i] += x[i+ 4]; } __syncthreads(); }
if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i] += x[i+ 2]; } __syncthreads(); }
if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i] += x[i+ 1]; } __syncthreads(); }
}
// end sum_reduce
__global__ void
magma_slobpcg_res_kernel( magma_int_t num_rows,
magma_int_t num_vecs,
float *evals,
float *X,
float *R,
float *res){
int row = blockIdx.x * blockDim.x + threadIdx.x; // global row index
if( row<num_rows){
for( int i=0; i<num_vecs; i++ ){
R[row + i*num_rows] = R[row + i*num_rows]
+ MAGMA_S_MAKE( -evals[i], 0.0 )
* X[ row + i*num_rows ];
}
}
}
/*
magmablas_snrm2_kernel( int m, float *da, int ldda, float *dxnorm )
{
const int i = threadIdx.x;
float *dx = da + blockIdx.x * ldda;
__shared__ float sum[ BLOCK_SIZE ];
float re, lsum;
// get norm of dx
lsum = 0;
for( int j = i; j < m; j += BLOCK_SIZE ) {
#if (defined(PRECISION_s) || defined(PRECISION_d))
re = dx[j];
lsum += re*re;
#else
re = MAGMA_S_REAL( dx[j] );
float im = MAGMA_S_IMAG( dx[j] );
lsum += re*re + im*im;
#endif
}
sum[i] = lsum;
sum_reduce< BLOCK_SIZE >( i, sum );
if (i==0)
res[blockIdx.x] = sqrt(sum[0]);
}
*/
/**
Purpose
-------
This routine computes for Block-LOBPCG, the set of residuals.
R = Ax - x evalues
It replaces:
for(int i=0; i < n; i++){
magma_saxpy(m, MAGMA_S_MAKE(-evalues[i],0),blockX+i*m,1,blockR+i*m,1);
}
The memory layout of x is:
/ x1[0] x2[0] x3[0] \
| x1[1] x2[1] x3[1] |
x = | x1[2] x2[2] x3[2] | = x1[0] x1[1] x1[2] x1[3] x1[4] x2[0] x2[1] .
| x1[3] x2[3] x3[3] |
\ x1[4] x2[4] x3[4] /
Arguments
---------
@param
num_rows magma_int_t
number of rows
@param
num_vecs magma_int_t
number of vectors
@param
evalues float*
array of eigenvalues/approximations
@param
X float*
block of eigenvector approximations
@param
R float*
block of residuals
@param
res float*
array of residuals
@ingroup magmasparse_saux
********************************************************************/
extern "C" magma_int_t
magma_slobpcg_res( magma_int_t num_rows,
magma_int_t num_vecs,
float *evalues,
float *X,
float *R,
float *res ){
// every thread handles one row
magma_int_t block_size = BLOCK_SIZE;
dim3 block( block_size );
dim3 grid( (num_rows+block_size-1)/block_size );
magma_slobpcg_res_kernel<<< grid, block, 0, magma_stream >>>
( num_rows, num_vecs, evalues, X, R, res );
return MAGMA_SUCCESS;
}
|
5f59daecd48aa028bded9f12ae53e11083c2e171.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "saber/core/common.h"
#include "saber/core/tensor.h"
#include "saber/funcs/calibrate.h"
#include <cfloat>
namespace anakin {
namespace saber {
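// transform_nchw_2_c4: quantizes an fp32 NCHW tensor into the packed int8 "c4" layout.
// Each thread rounds four consecutive channels of one (n, h, w) position with the given
// scale (__float2int_rn) and stores them as a single char4.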
__global__
void transform_nchw_2_c4(char* out_data, const float* in_data,
int valid_num, int valid_channel_4, int valid_height, int valid_width,
int in_n_stride, int in_c_stride, int in_h_stride, int in_w_stride,
int out_n_stride, int out_c_stride, int out_h_stride, int out_w_stride,
float scale,
int count) {
int load0, load1, load2, load3;
int gid = threadIdx.x + blockIdx.x * blockDim.x;
int write_w = (gid) % valid_width;
int write_h = (gid / (out_h_stride)) % valid_height;
int write_c = (gid / (out_c_stride)) % valid_channel_4;
int write_n = (gid / (out_n_stride)) % valid_num;
int in_offset = write_n * in_n_stride
+ write_c * in_c_stride * 4
+ write_h * in_h_stride
+ write_w * in_w_stride;
int out_offset = write_n * out_n_stride
+ write_c * out_c_stride
+ write_h * out_h_stride
+ write_w;
if (gid < count) {
char4 write;
load0 = __float2int_rn(__ldg(&in_data[in_offset]) * scale);
write.x = static_cast<char>(load0);
in_offset += in_c_stride;
load1 = __float2int_rn(__ldg(&in_data[in_offset]) * scale);
write.y = static_cast<char>(load1);
in_offset += in_c_stride;
load2 = __float2int_rn(__ldg(&in_data[in_offset]) * scale);
write.z = static_cast<char>(load2);
in_offset += in_c_stride;
load3 = __float2int_rn(__ldg(&in_data[in_offset]) * scale);
write.w = static_cast<char>(load3);
((char4*)out_data)[out_offset] = write;
}
}
template<>
SaberStatus conv_calibrate_fp32_int8_c4<NV>(Tensor<NV> &out_tensor,
const Tensor<NV> &in_tensor, const float in_scale, Context<NV> ctx) {
const float * in_data = (const float*)in_tensor.data();
char * out_data = (char*)out_tensor.mutable_data();
Shape in_stride = in_tensor.get_stride();
Shape in_shape = in_tensor.valid_shape();
Shape out_shape = out_tensor.valid_shape();
int count = out_shape[0] * out_shape[1] * out_shape[2] * out_shape[3];
hipStream_t cuda_stream = ctx.get_compute_stream();
hipLaunchKernelGGL(( transform_nchw_2_c4), dim3(CUDA_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS),
0, cuda_stream, out_data, in_data,
out_shape[0], out_shape[1], out_shape[2], out_shape[3],
in_stride[0], in_stride[1], in_stride[2], in_stride[3],
out_shape[1] * out_shape[2] * out_shape[3],
out_shape[2] * out_shape[3], out_shape[3], 1,
(1.f / in_scale), count);
return SaberSuccess;
}
__global__ void transform_nchw_2_nchw(float * out_data,
const float* in_data, const int count,
int in_n, int in_c, int in_h, int in_w,
int in_n_stride, int in_c_stride, int in_h_stride, int in_w_stride,
int out_n, int out_c, int out_h, int out_w,
int out_n_stride, int out_c_stride, int out_h_stride, int out_w_stride,
const float *scale, const float input_scale) {
CUDA_KERNEL_LOOP(tid, count){
int read_w = tid % in_w;
int read_h = (tid / (in_w)) % in_h;
int read_c = (tid / (in_h * in_w)) % in_c;
int read_n = (tid / (in_c * in_h * in_w)) % in_n;
int write_w = tid % out_w;
int write_h = (tid / (out_w)) % out_h;
int write_c = (tid / (out_h * out_w)) % out_c;
int write_n = (tid / (out_c * out_h * out_w)) % out_n;
int in_idx = read_n * in_n_stride
+ read_c * in_c_stride
+ read_h * in_h_stride
+ read_w * in_w_stride;
int out_idx = write_n * out_n_stride
+ write_c * out_c_stride
+ write_h * out_h_stride
+ write_w * out_w_stride;
float in_var = in_data[in_idx];
float in_scale = scale[read_c];
out_data[out_idx] = in_var * in_scale * input_scale;
}
}
template<>
SaberStatus conv_calibrate_int32_fp32<NV>(
Tensor<NV> &out_tensor, const Tensor<NV> &in_tensor,
const float in_scale, const float* weight_scale, Context<NV> ctx) {
Shape in_shape = in_tensor.valid_shape();
Shape out_shape = out_tensor.valid_shape();
Shape stride_in = in_tensor.get_stride();
Shape stride_out = out_tensor.get_stride();
const float *in_data = (const float*)in_tensor.data();
float *out_data = (float*)out_tensor.mutable_data();
const int count = in_tensor.valid_size();
hipStream_t cuda_stream = ctx.get_compute_stream();
hipLaunchKernelGGL(( transform_nchw_2_nchw)
, dim3(CUDA_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, cuda_stream,
out_data, in_data, count,
in_shape[0], in_shape[1], in_shape[2], in_shape[3],
stride_in[0], stride_in[1], stride_in[2], stride_in[3],
out_shape[0], out_shape[1], out_shape[2], out_shape[3],
stride_out[0], stride_out[1], stride_out[2], stride_out[3],
weight_scale, in_scale);
return SaberSuccess;
}
__global__
void int8nchwc4_fp32nchw(float* out_data, const char* in_data,
int valid_num, int valid_channel_4, int valid_height, int valid_width,
int in_n_stride, int in_c_stride, int in_h_stride, int in_w_stride,
int out_n_stride, int out_c_stride, int out_h_stride, int out_w_stride,
const float* scale, int count) {
float load0, load1, load2, load3;
int gid = threadIdx.x + blockIdx.x * blockDim.x;
int read_w = (gid) % valid_width;
int read_h = (gid / (in_h_stride)) % valid_height;
int read_c = (gid / (in_c_stride)) % valid_channel_4;
int read_n = (gid / (in_n_stride)) % valid_num;
int scale_index = read_c << 2;
int in_offset = read_n * in_n_stride
+ read_c * in_c_stride
+ read_h * in_h_stride
+ read_w;
int out_offset = read_n * out_n_stride
+ read_c * (out_c_stride << 2)
+ read_h * out_h_stride
+ read_w * out_w_stride;
if (gid < count) {
char4 readin = __ldg(&((const char4*)in_data)[in_offset]);
load0 = static_cast<float>(readin.x);
load1 = static_cast<float>(readin.y);
load2 = static_cast<float>(readin.z);
load3 = static_cast<float>(readin.w);
out_data[out_offset] = load0 * scale[scale_index]; out_offset += out_c_stride;
out_data[out_offset] = load1 * scale[scale_index + 1]; out_offset += out_c_stride;
out_data[out_offset] = load2 * scale[scale_index + 2]; out_offset += out_c_stride;
out_data[out_offset] = load3 * scale[scale_index + 3];
}
}
template<>
SaberStatus conv_calibrate_int8_c4_fp32<NV>(
Tensor<NV> &out_tensor,
const Tensor<NV> &in_tensor,
const float* weight_scale,
Context<NV> ctx) {
Shape out_stride = out_tensor.get_stride();
Shape in_shape = in_tensor.valid_shape();
Shape out_shape = out_tensor.valid_shape();
int count = in_shape[0] * in_shape[1] * in_shape[2] * in_shape[3];
const char * in_data = (const char*)in_tensor.data();
float * out_data = (float*)out_tensor.mutable_data();
hipStream_t cuda_stream = ctx.get_compute_stream();
hipLaunchKernelGGL(( int8nchwc4_fp32nchw), dim3(CUDA_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, cuda_stream, out_data, in_data,
in_shape[0], in_shape[1], in_shape[2], in_shape[3],
in_shape[1] * in_shape[2] * in_shape[3],
in_shape[2] * in_shape[3],
in_shape[3], 1,
out_stride[0], out_stride[1], out_stride[2], out_stride[3],
weight_scale, count);
return SaberSuccess;
}
#define JUDGESIGN(x) (((x) >= 0) ? +1 : -1)
__global__
void calibrate_float2char_col(signed char* dst, const float* src,
float * scale, int height, int width) {
int gid = threadIdx.x + blockIdx.x * blockDim.x;
float col_max = 0.0f;
const float *data = src + gid;
for(int idx = 0; idx < height; ++idx){
if (gid < width) {
float temp = fabsf(data[idx * width]);
col_max = (col_max >= temp)? col_max : temp;
}
}
signed char* target = dst + gid;
float col_scale = (float)((1 << 7) - 1) / col_max;
for(int idx = 0; idx < height; ++idx) {
if(gid < width) {
float temp = data[idx * width];
if(temp >= col_max - FLT_EPSILON) {
target[idx * width] = (signed char)((1 << 7) - 1);
} else if(temp <= -col_max + FLT_EPSILON) {
target[idx * width] = (signed char)(-(1 << 7));
} else {
target[idx * width] = (signed char)(temp * col_scale + JUDGESIGN(temp) * 0.5);
}
}
}
if (gid < width) scale[gid] = 1.f / col_scale;
}
__global__
void calibrate_float2char_row(signed char* dst, const float* src,
float * scale, int height, int width) {
int gid = threadIdx.x + blockIdx.x * blockDim.x;
float row_max = 0.0f;
const float * data = src + width * gid;
for(int idx = 0; idx < width; ++idx) {
if(gid < height){
float temp = fabsf(data[idx]);
row_max = (row_max >= temp) ? row_max : temp;
}
}
signed char * target = dst + width * gid;
float row_scale = (float)((1 << 7) - 1) / row_max;
for(int idx = 0; idx < width; ++idx) {
if(gid < height) {
float temp = data[idx];
if(temp >= row_max - FLT_EPSILON) {
target[idx] = (signed char)((1 << 7) - 1);
} else if(temp <= -row_max + FLT_EPSILON) {
target[idx] = (signed char)(-(1 << 7));
} else {
target[idx] = (signed char)(temp * row_scale + JUDGESIGN(temp) * 0.5);
}
}
}
if (gid < height) scale[gid] = 1.f / row_scale;
}
__global__ void calibrate_fix2float(float * dst,
const float* sA, const float* sB,
float alpha, float beta, int height,
int width, int threads) {
int ri = blockIdx.x;
int tid = threadIdx.x;
int loop = (width / threads) + ((width % threads == 0) ? 0 : 1);
float rscale = (sA[ri] == 0.0f) ? 1.0f : sA[ri];
float * data = dst + width * ri;
int idx = 0;
for (int i = 0; i < loop; ++i) {
if(idx + tid < width){
float temp = data[idx + tid];
float cscale = (sB[idx + tid] == 0.0f) ? 255.0f : sB[idx + tid];
data[idx + tid] = beta * temp + alpha * temp * rscale * cscale;
}
idx += threads;
}
}
template <>
void float2char<NV>(bool col_direct, signed char* dst, const float* src,
float *scale, int height, int width, Context<NV> ctx) {
int threads = 32;
hipStream_t cuda_stream = ctx.get_compute_stream();
if (col_direct) {
hipLaunchKernelGGL(( calibrate_float2char_col) , dim3((width / threads) + (((width % threads) == 0) ? 0 : 1)), dim3(threads), 0,
cuda_stream ,
dst, src, scale, height, width);
} else {
hipLaunchKernelGGL(( calibrate_float2char_row), dim3((height / threads) + (((height % threads)==0) ? 0 : 1)), dim3(threads), 0, cuda_stream,
dst, src, scale, height, width);
}
}
template <>
void fix2float<NV>(float * dst,
const float *sA, const float *sB,
const float alpha, const float beta, int height, int width, Context<NV> ctx) {
int threads = 256;
hipStream_t cuda_stream = ctx.get_compute_stream();
hipLaunchKernelGGL(( calibrate_fix2float), dim3(height), dim3(threads), 0, cuda_stream, dst, sA, sB, alpha, beta,
height, width, threads);
}
}
} | 5f59daecd48aa028bded9f12ae53e11083c2e171.cu |
#include "saber/core/common.h"
#include "saber/core/tensor.h"
#include "saber/funcs/calibrate.h"
#include <cfloat>
namespace anakin {
namespace saber {
__global__
void transform_nchw_2_c4(char* out_data, const float* in_data,
int valid_num, int valid_channel_4, int valid_height, int valid_width,
int in_n_stride, int in_c_stride, int in_h_stride, int in_w_stride,
int out_n_stride, int out_c_stride, int out_h_stride, int out_w_stride,
float scale,
int count) {
int load0, load1, load2, load3;
int gid = threadIdx.x + blockIdx.x * blockDim.x;
int write_w = (gid) % valid_width;
int write_h = (gid / (out_h_stride)) % valid_height;
int write_c = (gid / (out_c_stride)) % valid_channel_4;
int write_n = (gid / (out_n_stride)) % valid_num;
int in_offset = write_n * in_n_stride
+ write_c * in_c_stride * 4
+ write_h * in_h_stride
+ write_w * in_w_stride;
int out_offset = write_n * out_n_stride
+ write_c * out_c_stride
+ write_h * out_h_stride
+ write_w;
if (gid < count) {
char4 write;
load0 = __float2int_rn(__ldg(&in_data[in_offset]) * scale);
write.x = static_cast<char>(load0);
in_offset += in_c_stride;
load1 = __float2int_rn(__ldg(&in_data[in_offset]) * scale);
write.y = static_cast<char>(load1);
in_offset += in_c_stride;
load2 = __float2int_rn(__ldg(&in_data[in_offset]) * scale);
write.z = static_cast<char>(load2);
in_offset += in_c_stride;
load3 = __float2int_rn(__ldg(&in_data[in_offset]) * scale);
write.w = static_cast<char>(load3);
((char4*)out_data)[out_offset] = write;
}
}
template<>
SaberStatus conv_calibrate_fp32_int8_c4<NV>(Tensor<NV> &out_tensor,
const Tensor<NV> &in_tensor, const float in_scale, Context<NV> ctx) {
const float * in_data = (const float*)in_tensor.data();
char * out_data = (char*)out_tensor.mutable_data();
Shape in_stride = in_tensor.get_stride();
Shape in_shape = in_tensor.valid_shape();
Shape out_shape = out_tensor.valid_shape();
int count = out_shape[0] * out_shape[1] * out_shape[2] * out_shape[3];
cudaStream_t cuda_stream = ctx.get_compute_stream();
transform_nchw_2_c4<<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS,
0, cuda_stream>>>(out_data, in_data,
out_shape[0], out_shape[1], out_shape[2], out_shape[3],
in_stride[0], in_stride[1], in_stride[2], in_stride[3],
out_shape[1] * out_shape[2] * out_shape[3],
out_shape[2] * out_shape[3], out_shape[3], 1,
(1.f / in_scale), count);
return SaberSuccess;
}
__global__ void transform_nchw_2_nchw(float * out_data,
const float* in_data, const int count,
int in_n, int in_c, int in_h, int in_w,
int in_n_stride, int in_c_stride, int in_h_stride, int in_w_stride,
int out_n, int out_c, int out_h, int out_w,
int out_n_stride, int out_c_stride, int out_h_stride, int out_w_stride,
const float *scale, const float input_scale) {
CUDA_KERNEL_LOOP(tid, count){
int read_w = tid % in_w;
int read_h = (tid / (in_w)) % in_h;
int read_c = (tid / (in_h * in_w)) % in_c;
int read_n = (tid / (in_c * in_h * in_w)) % in_n;
int write_w = tid % out_w;
int write_h = (tid / (out_w)) % out_h;
int write_c = (tid / (out_h * out_w)) % out_c;
int write_n = (tid / (out_c * out_h * out_w)) % out_n;
int in_idx = read_n * in_n_stride
+ read_c * in_c_stride
+ read_h * in_h_stride
+ read_w * in_w_stride;
int out_idx = write_n * out_n_stride
+ write_c * out_c_stride
+ write_h * out_h_stride
+ write_w * out_w_stride;
float in_var = in_data[in_idx];
float in_scale = scale[read_c];
out_data[out_idx] = in_var * in_scale * input_scale;
}
}
template<>
SaberStatus conv_calibrate_int32_fp32<NV>(
Tensor<NV> &out_tensor, const Tensor<NV> &in_tensor,
const float in_scale, const float* weight_scale, Context<NV> ctx) {
Shape in_shape = in_tensor.valid_shape();
Shape out_shape = out_tensor.valid_shape();
Shape stride_in = in_tensor.get_stride();
Shape stride_out = out_tensor.get_stride();
const float *in_data = (const float*)in_tensor.data();
float *out_data = (float*)out_tensor.mutable_data();
const int count = in_tensor.valid_size();
cudaStream_t cuda_stream = ctx.get_compute_stream();
transform_nchw_2_nchw
<<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, cuda_stream>>>(
out_data, in_data, count,
in_shape[0], in_shape[1], in_shape[2], in_shape[3],
stride_in[0], stride_in[1], stride_in[2], stride_in[3],
out_shape[0], out_shape[1], out_shape[2], out_shape[3],
stride_out[0], stride_out[1], stride_out[2], stride_out[3],
weight_scale, in_scale);
return SaberSuccess;
}
__global__
void int8nchwc4_fp32nchw(float* out_data, const char* in_data,
int valid_num, int valid_channel_4, int valid_height, int valid_width,
int in_n_stride, int in_c_stride, int in_h_stride, int in_w_stride,
int out_n_stride, int out_c_stride, int out_h_stride, int out_w_stride,
const float* scale, int count) {
float load0, load1, load2, load3;
int gid = threadIdx.x + blockIdx.x * blockDim.x;
int read_w = (gid) % valid_width;
int read_h = (gid / (in_h_stride)) % valid_height;
int read_c = (gid / (in_c_stride)) % valid_channel_4;
int read_n = (gid / (in_n_stride)) % valid_num;
int scale_index = read_c << 2;
int in_offset = read_n * in_n_stride
+ read_c * in_c_stride
+ read_h * in_h_stride
+ read_w;
int out_offset = read_n * out_n_stride
+ read_c * (out_c_stride << 2)
+ read_h * out_h_stride
+ read_w * out_w_stride;
if (gid < count) {
char4 readin = __ldg(&((const char4*)in_data)[in_offset]);
load0 = static_cast<float>(readin.x);
load1 = static_cast<float>(readin.y);
load2 = static_cast<float>(readin.z);
load3 = static_cast<float>(readin.w);
out_data[out_offset] = load0 * scale[scale_index]; out_offset += out_c_stride;
out_data[out_offset] = load1 * scale[scale_index + 1]; out_offset += out_c_stride;
out_data[out_offset] = load2 * scale[scale_index + 2]; out_offset += out_c_stride;
out_data[out_offset] = load3 * scale[scale_index + 3];
}
}
template<>
SaberStatus conv_calibrate_int8_c4_fp32<NV>(
Tensor<NV> &out_tensor,
const Tensor<NV> &in_tensor,
const float* weight_scale,
Context<NV> ctx) {
Shape out_stride = out_tensor.get_stride();
Shape in_shape = in_tensor.valid_shape();
Shape out_shape = out_tensor.valid_shape();
int count = in_shape[0] * in_shape[1] * in_shape[2] * in_shape[3];
const char * in_data = (const char*)in_tensor.data();
float * out_data = (float*)out_tensor.mutable_data();
cudaStream_t cuda_stream = ctx.get_compute_stream();
int8nchwc4_fp32nchw<<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, cuda_stream>>>(out_data, in_data,
in_shape[0], in_shape[1], in_shape[2], in_shape[3],
in_shape[1] * in_shape[2] * in_shape[3],
in_shape[2] * in_shape[3],
in_shape[3], 1,
out_stride[0], out_stride[1], out_stride[2], out_stride[3],
weight_scale, count);
return SaberSuccess;
}
#define JUDGESIGN(x) (((x) >= 0) ? +1 : -1)
__global__
void calibrate_float2char_col(signed char* dst, const float* src,
float * scale, int height, int width) {
int gid = threadIdx.x + blockIdx.x * blockDim.x;
float col_max = 0.0f;
const float *data = src + gid;
for(int idx = 0; idx < height; ++idx){
if (gid < width) {
float temp = fabsf(data[idx * width]);
col_max = (col_max >= temp)? col_max : temp;
}
}
signed char* target = dst + gid;
float col_scale = (float)((1 << 7) - 1) / col_max;
for(int idx = 0; idx < height; ++idx) {
if(gid < width) {
float temp = data[idx * width];
if(temp >= col_max - FLT_EPSILON) {
target[idx * width] = (signed char)((1 << 7) - 1);
} else if(temp <= -col_max + FLT_EPSILON) {
target[idx * width] = (signed char)(-(1 << 7));
} else {
target[idx * width] = (signed char)(temp * col_scale + JUDGESIGN(temp) * 0.5);
}
}
}
if (gid < width) scale[gid] = 1.f / col_scale;
}
__global__
void calibrate_float2char_row(signed char* dst, const float* src,
float * scale, int height, int width) {
int gid = threadIdx.x + blockIdx.x * blockDim.x;
float row_max = 0.0f;
const float * data = src + width * gid;
for(int idx = 0; idx < width; ++idx) {
if(gid < height){
float temp = fabsf(data[idx]);
row_max = (row_max >= temp) ? row_max : temp;
}
}
signed char * target = dst + width * gid;
float row_scale = (float)((1 << 7) - 1) / row_max;
for(int idx = 0; idx < width; ++idx) {
if(gid < height) {
float temp = data[idx];
if(temp >= row_max - FLT_EPSILON) {
target[idx] = (signed char)((1 << 7) - 1);
} else if(temp <= -row_max + FLT_EPSILON) {
target[idx] = (signed char)(-(1 << 7));
} else {
target[idx] = (signed char)(temp * row_scale + JUDGESIGN(temp) * 0.5);
}
}
}
if (gid < height) scale[gid] = 1.f / row_scale;
}
__global__ void calibrate_fix2float(float * dst,
const float* sA, const float* sB,
float alpha, float beta, int height,
int width, int threads) {
int ri = blockIdx.x;
int tid = threadIdx.x;
int loop = (width / threads) + ((width % threads == 0) ? 0 : 1);
float rscale = (sA[ri] == 0.0f) ? 1.0f : sA[ri];
float * data = dst + width * ri;
int idx = 0;
for (int i = 0; i < loop; ++i) {
if(idx + tid < width){
float temp = data[idx + tid];
float cscale = (sB[idx + tid] == 0.0f) ? 255.0f : sB[idx + tid];
data[idx + tid] = beta * temp + alpha * temp * rscale * cscale;
}
idx += threads;
}
}
template <>
void float2char<NV>(bool col_direct, signed char* dst, const float* src,
float *scale, int height, int width, Context<NV> ctx) {
int threads = 32;
cudaStream_t cuda_stream = ctx.get_compute_stream();
if (col_direct) {
calibrate_float2char_col <<< (width / threads) + (((width % threads) == 0) ? 0 : 1), threads, 0,
cuda_stream >>> (
dst, src, scale, height, width);
} else {
calibrate_float2char_row<<<(height / threads) + (((height % threads)==0) ? 0 : 1), threads, 0, cuda_stream>>>(
dst, src, scale, height, width);
}
}
template <>
void fix2float<NV>(float * dst,
const float *sA, const float *sB,
const float alpha, const float beta, int height, int width, Context<NV> ctx) {
int threads = 256;
cudaStream_t cuda_stream = ctx.get_compute_stream();
calibrate_fix2float<<<height, threads, 0, cuda_stream>>>(dst, sA, sB, alpha, beta,
height, width, threads);
}
}
} |
e68a632fdf4fad4c6520b4e44657cd0f09e00659.hip | // !!! This is a file automatically generated by hipify!!!
#include "helper/common.h"
#include "visitors.h"
#include "engine.h"
void MetaVisitor::visit(FCNode& n) {
weight_sz = n.functor.size_parameters();
map_dim = n.functor.out_dim();
workspace = 0;
}
void MetaVisitor::visit(ActivationNode& n) {
weight_sz = 0;
map_dim = n.functor.out_dim();
workspace = 0;
}
void MetaVisitor::visit(PlaceHolderNode& n) {
weight_sz = 0;
map_dim = n.dim;
workspace = 0;
}
void MetaVisitor::visit(AddNode& n) {
weight_sz = 0;
map_dim = n.dim;
workspace = 0;
}
void MetaVisitor::visit(BatchNormNode& n) {
weight_sz = n.functor.weight_size();
map_dim = n.functor.out_dim();
workspace = 0;
}
void MetaVisitor::visit(PoolingNode& n) {
weight_sz = 0;
map_dim = n.functor.dims_out();
workspace = 0;
}
void MetaVisitor::visit(ConvolutionNode& n) {
weight_sz = n.functor.get_weight_size();
map_dim = n.functor.dims_out();
workspace = n.functor.get_workspace_size();
}
void ForwardVisitor::visit(FCNode& n) {
auto& mm = eng.get_mm();
auto& opt = eng.get_opt();
auto out = mm.get_feature_write(n.out_id);
auto in = mm.get_feature(n.in_id);
auto weight = opt.get_weight(n.out_id);
n.functor.forward(out, in, weight);
}
void ForwardVisitor::visit(ActivationNode& n) {
auto& mm = eng.get_mm();
auto out = mm.get_feature_write(n.out_id);
auto in = mm.get_feature(n.in_id);
n.functor.forward(out, in);
}
void ForwardVisitor::visit(PlaceHolderNode& n) {
//
assert(n.node_id == 0);
auto dev_p = eng.get_mm().get_feature_write(n.node_id);
hipMemcpy(dev_p, input_, n.size * sizeof(T), hipMemcpyDefault);
}
void ForwardVisitor::visit(AddNode& n) {
  // elementwise add of the two parent feature maps
auto& mm = eng.get_mm();
auto out = mm.get_feature_write(n.out_id);
auto a = mm.get_feature(n.a_id);
auto b = mm.get_feature(n.b_id);
thrust::transform(thrust::device, a, a + n.size, b, out, thrust::plus<float>());
}
void ForwardVisitor::visit(BatchNormNode& n) {
// assert(false);
auto& mm = eng.get_mm();
auto& opt = eng.get_opt();
auto weight = opt.get_weight(n.out_id);
auto in = mm.get_feature(n.in_id);
auto out = mm.get_feature_write(n.out_id);
n.functor.forward(out, in, weight);
}
void ForwardVisitor::visit(ConvolutionNode& n) {
// assert(false);
auto& mm = eng.get_mm();
auto& opt = eng.get_opt();
auto weight = opt.get_weight(n.out_id);
auto in = mm.get_feature(n.in_id);
auto out = mm.get_feature_write(n.out_id);
n.functor.forward(out, in, weight);
}
void ForwardVisitor::visit(PoolingNode& n) {
// assert(false);
auto& mm = eng.get_mm();
auto& opt = eng.get_opt();
auto in = mm.get_feature(n.in_id);
auto out = mm.get_feature_write(n.out_id);
n.functor.forward(out, in);
}
void FakeVisitor::visit(FCNode& n) {}
void FakeVisitor::visit(ActivationNode& n) {}
void FakeVisitor::visit(PlaceHolderNode& n) {}
void FakeVisitor::visit(AddNode& n) {}
void FakeVisitor::visit(BatchNormNode& n) {}
void FakeVisitor::visit(ConvolutionNode& n) {}
void FakeVisitor::visit(PoolingNode& n) {}
void BackwardVisitor::visit(FCNode& n) {
auto& mm = eng.get_mm();
auto& opt = eng.get_opt();
auto out = mm.get_feature(n.out_id);
auto in = mm.get_feature(n.in_id);
auto out_grad = mm.get_gradient_final(n.out_id);
auto in_grad = mm.get_gradient(n.in_id);
auto weight = opt.get_weight(n.out_id);
auto weight_grad = opt.get_weight_grad(n.out_id);
n.functor.backward(in_grad, weight_grad, in, out_grad, weight);
}
void BackwardVisitor::visit(ActivationNode& n) {
auto& mm = eng.get_mm();
auto out = mm.get_feature(n.out_id);
auto in = mm.get_feature(n.in_id);
auto out_grad = mm.get_gradient_final(n.out_id);
auto in_grad = mm.get_gradient(n.in_id);
n.functor.backward(in_grad, out_grad, in, out);
}
void BackwardVisitor::visit(PlaceHolderNode& n) {
auto& mm = eng.get_mm();
auto x = mm.get_gradient_final(n.node_id);
return;
}
void BackwardVisitor::visit(AddNode& n) {
auto& mm = eng.get_mm();
auto a_g = mm.get_gradient(n.a_id);
auto b_g = mm.get_gradient(n.b_id);
auto out_grad_ = mm.get_gradient_final(n.out_id);
auto out_grad = static_cast<const float*>(out_grad_);
thrust::transform(
thrust::device, a_g, a_g + n.size, out_grad, a_g, thrust::plus<double>());
thrust::transform(
thrust::device, b_g, b_g + n.size, out_grad, b_g, thrust::plus<double>());
}
void BackwardVisitor::visit(BatchNormNode& n) {
auto& mm = eng.get_mm();
auto& opt = eng.get_opt();
auto out = mm.get_feature(n.out_id);
auto in = mm.get_feature(n.in_id);
auto out_grad = mm.get_gradient_final(n.out_id);
auto in_grad = mm.get_gradient(n.in_id);
auto weight = opt.get_weight(n.out_id);
auto weight_grad = opt.get_weight_grad(n.out_id);
n.functor.backward(in_grad, weight_grad, in, out_grad, weight);
}
void BackwardVisitor::visit(ConvolutionNode& n) {
auto& mm = eng.get_mm();
auto& opt = eng.get_opt();
auto out = mm.get_feature(n.out_id);
auto in = mm.get_feature(n.in_id);
auto out_grad = mm.get_gradient_final(n.out_id);
auto in_grad = mm.get_gradient(n.in_id);
auto weight = opt.get_weight(n.out_id);
auto weight_grad = opt.get_weight_grad(n.out_id);
n.functor.backward(in_grad, weight_grad, in, out_grad, weight);
}
void BackwardVisitor::visit(PoolingNode& n) {
auto& mm = eng.get_mm();
auto& opt = eng.get_opt();
auto out = mm.get_feature(n.out_id);
auto in = mm.get_feature(n.in_id);
auto out_grad = mm.get_gradient_final(n.out_id);
auto in_grad = mm.get_gradient(n.in_id);
n.functor.backward(in_grad, in, out_grad, out);
}
| e68a632fdf4fad4c6520b4e44657cd0f09e00659.cu | #include "helper/common.h"
#include "visitors.h"
#include "engine.h"
void MetaVisitor::visit(FCNode& n) {
weight_sz = n.functor.size_parameters();
map_dim = n.functor.out_dim();
workspace = 0;
}
void MetaVisitor::visit(ActivationNode& n) {
weight_sz = 0;
map_dim = n.functor.out_dim();
workspace = 0;
}
void MetaVisitor::visit(PlaceHolderNode& n) {
weight_sz = 0;
map_dim = n.dim;
workspace = 0;
}
void MetaVisitor::visit(AddNode& n) {
weight_sz = 0;
map_dim = n.dim;
workspace = 0;
}
void MetaVisitor::visit(BatchNormNode& n) {
weight_sz = n.functor.weight_size();
map_dim = n.functor.out_dim();
workspace = 0;
}
void MetaVisitor::visit(PoolingNode& n) {
weight_sz = 0;
map_dim = n.functor.dims_out();
workspace = 0;
}
void MetaVisitor::visit(ConvolutionNode& n) {
weight_sz = n.functor.get_weight_size();
map_dim = n.functor.dims_out();
workspace = n.functor.get_workspace_size();
}
void ForwardVisitor::visit(FCNode& n) {
auto& mm = eng.get_mm();
auto& opt = eng.get_opt();
auto out = mm.get_feature_write(n.out_id);
auto in = mm.get_feature(n.in_id);
auto weight = opt.get_weight(n.out_id);
n.functor.forward(out, in, weight);
}
void ForwardVisitor::visit(ActivationNode& n) {
auto& mm = eng.get_mm();
auto out = mm.get_feature_write(n.out_id);
auto in = mm.get_feature(n.in_id);
n.functor.forward(out, in);
}
void ForwardVisitor::visit(PlaceHolderNode& n) {
//
assert(n.node_id == 0);
auto dev_p = eng.get_mm().get_feature_write(n.node_id);
cudaMemcpy(dev_p, input_, n.size * sizeof(T), cudaMemcpyDefault);
}
void ForwardVisitor::visit(AddNode& n) {
// n
auto& mm = eng.get_mm();
auto out = mm.get_feature_write(n.out_id);
auto a = mm.get_feature(n.a_id);
auto b = mm.get_feature(n.b_id);
thrust::transform(thrust::device, a, a + n.size, b, out, thrust::plus<float>());
}
void ForwardVisitor::visit(BatchNormNode& n) {
// assert(false);
auto& mm = eng.get_mm();
auto& opt = eng.get_opt();
auto weight = opt.get_weight(n.out_id);
auto in = mm.get_feature(n.in_id);
auto out = mm.get_feature_write(n.out_id);
n.functor.forward(out, in, weight);
}
void ForwardVisitor::visit(ConvolutionNode& n) {
// assert(false);
auto& mm = eng.get_mm();
auto& opt = eng.get_opt();
auto weight = opt.get_weight(n.out_id);
auto in = mm.get_feature(n.in_id);
auto out = mm.get_feature_write(n.out_id);
n.functor.forward(out, in, weight);
}
void ForwardVisitor::visit(PoolingNode& n) {
// assert(false);
auto& mm = eng.get_mm();
auto& opt = eng.get_opt();
auto in = mm.get_feature(n.in_id);
auto out = mm.get_feature_write(n.out_id);
n.functor.forward(out, in);
}
void FakeVisitor::visit(FCNode& n) {}
void FakeVisitor::visit(ActivationNode& n) {}
void FakeVisitor::visit(PlaceHolderNode& n) {}
void FakeVisitor::visit(AddNode& n) {}
void FakeVisitor::visit(BatchNormNode& n) {}
void FakeVisitor::visit(ConvolutionNode& n) {}
void FakeVisitor::visit(PoolingNode& n) {}
void BackwardVisitor::visit(FCNode& n) {
auto& mm = eng.get_mm();
auto& opt = eng.get_opt();
auto out = mm.get_feature(n.out_id);
auto in = mm.get_feature(n.in_id);
auto out_grad = mm.get_gradient_final(n.out_id);
auto in_grad = mm.get_gradient(n.in_id);
auto weight = opt.get_weight(n.out_id);
auto weight_grad = opt.get_weight_grad(n.out_id);
n.functor.backward(in_grad, weight_grad, in, out_grad, weight);
}
void BackwardVisitor::visit(ActivationNode& n) {
auto& mm = eng.get_mm();
auto out = mm.get_feature(n.out_id);
auto in = mm.get_feature(n.in_id);
auto out_grad = mm.get_gradient_final(n.out_id);
auto in_grad = mm.get_gradient(n.in_id);
n.functor.backward(in_grad, out_grad, in, out);
}
void BackwardVisitor::visit(PlaceHolderNode& n) {
auto& mm = eng.get_mm();
auto x = mm.get_gradient_final(n.node_id);
return;
}
void BackwardVisitor::visit(AddNode& n) {
auto& mm = eng.get_mm();
auto a_g = mm.get_gradient(n.a_id);
auto b_g = mm.get_gradient(n.b_id);
auto out_grad_ = mm.get_gradient_final(n.out_id);
auto out_grad = static_cast<const float*>(out_grad_);
thrust::transform(
thrust::device, a_g, a_g + n.size, out_grad, a_g, thrust::plus<double>());
thrust::transform(
thrust::device, b_g, b_g + n.size, out_grad, b_g, thrust::plus<double>());
}
void BackwardVisitor::visit(BatchNormNode& n) {
auto& mm = eng.get_mm();
auto& opt = eng.get_opt();
auto out = mm.get_feature(n.out_id);
auto in = mm.get_feature(n.in_id);
auto out_grad = mm.get_gradient_final(n.out_id);
auto in_grad = mm.get_gradient(n.in_id);
auto weight = opt.get_weight(n.out_id);
auto weight_grad = opt.get_weight_grad(n.out_id);
n.functor.backward(in_grad, weight_grad, in, out_grad, weight);
}
void BackwardVisitor::visit(ConvolutionNode& n) {
auto& mm = eng.get_mm();
auto& opt = eng.get_opt();
auto out = mm.get_feature(n.out_id);
auto in = mm.get_feature(n.in_id);
auto out_grad = mm.get_gradient_final(n.out_id);
auto in_grad = mm.get_gradient(n.in_id);
auto weight = opt.get_weight(n.out_id);
auto weight_grad = opt.get_weight_grad(n.out_id);
n.functor.backward(in_grad, weight_grad, in, out_grad, weight);
}
void BackwardVisitor::visit(PoolingNode& n) {
auto& mm = eng.get_mm();
auto& opt = eng.get_opt();
auto out = mm.get_feature(n.out_id);
auto in = mm.get_feature(n.in_id);
auto out_grad = mm.get_gradient_final(n.out_id);
auto in_grad = mm.get_gradient(n.in_id);
n.functor.backward(in_grad, in, out_grad, out);
}
|
0bc5c8d6886657d2dd41480ed1ea178c8a716e1b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// HermiteCUDA.h
//
//
// Created by Raunak Bardia on 12/10/17.
//
//
#ifndef _HermiteCUDA_h
#define _HermiteCUDA_h
__global__ void devicetodevicecopy(double *dphi, double *dpsix, double *dpsiy, double *mphi, double *mpsix, double *mpsiy,
unsigned int nx, unsigned int TileSize)
{
unsigned int bx = blockIdx.x;
unsigned int by = blockIdx.y;
unsigned int tx = threadIdx.x;
unsigned int ty = threadIdx.y;
unsigned int index_x = bx * TileSize + tx;
unsigned int index_y = by * TileSize + ty;
unsigned int indexToWrite = index_y * nx + index_x;
mphi[indexToWrite] = dphi[indexToWrite];
mpsix[indexToWrite] = dpsix[indexToWrite];
mpsiy[indexToWrite] = dpsiy[indexToWrite];
}
__device__ double basepolynomial(double x, double y, int alphax, int alphay, int vx, int vy, double dx, double dy, double xo, double yo){
double bpx = 0.0, bpy = 0.0;
double etax = (x - xo)/dx;
double etay = (y - yo)/dy;
switch(2 * alphax + vx + 1){ // Switching between base polynomials for x
case 1:
bpx = 1 - 3 * pow(etax,2) + 2 * pow(etax,3);
break;
case 2:
bpx = -2 * pow(etax,3) + 3 * pow(etax,2);
break;
case 3:
bpx = pow(etax,3) - 2 * pow(etax,2) + etax;
break;
case 4:
bpx = pow(etax,3) - pow(etax,2);
break;
}
switch(2 * alphay + vy + 1){ // Switching between base polynomials for y
case 1:
bpy = 1 - 3 * pow(etay,2) + 2 * pow(etay,3);
break;
case 2:
bpy = -2 * pow(etay,3) + 3 * pow(etay,2);
break;
case 3:
bpy = pow(etay,3) - 2 * pow(etay,2) + etay;
break;
case 4:
bpy = pow(etay,3) - pow(etay,2);
break;
}
double result = bpx * bpy;
return result;
}
__device__ double gradbpx(double x, double y, int alphax, int alphay, int vx, int vy, double dx, double dy, double xo, double yo){
double bpx = 0.0, bpy = 0.0;
double etax = (x - xo)/dx;
double etay = (y - yo)/dy;
switch(2 * alphax + vx + 1){ // Switching between base polynomials for x
case 1:
bpx = - 6 * etax + 6 * pow(etax,2);
break;
case 2:
bpx = -6 * pow(etax,2) + 6 * etax;
break;
case 3:
bpx = 3 * pow(etax,2) - 4 * etax + 1;
break;
case 4:
bpx = 3 * pow(etax,2) - 2 * etax;
break;
}
switch(2 * alphay + vy + 1){ // Switching between base polynomials for y
case 1:
bpy = 1 - 3 * pow(etay,2) + 2 * pow(etay,3);
break;
case 2:
bpy = -2 * pow(etay,3) + 3 * pow(etay,2);
break;
case 3:
bpy = pow(etay,3) - 2 * pow(etay,2) + etay;
break;
case 4:
bpy = pow(etay,3) - pow(etay,2);
break;
}
double result = bpx * bpy;
return result;
}
__device__ double gradbpy(double x, double y, int alphax, int alphay, int vx, int vy, double dx, double dy, double xo, double yo){
double bpx = 0.0, bpy = 0.0;
double etax = (x - xo)/dx;
double etay = (y - yo)/dy;
switch(2 * alphax + vx + 1){ // Switching between base polynomials for x
case 1:
bpx = 1 - 3 * pow(etax,2) + 2 * pow(etax,3);
break;
case 2:
bpx = -2 * pow(etax,3) + 3 * pow(etax,2);
break;
case 3:
bpx = pow(etax,3) - 2 * pow(etax,2) + etax;
break;
case 4:
bpx = pow(etax,3) - pow(etax,2);
break;
}
switch(2 * alphay + vy + 1){ // Switching between base polynomials for y
case 1:
bpy = - 6 * etay + 6 * pow(etay,2);
break;
case 2:
bpy = -6 * pow(etay,2) + 6 * etay;
break;
case 3:
bpy = 3 * pow(etay,2) - 4 * etay + 1;
break;
case 4:
bpy = 3 * pow(etay,2) - 2 * etay;
break;
}
double result = bpx * bpy;
return result;
}
__device__ double hp(double phi[4], double psix[4], double psiy[4], double psixy[4], double x, double y, double xo, double yo, double dx, double dy){
double H = 0, delta[4], d;
int alphax, alphay, vx, vy;
//double bp; //Base Polynomial Section
for(int i = 0; i < 4; i++){
switch (i){
case 0:{
alphax = 0;
alphay = 0;
memcpy(delta, phi, sizeof(delta));
break;
}
case 1:{
alphax = 0;
alphay = 1;
memcpy(delta, psiy, sizeof(delta));
break;
}
case 2:{
alphax = 1;
alphay = 0;
memcpy(delta, psix, sizeof(delta));
break;
}
case 3:{
alphax = 1;
alphay = 1;
memcpy(delta, psixy, sizeof(delta));
break;
}
}
for(int j = 0; j < 4; j++){
switch (j){
case 0:{
vx = 0;
vy = 0;
d = delta[0];
break;
}
case 1:{
vx = 0;
vy = 1;
d = delta[2];
break;
}
case 2:{
vx = 1;
vy = 0;
d = delta[1];
break;
}
case 3:{
vx = 1;
vy = 1;
d = delta[3];
break;
}
}
//bp = basepolynomial(x, y, alphax, alphay, vx, vy, dx, dy, xo, yo);
H = H + pow(dx, alphax) * pow(dy, alphay) * basepolynomial(x, y, alphax, alphay, vx, vy, dx, dy, xo, yo) * d;
}
}
return H;
}
__device__ double hermx(double* phi, double* psix, double* psiy, double* psixy, double xadv, double yadv, double xo, double yo, double dx, double dy){
double gradientx = 0, delta[4], d;
int alphax, alphay, vx, vy;
//double bp; //Base Polynomial Section
for(int i = 0; i < 4; i++){
switch (i){
case 0:{
alphax = 0;
alphay = 0;
memcpy(delta, phi, sizeof(delta));
break;
}
case 1:{
alphax = 0;
alphay = 1;
memcpy(delta, psiy, sizeof(delta));
break;
}
case 2:{
alphax = 1;
alphay = 0;
memcpy(delta, psix, sizeof(delta));
break;
}
case 3:{
alphax = 1;
alphay = 1;
memcpy(delta, psixy, sizeof(delta));
break;
}
}
for(int j = 0; j < 4; j++){
switch (j){
case 0:{
vx = 0;
vy = 0;
d = delta[0];
break;
}
case 1:{
vx = 0;
vy = 1;
d = delta[2];
break;
}
case 2:{
vx = 1;
vy = 0;
d = delta[1];
break;
}
case 3:{
vx = 1;
vy = 1;
d = delta[3];
break;
}
}
//bp = gradbpx(xadv, yadv, alphax, alphay, vx, vy, dx, dy, xo, yo);
gradientx = gradientx + pow(dx, alphax) * pow(dy, alphay) * gradbpx(xadv, yadv, alphax, alphay, vx, vy, dx, dy, xo, yo) * d * (1/dx);
}
}
return gradientx;
}
__device__ double hermy(double* phi, double* psix, double* psiy, double* psixy, double xadv, double yadv, double xo, double yo, double dx, double dy){
double gradienty = 0, d, delta[4];
int alphax, alphay, vx, vy;
//double bp; //Base Polynomial Section
for(int i = 0; i < 4; i++){
switch (i){
case 0:{
alphax = 0;
alphay = 0;
memcpy(delta, phi, sizeof(delta));
break;
}
case 1:{
alphax = 0;
alphay = 1;
memcpy(delta, psiy, sizeof(delta));
break;
}
case 2:{
alphax = 1;
alphay = 0;
memcpy(delta, psix, sizeof(delta));
break;
}
case 3:{
alphax = 1;
alphay = 1;
memcpy(delta, psixy, sizeof(delta));
break;
}
}
for(int j = 0; j < 4; j++){
switch (j){
case 0:{
vx = 0;
vy = 0;
d = delta[0];
break;
}
case 1:{
vx = 0;
vy = 1;
d = delta[2];
break;
}
case 2:{
vx = 1;
vy = 0;
d = delta[1];
break;
}
case 3:{
vx = 1;
vy = 1;
d = delta[3];
break;
}
}
//bp = gradbpy(xadv, yadv, alphax, alphay, vx, vy, dx, dy, xo, yo);
gradienty = gradienty + pow(dx, alphax) * pow(dy, alphay) * gradbpy(xadv, yadv, alphax, alphay, vx, vy, dx, dy, xo, yo) * d * (1/dy);
}
}
return gradienty;
}
__device__ double hp1D(double phi[2], double psix[2], double x, double xo, double dx){
double H = 0;
double etax = (x - xo)/dx;
H = (1 - 3*pow(etax,2) + 2 * pow(etax,3)) * phi[0] + (3 * pow(etax,2) - 2 * pow(etax,3)) * phi[1] + (etax + pow(etax,3) - 2 * pow(etax,2)) * psix[0] + (pow(etax,3) - pow(etax,2)) * psix[1];
return H;
}
#endif
| 0bc5c8d6886657d2dd41480ed1ea178c8a716e1b.cu | //
// HermiteCUDA.h
//
//
// Created by Raunak Bardia on 12/10/17.
//
//
#ifndef _HermiteCUDA_h
#define _HermiteCUDA_h
__global__ void devicetodevicecopy(double *dphi, double *dpsix, double *dpsiy, double *mphi, double *mpsix, double *mpsiy,
unsigned int nx, unsigned int TileSize)
{
unsigned int bx = blockIdx.x;
unsigned int by = blockIdx.y;
unsigned int tx = threadIdx.x;
unsigned int ty = threadIdx.y;
unsigned int index_x = bx * TileSize + tx;
unsigned int index_y = by * TileSize + ty;
unsigned int indexToWrite = index_y * nx + index_x;
mphi[indexToWrite] = dphi[indexToWrite];
mpsix[indexToWrite] = dpsix[indexToWrite];
mpsiy[indexToWrite] = dpsiy[indexToWrite];
}
__device__ double basepolynomial(double x, double y, int alphax, int alphay, int vx, int vy, double dx, double dy, double xo, double yo){
double bpx = 0.0, bpy = 0.0;
double etax = (x - xo)/dx;
double etay = (y - yo)/dy;
switch(2 * alphax + vx + 1){ // Switching between base polynomials for x
case 1:
bpx = 1 - 3 * pow(etax,2) + 2 * pow(etax,3);
break;
case 2:
bpx = -2 * pow(etax,3) + 3 * pow(etax,2);
break;
case 3:
bpx = pow(etax,3) - 2 * pow(etax,2) + etax;
break;
case 4:
bpx = pow(etax,3) - pow(etax,2);
break;
}
switch(2 * alphay + vy + 1){ // Switching between base polynomials for y
case 1:
bpy = 1 - 3 * pow(etay,2) + 2 * pow(etay,3);
break;
case 2:
bpy = -2 * pow(etay,3) + 3 * pow(etay,2);
break;
case 3:
bpy = pow(etay,3) - 2 * pow(etay,2) + etay;
break;
case 4:
bpy = pow(etay,3) - pow(etay,2);
break;
}
double result = bpx * bpy;
return result;
}
__device__ double gradbpx(double x, double y, int alphax, int alphay, int vx, int vy, double dx, double dy, double xo, double yo){
double bpx = 0.0, bpy = 0.0;
double etax = (x - xo)/dx;
double etay = (y - yo)/dy;
switch(2 * alphax + vx + 1){ // Switching between base polynomials for x
case 1:
bpx = - 6 * etax + 6 * pow(etax,2);
break;
case 2:
bpx = -6 * pow(etax,2) + 6 * etax;
break;
case 3:
bpx = 3 * pow(etax,2) - 4 * etax + 1;
break;
case 4:
bpx = 3 * pow(etax,2) - 2 * etax;
break;
}
switch(2 * alphay + vy + 1){ // Switching between base polynomials for y
case 1:
bpy = 1 - 3 * pow(etay,2) + 2 * pow(etay,3);
break;
case 2:
bpy = -2 * pow(etay,3) + 3 * pow(etay,2);
break;
case 3:
bpy = pow(etay,3) - 2 * pow(etay,2) + etay;
break;
case 4:
bpy = pow(etay,3) - pow(etay,2);
break;
}
double result = bpx * bpy;
return result;
}
__device__ double gradbpy(double x, double y, int alphax, int alphay, int vx, int vy, double dx, double dy, double xo, double yo){
double bpx = 0.0, bpy = 0.0;
double etax = (x - xo)/dx;
double etay = (y - yo)/dy;
switch(2 * alphax + vx + 1){ // Switching between base polynomials for x
case 1:
bpx = 1 - 3 * pow(etax,2) + 2 * pow(etax,3);
break;
case 2:
bpx = -2 * pow(etax,3) + 3 * pow(etax,2);
break;
case 3:
bpx = pow(etax,3) - 2 * pow(etax,2) + etax;
break;
case 4:
bpx = pow(etax,3) - pow(etax,2);
break;
}
switch(2 * alphay + vy + 1){ // Switching between base polynomials for y
case 1:
bpy = - 6 * etay + 6 * pow(etay,2);
break;
case 2:
bpy = -6 * pow(etay,2) + 6 * etay;
break;
case 3:
bpy = 3 * pow(etay,2) - 4 * etay + 1;
break;
case 4:
bpy = 3 * pow(etay,2) - 2 * etay;
break;
}
double result = bpx * bpy;
return result;
}
__device__ double hp(double phi[4], double psix[4], double psiy[4], double psixy[4], double x, double y, double xo, double yo, double dx, double dy){
double H = 0, delta[4], d;
int alphax, alphay, vx, vy;
//double bp; //Base Polynomial Section
for(int i = 0; i < 4; i++){
switch (i){
case 0:{
alphax = 0;
alphay = 0;
memcpy(delta, phi, sizeof(delta));
break;
}
case 1:{
alphax = 0;
alphay = 1;
memcpy(delta, psiy, sizeof(delta));
break;
}
case 2:{
alphax = 1;
alphay = 0;
memcpy(delta, psix, sizeof(delta));
break;
}
case 3:{
alphax = 1;
alphay = 1;
memcpy(delta, psixy, sizeof(delta));
break;
}
}
for(int j = 0; j < 4; j++){
switch (j){
case 0:{
vx = 0;
vy = 0;
d = delta[0];
break;
}
case 1:{
vx = 0;
vy = 1;
d = delta[2];
break;
}
case 2:{
vx = 1;
vy = 0;
d = delta[1];
break;
}
case 3:{
vx = 1;
vy = 1;
d = delta[3];
break;
}
}
//bp = basepolynomial(x, y, alphax, alphay, vx, vy, dx, dy, xo, yo);
H = H + pow(dx, alphax) * pow(dy, alphay) * basepolynomial(x, y, alphax, alphay, vx, vy, dx, dy, xo, yo) * d;
}
}
return H;
}
__device__ double hermx(double* phi, double* psix, double* psiy, double* psixy, double xadv, double yadv, double xo, double yo, double dx, double dy){
double gradientx = 0, delta[4], d;
int alphax, alphay, vx, vy;
//double bp; //Base Polynomial Section
for(int i = 0; i < 4; i++){
switch (i){
case 0:{
alphax = 0;
alphay = 0;
memcpy(delta, phi, sizeof(delta));
break;
}
case 1:{
alphax = 0;
alphay = 1;
memcpy(delta, psiy, sizeof(delta));
break;
}
case 2:{
alphax = 1;
alphay = 0;
memcpy(delta, psix, sizeof(delta));
break;
}
case 3:{
alphax = 1;
alphay = 1;
memcpy(delta, psixy, sizeof(delta));
break;
}
}
for(int j = 0; j < 4; j++){
switch (j){
case 0:{
vx = 0;
vy = 0;
d = delta[0];
break;
}
case 1:{
vx = 0;
vy = 1;
d = delta[2];
break;
}
case 2:{
vx = 1;
vy = 0;
d = delta[1];
break;
}
case 3:{
vx = 1;
vy = 1;
d = delta[3];
break;
}
}
//bp = gradbpx(xadv, yadv, alphax, alphay, vx, vy, dx, dy, xo, yo);
gradientx = gradientx + pow(dx, alphax) * pow(dy, alphay) * gradbpx(xadv, yadv, alphax, alphay, vx, vy, dx, dy, xo, yo) * d * (1/dx);
}
}
return gradientx;
}
__device__ double hermy(double* phi, double* psix, double* psiy, double* psixy, double xadv, double yadv, double xo, double yo, double dx, double dy){
double gradienty = 0, d, delta[4];
int alphax, alphay, vx, vy;
//double bp; //Base Polynomial Section
for(int i = 0; i < 4; i++){
switch (i){
case 0:{
alphax = 0;
alphay = 0;
memcpy(delta, phi, sizeof(delta));
break;
}
case 1:{
alphax = 0;
alphay = 1;
memcpy(delta, psiy, sizeof(delta));
break;
}
case 2:{
alphax = 1;
alphay = 0;
memcpy(delta, psix, sizeof(delta));
break;
}
case 3:{
alphax = 1;
alphay = 1;
memcpy(delta, psixy, sizeof(delta));
break;
}
}
for(int j = 0; j < 4; j++){
switch (j){
case 0:{
vx = 0;
vy = 0;
d = delta[0];
break;
}
case 1:{
vx = 0;
vy = 1;
d = delta[2];
break;
}
case 2:{
vx = 1;
vy = 0;
d = delta[1];
break;
}
case 3:{
vx = 1;
vy = 1;
d = delta[3];
break;
}
}
//bp = gradbpy(xadv, yadv, alphax, alphay, vx, vy, dx, dy, xo, yo);
gradienty = gradienty + pow(dx, alphax) * pow(dy, alphay) * gradbpy(xadv, yadv, alphax, alphay, vx, vy, dx, dy, xo, yo) * d * (1/dy);
}
}
return gradienty;
}
__device__ double hp1D(double phi[2], double psix[2], double x, double xo, double dx){
double H = 0;
double etax = (x - xo)/dx;
H = (1 - 3*pow(etax,2) + 2 * pow(etax,3)) * phi[0] + (3 * pow(etax,2) - 2 * pow(etax,3)) * phi[1] + (etax + pow(etax,3) - 2 * pow(etax,2)) * psix[0] + (pow(etax,3) - pow(etax,2)) * psix[1];
return H;
}
#endif
|
88b8cd5b9dda8033cbe11507b89562065cb7cfbd.hip | // !!! This is a file automatically generated by hipify!!!
/*
* CUDA for Convolutional Neural Network
*
* by Zikun Xu, Bangqi Xiao
*/
#define USE_MNIST_LOADER
#define MNIST_FLOAT
#include "mnist.h"
#include "model.h"
#include <hip/hip_runtime.h>
#include <cstdio>
#include <time.h>
mnist_data *train_set, *test_set; // pointer to dataset struct
unsigned int train_cnt, test_cnt; // sample count
void printTestFunc(float* data, int a, int b, int c){
int N = a*b*c;
float h_data[N];
for (int i=0; i<N; i++){
h_data[i] = 0;
}
hipMemcpy(h_data, data, sizeof(float) * N, hipMemcpyDeviceToHost);
for (int i=0; i<a; i++){
for(int j=0;j<b;j++){
for(int k=0; k<c; k++){
printf("%f ", h_data[i*b*c+j*c+k]);
}
printf("\n");
}
printf("\n");
}
printf("\n");
}
void load_minist()
{
printf("Loading data .. \n");
int ret = mnist_load("../../data/train-images.idx3-ubyte", "../../data/train-labels.idx1-ubyte",
&train_set, &train_cnt);
mnist_load("../../data/t10k-images.idx3-ubyte", "../../data/t10k-labels.idx1-ubyte",
&test_set, &test_cnt);
printf("Successfully loaded %d training samples and %d testing samples\n", train_cnt, test_cnt);
}
int main(int argc, const char **argv)
{
// CUDA initialisation
hipError_t err_code = hipInit(0);
if (hipSuccess != err_code)
{
fprintf(stderr, "CUDA initialisation failed with error code - %d\n", err_code);
return 1;
}
// loading dataset
load_minist();
// Create model instance
printf("\n==========================================\n");
printf("Creating CNN model...\n");
Model cnn = Model(0.01);
printf("==========================================\n\n");
// Start training
int EPOCH = 500;
clock_t start, end;
double timing=0;
for (int epoch = 1; epoch <= EPOCH; epoch++)
{
float loss = 0;
start = clock();
for (int i = 0; i < train_cnt; i++)
{
loss += cnn.feed(train_set[i].data, train_set[i].label, true);
}
loss /= train_cnt;
end = clock();
timing += ((double) (end - start)) / CLOCKS_PER_SEC;
printf("Epoch %d, loss=%.3f, time %.3e\n", epoch, loss, timing);
// start evaluation
int err_cnt=0;
int predResult;
for (int i =0; i<test_cnt; i++)
{
predResult = cnn.predict(test_set[i].data);
if(predResult != test_set[i].label){
// printf("Model mistake on predicting %d to be %d\n", test_set[i].label, predResult); // DEBUG
err_cnt ++;
}
}
double accuracy = (double)(test_cnt - err_cnt)/test_cnt;
printf("\nError number = %d", err_cnt);
printf("\nModel performance on test data: accuracy = %.4e\n", accuracy);
printf("-----------------------------------------------------------\n");
}
return 0;
}
| 88b8cd5b9dda8033cbe11507b89562065cb7cfbd.cu | /*
* CUDA for Convolutional Neural Network
*
* by Zikun Xu, Bangqi Xiao
*/
#define USE_MNIST_LOADER
#define MNIST_FLOAT
#include "mnist.h"
#include "model.h"
#include <cuda.h>
#include <cstdio>
#include <time.h>
mnist_data *train_set, *test_set; // pointer to dataset struct
unsigned int train_cnt, test_cnt; // sample count
void printTestFunc(float* data, int a, int b, int c){
int N = a*b*c;
float h_data[N];
for (int i=0; i<N; i++){
h_data[i] = 0;
}
cudaMemcpy(h_data, data, sizeof(float) * N, cudaMemcpyDeviceToHost);
for (int i=0; i<a; i++){
for(int j=0;j<b;j++){
for(int k=0; k<c; k++){
printf("%f ", h_data[i*b*c+j*c+k]);
}
printf("\n");
}
printf("\n");
}
printf("\n");
}
void load_minist()
{
printf("Loading data .. \n");
int ret = mnist_load("../../data/train-images.idx3-ubyte", "../../data/train-labels.idx1-ubyte",
&train_set, &train_cnt);
mnist_load("../../data/t10k-images.idx3-ubyte", "../../data/t10k-labels.idx1-ubyte",
&test_set, &test_cnt);
printf("Successfully loaded %d training samples and %d testing samples\n", train_cnt, test_cnt);
}
int main(int argc, const char **argv)
{
// CUDA initialisation
CUresult err_code = cuInit(0);
if (CUDA_SUCCESS != err_code)
{
fprintf(stderr, "CUDA initialisation failed with error code - %d\n", err_code);
return 1;
}
// loading dataset
load_minist();
// Create model instance
printf("\n==========================================\n");
printf("Creating CNN model...\n");
Model cnn = Model(0.01);
printf("==========================================\n\n");
// Start training
int EPOCH = 500;
clock_t start, end;
double timing=0;
for (int epoch = 1; epoch <= EPOCH; epoch++)
{
float loss = 0;
start = clock();
for (int i = 0; i < train_cnt; i++)
{
loss += cnn.feed(train_set[i].data, train_set[i].label, true);
}
loss /= train_cnt;
end = clock();
timing += ((double) (end - start)) / CLOCKS_PER_SEC;
printf("Epoch %d, loss=%.3f, time %.3e\n", epoch, loss, timing);
// start evaluation
int err_cnt=0;
int predResult;
for (int i =0; i<test_cnt; i++)
{
predResult = cnn.predict(test_set[i].data);
if(predResult != test_set[i].label){
// printf("Model mistake on predicting %d to be %d\n", test_set[i].label, predResult); // DEBUG
err_cnt ++;
}
}
double accuracy = (double)(test_cnt - err_cnt)/test_cnt;
printf("\nError number = %d", err_cnt);
printf("\nModel performance on test data: accuracy = %.4e\n", accuracy);
printf("-----------------------------------------------------------\n");
}
return 0;
}
|
235362c5aa32f4f3d157673ade46fd2a65780692.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void CalculateFixed( const float *background, const float *target, const float *mask, float *fixed, const int wb, const int hb, const int wt, const int ht, const int oy, const int ox )
{
const int yt = blockIdx.y * blockDim.y + threadIdx.y;
const int xt = blockIdx.x * blockDim.x + threadIdx.x;
const int curt = wt*yt+xt;
if (yt < ht and xt < wt and mask[curt] > 127.0f) {
bool nt_bnd = (yt == 0), wt_bnd = (xt == 0), st_bnd = (yt == ht-1), et_bnd = (xt == wt-1);
int North_t = (nt_bnd)? curt:(curt-wt);
int West_t = (wt_bnd)? curt:(curt-1);
int South_t = (st_bnd)? curt:(curt+wt);
int East_t = (et_bnd)? curt:(curt+1);
fixed[curt*3+0] = 4.0f*target[curt*3+0]-(target[North_t*3+0]+target[West_t*3+0]+target[South_t*3+0]+target[East_t*3+0]);
fixed[curt*3+1] = 4.0f*target[curt*3+1]-(target[North_t*3+1]+target[West_t*3+1]+target[South_t*3+1]+target[East_t*3+1]);
fixed[curt*3+2] = 4.0f*target[curt*3+2]-(target[North_t*3+2]+target[West_t*3+2]+target[South_t*3+2]+target[East_t*3+2]);
const int yb = oy+yt, xb = ox+xt;
const int curb = wb*yb+xb;
if (0 <= yb and yb < hb and 0 <= xb and xb < wb) {
bool nb_bnd = (yb == 0), wb_bnd = (xb == 0), sb_bnd = (yb == hb-1), eb_bnd = (xb == wb-1);
int North_b = (nb_bnd)? (curb):(curb-wb);
int West_b = (wb_bnd)? (curb):(curb-1);
int South_b = (sb_bnd)? (curb):(curb+wb);
int East_b = (eb_bnd)? (curb):(curb+1);
bool isMasked_n = (nt_bnd)? true:(mask[North_t] <= 127.0f);
bool isMasked_w = (wt_bnd)? true:(mask[West_t] <= 127.0f);
bool isMasked_s = (st_bnd)? true:(mask[South_t] <= 127.0f);
bool isMasked_e = (et_bnd)? true:(mask[East_t] <= 127.0f);
if(isMasked_n) {
fixed[curt*3+0] += background[North_b*3+0];
fixed[curt*3+1] += background[North_b*3+1];
fixed[curt*3+2] += background[North_b*3+2];
}
if(isMasked_w) {
fixed[curt*3+0] += background[West_b*3+0];
fixed[curt*3+1] += background[West_b*3+1];
fixed[curt*3+2] += background[West_b*3+2];
}
if(isMasked_s) {
fixed[curt*3+0] += background[South_b*3+0];
fixed[curt*3+1] += background[South_b*3+1];
fixed[curt*3+2] += background[South_b*3+2];
}
if(isMasked_e) {
fixed[curt*3+0] += background[East_b*3+0];
fixed[curt*3+1] += background[East_b*3+1];
fixed[curt*3+2] += background[East_b*3+2];
}
}
}
} | 235362c5aa32f4f3d157673ade46fd2a65780692.cu | #include "includes.h"
__global__ void CalculateFixed( const float *background, const float *target, const float *mask, float *fixed, const int wb, const int hb, const int wt, const int ht, const int oy, const int ox )
{
const int yt = blockIdx.y * blockDim.y + threadIdx.y;
const int xt = blockIdx.x * blockDim.x + threadIdx.x;
const int curt = wt*yt+xt;
if (yt < ht and xt < wt and mask[curt] > 127.0f) {
bool nt_bnd = (yt == 0), wt_bnd = (xt == 0), st_bnd = (yt == ht-1), et_bnd = (xt == wt-1);
int North_t = (nt_bnd)? curt:(curt-wt);
int West_t = (wt_bnd)? curt:(curt-1);
int South_t = (st_bnd)? curt:(curt+wt);
int East_t = (et_bnd)? curt:(curt+1);
fixed[curt*3+0] = 4.0f*target[curt*3+0]-(target[North_t*3+0]+target[West_t*3+0]+target[South_t*3+0]+target[East_t*3+0]);
fixed[curt*3+1] = 4.0f*target[curt*3+1]-(target[North_t*3+1]+target[West_t*3+1]+target[South_t*3+1]+target[East_t*3+1]);
fixed[curt*3+2] = 4.0f*target[curt*3+2]-(target[North_t*3+2]+target[West_t*3+2]+target[South_t*3+2]+target[East_t*3+2]);
const int yb = oy+yt, xb = ox+xt;
const int curb = wb*yb+xb;
if (0 <= yb and yb < hb and 0 <= xb and xb < wb) {
bool nb_bnd = (yb == 0), wb_bnd = (xb == 0), sb_bnd = (yb == hb-1), eb_bnd = (xb == wb-1);
int North_b = (nb_bnd)? (curb):(curb-wb);
int West_b = (wb_bnd)? (curb):(curb-1);
int South_b = (sb_bnd)? (curb):(curb+wb);
int East_b = (eb_bnd)? (curb):(curb+1);
bool isMasked_n = (nt_bnd)? true:(mask[North_t] <= 127.0f);
bool isMasked_w = (wt_bnd)? true:(mask[West_t] <= 127.0f);
bool isMasked_s = (st_bnd)? true:(mask[South_t] <= 127.0f);
bool isMasked_e = (et_bnd)? true:(mask[East_t] <= 127.0f);
if(isMasked_n) {
fixed[curt*3+0] += background[North_b*3+0];
fixed[curt*3+1] += background[North_b*3+1];
fixed[curt*3+2] += background[North_b*3+2];
}
if(isMasked_w) {
fixed[curt*3+0] += background[West_b*3+0];
fixed[curt*3+1] += background[West_b*3+1];
fixed[curt*3+2] += background[West_b*3+2];
}
if(isMasked_s) {
fixed[curt*3+0] += background[South_b*3+0];
fixed[curt*3+1] += background[South_b*3+1];
fixed[curt*3+2] += background[South_b*3+2];
}
if(isMasked_e) {
fixed[curt*3+0] += background[East_b*3+0];
fixed[curt*3+1] += background[East_b*3+1];
fixed[curt*3+2] += background[East_b*3+2];
}
}
}
} |
90fd2a8fd332d13d3201b4c4cecf5a1b20c0cfa4.hip | // !!! This is a file automatically generated by hipify!!!
#include"solvers.h"
void GPU_ENTRY(init, SIMENGINE_STORAGE){
// FIXME Add more checking of capabilities and devices available!
hipSetDevice(cutGetMaxGflopsDeviceId());
}
void GPU_ENTRY(exit, SIMENGINE_STORAGE){
hipDeviceReset();
}
// Takes a solver_props pointer on the CPU and returns a pointer to a mirrored structure on the GPU
solver_props *GPU_ENTRY(init_props, SIMENGINE_STORAGE, solver_props *props){
// Local temp
solver_props tprops;
// GPU datastructures
solver_props *dprops;
// Copy the properties to local temporary
memcpy(&tprops, props, sizeof(solver_props));
// Allocate GPU space for props and all pointer fields of props
cutilSafeCall(hipMalloc((void**)&dprops, sizeof(solver_props)));
cutilSafeCall(hipMalloc((void**)&tprops.time, props->num_models*sizeof(CDATAFORMAT)));
if(props->statesize){
cutilSafeCall(hipMalloc((void**)&tprops.model_states, props->num_models*props->statesize*sizeof(CDATAFORMAT)));
}
else{
tprops.model_states = NULL;
}
if(tprops.inputsize){
cutilSafeCall(hipMalloc((void**)&tprops.inputs, props->num_models*props->inputsize*sizeof(CDATAFORMAT)));
}
else{
tprops.inputs = NULL;
}
cutilSafeCall(hipMalloc((void**)&tprops.ob, props->ob_size));
if(props->outputsize){
cutilSafeCall(hipMalloc((void**)&tprops.outputs, props->num_models*props->outputsize*sizeof(CDATAFORMAT)));
}
else{
tprops.outputs = NULL;
}
cutilSafeCall(hipMalloc((void**)&tprops.running, props->num_models*sizeof(CDATAFORMAT)));
// Copy props to GPU
cutilSafeCall(hipMemcpy(dprops, &tprops, sizeof(solver_props), hipMemcpyHostToDevice));
cutilSafeCall(hipMemcpy(tprops.time, props->time, props->num_models*sizeof(CDATAFORMAT), hipMemcpyHostToDevice));
if(tprops.model_states) cutilSafeCall(hipMemcpy(tprops.model_states, props->model_states, props->num_models*props->statesize*sizeof(CDATAFORMAT), hipMemcpyHostToDevice));
if(tprops.inputs) cutilSafeCall(hipMemcpy(tprops.inputs, props->inputs, props->num_models*props->inputsize*sizeof(CDATAFORMAT), hipMemcpyHostToDevice));
cutilSafeCall(hipMemcpy(tprops.ob, props->ob, props->ob_size, hipMemcpyHostToDevice));
cutilSafeCall(hipMemcpy(tprops.running, props->running, props->num_models*sizeof(CDATAFORMAT), hipMemcpyHostToDevice));
// Store pointers to GPU memory for data we need to be able to retrieve
props->gpu.ob = tprops.ob;
props->gpu.time = tprops.time;
props->gpu.model_states = tprops.model_states;
return dprops;
}
// Frees a GPU solver props structure
void GPU_ENTRY(free_props, SIMENGINE_STORAGE, solver_props *props){
solver_props tprops;
cutilSafeCall(hipMemcpy(&tprops, props, sizeof(solver_props), hipMemcpyDeviceToHost));
cutilSafeCall(hipFree(tprops.time));
if(tprops.model_states) cutilSafeCall(hipFree(tprops.model_states));
if(tprops.inputs) cutilSafeCall(hipFree(tprops.inputs));
cutilSafeCall(hipFree(tprops.ob));
if(tprops.outputs) cutilSafeCall(hipFree(tprops.outputs));
cutilSafeCall(hipFree(tprops.running));
cutilSafeCall(hipFree(props));
}
| 90fd2a8fd332d13d3201b4c4cecf5a1b20c0cfa4.cu | #include"solvers.h"
void GPU_ENTRY(init, SIMENGINE_STORAGE){
// FIXME Add more checking of capabilities and devices available!
cudaSetDevice(cutGetMaxGflopsDeviceId());
}
void GPU_ENTRY(exit, SIMENGINE_STORAGE){
cudaThreadExit();
}
// Takes a solver_props pointer on the CPU and returns a pointer to a mirrored structure on the GPU
solver_props *GPU_ENTRY(init_props, SIMENGINE_STORAGE, solver_props *props){
// Local temp
solver_props tprops;
// GPU datastructures
solver_props *dprops;
// Copy the properties to local temporary
memcpy(&tprops, props, sizeof(solver_props));
// Allocate GPU space for props and all pointer fields of props
cutilSafeCall(cudaMalloc((void**)&dprops, sizeof(solver_props)));
cutilSafeCall(cudaMalloc((void**)&tprops.time, props->num_models*sizeof(CDATAFORMAT)));
if(props->statesize){
cutilSafeCall(cudaMalloc((void**)&tprops.model_states, props->num_models*props->statesize*sizeof(CDATAFORMAT)));
}
else{
tprops.model_states = NULL;
}
if(tprops.inputsize){
cutilSafeCall(cudaMalloc((void**)&tprops.inputs, props->num_models*props->inputsize*sizeof(CDATAFORMAT)));
}
else{
tprops.inputs = NULL;
}
cutilSafeCall(cudaMalloc((void**)&tprops.ob, props->ob_size));
if(props->outputsize){
cutilSafeCall(cudaMalloc((void**)&tprops.outputs, props->num_models*props->outputsize*sizeof(CDATAFORMAT)));
}
else{
tprops.outputs = NULL;
}
cutilSafeCall(cudaMalloc((void**)&tprops.running, props->num_models*sizeof(CDATAFORMAT)));
// Copy props to GPU
cutilSafeCall(cudaMemcpy(dprops, &tprops, sizeof(solver_props), cudaMemcpyHostToDevice));
cutilSafeCall(cudaMemcpy(tprops.time, props->time, props->num_models*sizeof(CDATAFORMAT), cudaMemcpyHostToDevice));
if(tprops.model_states) cutilSafeCall(cudaMemcpy(tprops.model_states, props->model_states, props->num_models*props->statesize*sizeof(CDATAFORMAT), cudaMemcpyHostToDevice));
if(tprops.inputs) cutilSafeCall(cudaMemcpy(tprops.inputs, props->inputs, props->num_models*props->inputsize*sizeof(CDATAFORMAT), cudaMemcpyHostToDevice));
cutilSafeCall(cudaMemcpy(tprops.ob, props->ob, props->ob_size, cudaMemcpyHostToDevice));
cutilSafeCall(cudaMemcpy(tprops.running, props->running, props->num_models*sizeof(CDATAFORMAT), cudaMemcpyHostToDevice));
// Store pointers to GPU memory for data we need to be able to retrieve
props->gpu.ob = tprops.ob;
props->gpu.time = tprops.time;
props->gpu.model_states = tprops.model_states;
return dprops;
}
// Frees a GPU solver props structure
void GPU_ENTRY(free_props, SIMENGINE_STORAGE, solver_props *props){
solver_props tprops;
cutilSafeCall(cudaMemcpy(&tprops, props, sizeof(solver_props), cudaMemcpyDeviceToHost));
cutilSafeCall(cudaFree(tprops.time));
if(tprops.model_states) cutilSafeCall(cudaFree(tprops.model_states));
if(tprops.inputs) cutilSafeCall(cudaFree(tprops.inputs));
cutilSafeCall(cudaFree(tprops.ob));
if(tprops.outputs) cutilSafeCall(cudaFree(tprops.outputs));
cutilSafeCall(cudaFree(tprops.running));
cutilSafeCall(cudaFree(props));
}
|
efe57dc58a63c1d6fefe641943b64dce807bce33.hip | // !!! This is a file automatically generated by hipify!!!
#include "Hornet.hpp"
#include "StandardAPI.hpp"
#include "Util/BatchFunctions.hpp"
#include "Util/RandomGraphData.cuh"
#include <Host/FileUtil.hpp> //xlib::extract_filepath_noextension
#include <Device/Util/CudaUtil.cuh> //xlib::deviceInfo
#include <algorithm> //std:.generate
#include <chrono> //std::chrono
#include <random> //std::mt19937_64
#include <hip/hip_runtime_api.h>
#include <Graph/GraphStd.hpp>
#include <Host/Classes/Timer.hpp>
#include <Device/Util/Timer.cuh>
#include "Util/CommandLineParam.hpp"
//using namespace hornets_nest;
using namespace timer;
using namespace std::string_literals;
using vert_t = int;
using eoff_t = int;
using wgt0_t = int;
using wgt1_t = float;
using Init = hornet::HornetInit<vert_t, hornet::EMPTY, hornet::TypeList<wgt0_t, wgt1_t>>;
using HornetGPU = hornet::gpu::Hornet<vert_t, hornet::EMPTY, hornet::TypeList<wgt0_t, wgt1_t>>;
using UpdatePtr = hornet::BatchUpdatePtr<vert_t, hornet::TypeList<wgt0_t, wgt1_t>, hornet::DeviceType::HOST>;
using Update = hornet::gpu::BatchUpdate<vert_t, hornet::TypeList<wgt0_t, wgt1_t>>;
using hornet::TypeList;
using hornet::DeviceType;
/**
* @brief Example tester for Hornet
*/
int exec(int argc, char* argv[]) {
using namespace graph::structure_prop;
using namespace graph::parsing_prop;
graph::GraphStd<vert_t, vert_t> graph;
graph.read(argv[1]);
int batch_size = std::stoi(argv[2]);
Init hornet_init(graph.nV(), graph.nE(), graph.csr_out_offsets(), graph.csr_out_edges());
//Use meta with hornet_init
std::vector<wgt0_t> edge_meta_0(graph.nE(), 0);
std::vector<wgt1_t> edge_meta_1(graph.nE(), 1);
hornet_init.insertEdgeData(edge_meta_0.data(), edge_meta_1.data());
HornetGPU hornet_gpu(hornet_init);
auto init_coo = hornet_gpu.getCOO(true);
hornet::RandomGenTraits<TypeList<wgt0_t, wgt1_t>> cooGenTraits;
auto randomBatch = hornet::generateRandomCOO<vert_t, eoff_t>(graph.nV(), batch_size, cooGenTraits);
Update batch_update(randomBatch);
printf("ne: %d\n", hornet_gpu.nE());
std::cout<<"=======\n";
Timer<DEVICE> TM(3);
TM.start();
hornet_gpu.insert(batch_update);
TM.stop();
printf("ne: %d\n", hornet_gpu.nE());
std::cout<<"=======\n";
TM.print("Insertion " + std::to_string(batch_size) + ": ");
auto inst_coo = hornet_gpu.getCOO(true);
init_coo.append(randomBatch);
init_coo.sort();
std::cout<<"Creating multimap for testing correctness...";
auto init_coo_map = getHostMMap(init_coo);
auto inst_coo_map = getHostMMap(inst_coo);
std::cout<<"...Done!\n";
if (init_coo_map == inst_coo_map) {
std::cout<<"Passed\n";
} else {
std::cout<<"Failed\n";
}
return 0;
}
int main(int argc, char* argv[]) {
int ret = 0;
{
ret = exec(argc, argv);
}
return ret;
}
| efe57dc58a63c1d6fefe641943b64dce807bce33.cu | #include "Hornet.hpp"
#include "StandardAPI.hpp"
#include "Util/BatchFunctions.hpp"
#include "Util/RandomGraphData.cuh"
#include <Host/FileUtil.hpp> //xlib::extract_filepath_noextension
#include <Device/Util/CudaUtil.cuh> //xlib::deviceInfo
#include <algorithm> //std:.generate
#include <chrono> //std::chrono
#include <random> //std::mt19937_64
#include <cuda_profiler_api.h>
#include <Graph/GraphStd.hpp>
#include <Host/Classes/Timer.hpp>
#include <Device/Util/Timer.cuh>
#include "Util/CommandLineParam.hpp"
//using namespace hornets_nest;
using namespace timer;
using namespace std::string_literals;
using vert_t = int;
using eoff_t = int;
using wgt0_t = int;
using wgt1_t = float;
using Init = hornet::HornetInit<vert_t, hornet::EMPTY, hornet::TypeList<wgt0_t, wgt1_t>>;
using HornetGPU = hornet::gpu::Hornet<vert_t, hornet::EMPTY, hornet::TypeList<wgt0_t, wgt1_t>>;
using UpdatePtr = hornet::BatchUpdatePtr<vert_t, hornet::TypeList<wgt0_t, wgt1_t>, hornet::DeviceType::HOST>;
using Update = hornet::gpu::BatchUpdate<vert_t, hornet::TypeList<wgt0_t, wgt1_t>>;
using hornet::TypeList;
using hornet::DeviceType;
/**
* @brief Example tester for Hornet
*/
int exec(int argc, char* argv[]) {
using namespace graph::structure_prop;
using namespace graph::parsing_prop;
graph::GraphStd<vert_t, vert_t> graph;
graph.read(argv[1]);
int batch_size = std::stoi(argv[2]);
Init hornet_init(graph.nV(), graph.nE(), graph.csr_out_offsets(), graph.csr_out_edges());
//Use meta with hornet_init
std::vector<wgt0_t> edge_meta_0(graph.nE(), 0);
std::vector<wgt1_t> edge_meta_1(graph.nE(), 1);
hornet_init.insertEdgeData(edge_meta_0.data(), edge_meta_1.data());
HornetGPU hornet_gpu(hornet_init);
auto init_coo = hornet_gpu.getCOO(true);
hornet::RandomGenTraits<TypeList<wgt0_t, wgt1_t>> cooGenTraits;
auto randomBatch = hornet::generateRandomCOO<vert_t, eoff_t>(graph.nV(), batch_size, cooGenTraits);
Update batch_update(randomBatch);
printf("ne: %d\n", hornet_gpu.nE());
std::cout<<"=======\n";
Timer<DEVICE> TM(3);
TM.start();
hornet_gpu.insert(batch_update);
TM.stop();
printf("ne: %d\n", hornet_gpu.nE());
std::cout<<"=======\n";
TM.print("Insertion " + std::to_string(batch_size) + ": ");
auto inst_coo = hornet_gpu.getCOO(true);
init_coo.append(randomBatch);
init_coo.sort();
std::cout<<"Creating multimap for testing correctness...";
auto init_coo_map = getHostMMap(init_coo);
auto inst_coo_map = getHostMMap(inst_coo);
std::cout<<"...Done!\n";
if (init_coo_map == inst_coo_map) {
std::cout<<"Passed\n";
} else {
std::cout<<"Failed\n";
}
return 0;
}
int main(int argc, char* argv[]) {
int ret = 0;
{
ret = exec(argc, argv);
}
return ret;
}
|
5afd952d1c791b9bbba508f5f448baff4d601b34.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <string.h>
#include <stdarg.h>
#ifdef UNIX
#include <stdint.h>
#include <unistd.h>
#endif
#include "mex.h"
// CUDA
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "rocblas.h"
#include "cudaCommon.h"
/* THIS FUNCTION
The cudaArrayAtomic function is meant to perform operations that operate elementwise
on single arrays. The only such functions yet encountered are in "control" functions where
we require that either density be kept to a minimum value, or that NaNs be replaced by 0s.
*/
__global__ void cukern_ArraySetMin(double *array, double min, int n);
__global__ void cukern_ArraySetMax(double *array, double max, int n);
__global__ void cukern_ArrayFixNaN(double *array, double fixval, int n);
#define BLOCKDIM 256
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
// At least 2 arguments expected
// Input and result
if (nrhs!=3)
mexErrMsgTxt("Wrong number of arguments. Expected form: cudaArrayAtomic(gputag, value, [1: set min, 2: set max, 3: NaN->value])");
// Get GPU array pointers
double val = *mxGetPr(prhs[1]);
int operation = (int)*mxGetPr(prhs[2]);
MGArray phi;
int worked = MGA_accessMatlabArrays(prhs, 0, 0, &phi);
int j;
CHECK_CUDA_ERROR("Entering cudaArrayAtomic");
for(j = 0; j < phi.nGPUs; j++) {
hipSetDevice(phi.deviceID[j]);
CHECK_CUDA_ERROR("Setting device.");
switch(operation) {
case 1:hipLaunchKernelGGL(( cukern_ArraySetMin), dim3(32), dim3(BLOCKDIM), 0, 0, phi.devicePtr[j], val, phi.partNumel[j]); break;
case 2:hipLaunchKernelGGL(( cukern_ArraySetMax), dim3(32), dim3(BLOCKDIM), 0, 0, phi.devicePtr[j], val, phi.partNumel[j]); break;
case 3:hipLaunchKernelGGL(( cukern_ArrayFixNaN), dim3(32), dim3(BLOCKDIM), 0, 0, phi.devicePtr[j], val, phi.partNumel[j]); break;
}
CHECK_CUDA_LAUNCH_ERROR(256, 32, &phi, operation, "array min/max/nan sweeping");
}
}
__global__ void cukern_ArraySetMin(double *array, double min, int n)
{
int x = threadIdx.x + blockDim.x * blockIdx.x;
int dx = blockDim.x * gridDim.x;
while(x < n) {
if(array[x] < min) array[x] = min;
x += dx;
}
}
__global__ void cukern_ArraySetMax(double *array, double max, int n)
{
int x = threadIdx.x + blockDim.x * blockIdx.x;
int dx = blockDim.x * gridDim.x;
while(x < n) {
if(array[x] > max) array[x] = max;
x += dx;
}
}
__global__ void cukern_ArrayFixNaN(double *array, double fixval, int n)
{
int x = threadIdx.x + blockDim.x * blockIdx.x;
int dx = blockDim.x * gridDim.x;
while(x < n) {
if( isnan(array[x])) array[x] = fixval;
x += dx;
}
}
| 5afd952d1c791b9bbba508f5f448baff4d601b34.cu | #include <stdio.h>
#include <string.h>
#include <stdarg.h>
#ifdef UNIX
#include <stdint.h>
#include <unistd.h>
#endif
#include "mex.h"
// CUDA
#include "cuda.h"
#include "cuda_runtime.h"
#include "cublas.h"
#include "cudaCommon.h"
/* THIS FUNCTION
The cudaArrayAtomic function is meant to perform operations that operate elementwise
on single arrays. The only such functions yet encountered are in "control" functions where
we require that either density be kept to a minimum value, or that NaNs be replaced by 0s.
*/
__global__ void cukern_ArraySetMin(double *array, double min, int n);
__global__ void cukern_ArraySetMax(double *array, double max, int n);
__global__ void cukern_ArrayFixNaN(double *array, double fixval, int n);
#define BLOCKDIM 256
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
// At least 2 arguments expected
// Input and result
if (nrhs!=3)
mexErrMsgTxt("Wrong number of arguments. Expected form: cudaArrayAtomic(gputag, value, [1: set min, 2: set max, 3: NaN->value])");
// Get GPU array pointers
double val = *mxGetPr(prhs[1]);
int operation = (int)*mxGetPr(prhs[2]);
MGArray phi;
int worked = MGA_accessMatlabArrays(prhs, 0, 0, &phi);
int j;
CHECK_CUDA_ERROR("Entering cudaArrayAtomic");
for(j = 0; j < phi.nGPUs; j++) {
cudaSetDevice(phi.deviceID[j]);
CHECK_CUDA_ERROR("Setting device.");
switch(operation) {
case 1: cukern_ArraySetMin<<<32, BLOCKDIM>>>(phi.devicePtr[j], val, phi.partNumel[j]); break;
case 2: cukern_ArraySetMax<<<32, BLOCKDIM>>>(phi.devicePtr[j], val, phi.partNumel[j]); break;
case 3: cukern_ArrayFixNaN<<<32, BLOCKDIM>>>(phi.devicePtr[j], val, phi.partNumel[j]); break;
}
CHECK_CUDA_LAUNCH_ERROR(256, 32, &phi, operation, "array min/max/nan sweeping");
}
}
__global__ void cukern_ArraySetMin(double *array, double min, int n)
{
int x = threadIdx.x + blockDim.x * blockIdx.x;
int dx = blockDim.x * gridDim.x;
while(x < n) {
if(array[x] < min) array[x] = min;
x += dx;
}
}
__global__ void cukern_ArraySetMax(double *array, double max, int n)
{
int x = threadIdx.x + blockDim.x * blockIdx.x;
int dx = blockDim.x * gridDim.x;
while(x < n) {
if(array[x] > max) array[x] = max;
x += dx;
}
}
__global__ void cukern_ArrayFixNaN(double *array, double fixval, int n)
{
int x = threadIdx.x + blockDim.x * blockIdx.x;
int dx = blockDim.x * gridDim.x;
while(x < n) {
if( isnan(array[x])) array[x] = fixval;
x += dx;
}
}
|