Dataset columns (string length min – max):

  hip_filename    stringlengths   5 – 84
  hip_content     stringlengths   79 – 9.69M
  cuda_filename   stringlengths   4 – 83
  cuda_content    stringlengths   19 – 9.69M
e299cf6afe4585ebf93002bf21d1a9ea0ef8ee0b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void add_adjacent(int *vec, int *vec_shorter, const int n)
{
    unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
    if (xIndex < n)
        vec_shorter[xIndex] = vec[2 * xIndex] + vec[(2 * xIndex) + 1];
}
e299cf6afe4585ebf93002bf21d1a9ea0ef8ee0b.cu
#include "includes.h" __global__ void add_adjacent(int *vec, int *vec_shorter, const int n) { unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x; if ( xIndex < n ) vec_shorter[xIndex] = vec[2 * xIndex] + vec[(2 * xIndex) +1]; }
2ebda82901c600726d5d6ff0d5f314aa4fb04da3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hough2d.h" #include "main.h" #include <sm_11_atomic_functions.h> #define BLOCK_SIZE 512 #define BLOCK_SIZE_HOUGH 360 #define STEP_SIZE 5 #define NUMBER_OF_STEPS 360/STEP_SIZE // Circ mask kernel storage __constant__ int maskKernelX[NUMBER_OF_STEPS]; __constant__ int maskKernelY[NUMBER_OF_STEPS]; // Function to set precalculated relative coordinates for circle boundary coordinates extern "C" void setMaskKernel(int *maskX, int *maskY) { hipMemcpyToSymbol(maskKernelX, maskX, NUMBER_OF_STEPS*sizeof(int)); hipMemcpyToSymbol(maskKernelY, maskY, NUMBER_OF_STEPS*sizeof(int)); } // Kernel to set all pixel values to specified value __global__ void setAllValuesKernel(int* houghSpace, int height, int width, float value) { int const index = blockIdx.x * BLOCK_SIZE + threadIdx.x; if (index < height*width) { houghSpace[index] = value; } __syncthreads(); } extern "C" void setAllValuesToCUDA(int* houghSpace, int height, int width, float value) { //cout << "Setting all values to " << value << "..." << endl; dim3 dimGrid = (ceil((float)width*height/(float)BLOCK_SIZE)); dim3 dimBlock = (BLOCK_SIZE); hipLaunchKernelGGL(( setAllValuesKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, houghSpace, height, width, value); hipDeviceSynchronize(); } // Kernel to perform circular Hough transform __global__ void houghTransformKernel(int* cudaHough, float* img, int height, int width, int radius) { if (threadIdx.x < BLOCK_SIZE_HOUGH) { // Arrays to hold coordinates for circle pixels __shared__ float circVals[NUMBER_OF_STEPS]; // There are 10 hough pixels calculated in each block int whichPixel = (int)threadIdx.x / NUMBER_OF_STEPS; // Calculate position for pixel in hough space int cpixIDy = (int)( ((float)(blockIdx.x*STEP_SIZE+whichPixel)) / (float)(width-(radius*2))) + radius; int cpixIDx = (blockIdx.x*10+whichPixel) % (width-(radius*2)) + radius; // Load image pixel from circle edge int xVal = cpixIDx + maskKernelX[threadIdx.x % NUMBER_OF_STEPS]; int yVal = cpixIDy + maskKernelY[threadIdx.x % NUMBER_OF_STEPS]; // Get the pixel value from the image float pixVal = img[yVal*width + xVal]; //float pixVal = img[cpixIDy*width + cpixIDx]; // ## TO DELETE, INCORRECT //int houghVal = cudaHough[cpixIDy*width + cpixIDx]; __syncthreads(); if (pixVal > 0) { atomicAdd(cudaHough + cpixIDy*width + cpixIDx, 1); } __syncthreads(); } } // Calls the Hough transform kernel extern "C" void performHoughTransformCUDA(int* cudaHough, float* img, int height, int width, int radius) { // Define grid and block dimensions dim3 dimGrid = ( ceil((float)(width-(2*radius)) * (height-(2*radius)) / (float)STEP_SIZE) ); dim3 dimBlock = (BLOCK_SIZE_HOUGH); // Perform Hough transform and sync threads to get the final result hipLaunchKernelGGL(( houghTransformKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, cudaHough, img, height, width, radius); hipDeviceSynchronize(); } // Analyse the defined image area for circles using Hough Transform extern "C" void Hough2D_CUDA(float* img, int width, int height, int radMin, int radMax, int* posX, int* posY, int* maxVal, int* resRad) { int* houghSpace; houghSpace = (int*)malloc(width*height*sizeof(int)); // Arrays for results int *posxArray, *posyArray, *maxValArray, *radArray; // Allocate correct memory for arrays posxArray = (int*)malloc((radMax-radMin)*sizeof(int)); posyArray = (int*)malloc((radMax-radMin)*sizeof(int)); maxValArray = (int*)malloc((radMax-radMin)*sizeof(int)); radArray = (int*)malloc((radMax-radMin)*sizeof(int)); 
// Allocate memory for CUDA images and matrices float *cudaImg; int *cudaHough; hipMalloc((void **)&cudaImg, width*height*sizeof(float)); hipMalloc((void **)&cudaHough, width*height*sizeof(int)); // Copy image from host to device hipMemcpy(cudaImg, img, width*height*sizeof(float), hipMemcpyHostToDevice); int ctrArr = 0, radius; for (int i=radMin; i < radMax; i++) { // Set all elements to zero setAllValuesToCUDA(cudaHough, height, width, 0); // Precalculate relX and relY radius = i; int ctr = 0; int* relX, *relY; relX = (int*)malloc(NUMBER_OF_STEPS*sizeof(int)); relY = (int*)malloc(NUMBER_OF_STEPS*sizeof(int)); for (int theta=0; theta < 360; theta+=STEP_SIZE) { // Calculate x and y coordinates float angle = (theta*PI) / 180; relX[ctr] = (int)(-radius*cos(angle)); relY[ctr] = (int)(-radius*sin(angle)); ctr++; } // Set mask coordinates for circle setMaskKernel(relX, relY); // Free memory free(relX); free(relY); //performHoughTransformCUDA(cudaHough, cudaImg, height, width, radius, relX, relY, angleNum); performHoughTransformCUDA(cudaHough, cudaImg, height, width, radius); // Copy matrix from device to host hipMemcpy(houghSpace, cudaHough, width*height*sizeof(float), hipMemcpyDeviceToHost); // Find max value in the houghSpace *maxVal = 0; int index; int tempPosX, tempPosY, tempMaxVal = 0; for (int y=0; y < height; y++) { for (int x=0; x < width; x++) { //index = radius*width*height + y*width + x; index = y*width + x; if (tempMaxVal < houghSpace[index]) { tempMaxVal = houghSpace[index]; tempPosX = x; tempPosY = y; } } } // Write results to arrays posxArray[ctrArr] = tempPosX; posyArray[ctrArr] = tempPosY; maxValArray[ctrArr] = tempMaxVal; radArray[ctrArr] = i; cout << "Current (radius: " << i << ") MaxVal: " << maxValArray[ctrArr] << " ctr: " << ctrArr << endl; ctrArr++; } // end for // Find the maximum value from arrays *maxVal = 0; for (int j=0; j < (radMax-radMin); j++) { cout << "MaxValArray: " << maxValArray[j] << " " << posxArray[j] << " " << posyArray[j] << " " << radArray[j] << endl; if (*maxVal < maxValArray[j]) { *maxVal = maxValArray[j]; *posX = posxArray[j]; *posY = posyArray[j]; *resRad = radArray[j]; } } // Free cuda memory hipFree(cudaImg); hipFree(cudaHough); // Free array memory free(posxArray); free(posyArray); free(maxValArray); free(radArray); } // ################## // #### IMADJUST #### // ################## __global__ void AdjustImageIntensityKernel(float *imgOut, float *imgIn, int width, int height, float lowin, float lowout, float scale) { __shared__ float bufData[BLOCK_SIZE]; // Get the index of pixel const int index = blockIdx.x * BLOCK_SIZE + threadIdx.x; // Load data to shared variable bufData[threadIdx.x] = imgIn[index]; // Check that it's not out of bounds if (index < (height*width)) { // Find the according multiplier float tempLevel = ( bufData[threadIdx.x] - lowin)*scale + lowout; // Check that it's within required range if (tempLevel < 0) { bufData[threadIdx.x] = 0; } else if (tempLevel > 1) { bufData[threadIdx.x] = 1; } else { bufData[threadIdx.x] = tempLevel; } // Write data back imgOut[index] = bufData[threadIdx.x]; } // Synchronise threads to have the whole image fully processed for output __syncthreads(); } // Resize the image __global__ void ImageScalingKernel(float *imgOut, float *imgIn, int width, int height) { __shared__ float inData[BLOCK_SIZE]; // Get the index of pixel const int index = blockIdx.x * BLOCK_SIZE + threadIdx.x; // Load data to shared variable inData[threadIdx.x] = imgIn[index]; if ( index < (width*height) ) { imgOut[index] = 
inData[threadIdx.x] / (float)255; } __syncthreads(); } // the CUDA sample implementaiton can be used void ImageHistogramCUDA(float *pSrc, int width, int height, int * imghist) { const int GrayThres = 256; for (int i=0; i< GrayThres; i++) imghist[i] = 0; for (int i=0; i< width*height; i++) { int level = (int) (pSrc[i]*255); imghist[level]+=1; } } // Strech limit void ImageStretchLimitCUDA(float *pSrc, int width, int height, float tol_low, float tol_high, float *low, float *high ) { const int GrayThres = 256; int imghist[256]; double cdf[GrayThres], sum; int i; bool bLowFound=false, bHighFound=false; //histogram ImageHistogramCUDA(pSrc,width,height,imghist); // the below segment can be implemented on CPU only; //************************************************* // cdf cdf[0]=imghist[0]; for (i=1;i<GrayThres;i++) cdf[i] = cdf[i-1] + imghist[i]; sum = cdf[GrayThres-1]; for (i=0;i<GrayThres;i++) cdf[i] /= sum; // find low and high for (i=0;i<GrayThres;i++) { if (cdf[i]>=tol_low && (bLowFound == false)) { *low = (float)(i); bLowFound = true; } if (cdf[i]>=tol_high && (bHighFound == false)) { *high = (float)(i); bHighFound = true; } } // convert to range [0 1] *low /= (GrayThres-1); *high /= (GrayThres-1); } // Adjusts image intensity depending on the current gray levels of the image (histogram stretching) extern "C" void imadjustCUDA(unsigned char *inImg, unsigned char *outImg, int width, int height, float lowPerc, float highPerc) { const int grayLevels = 256; float lowin, highin; float *tempBuffer = new float[width*height]; float *imgInput, *imgBuffer, *imgOutput; clock_t init, final_gpu; // Convert input image to float for (int i=0; i < (width*height); i++) { tempBuffer[i] = (float) inImg[i]; } // ### ALLOCATE CUDA ARRAYS ### hipMalloc((void **)&imgInput, width * height * sizeof(float)); hipMalloc((void **)&imgBuffer, width * height * sizeof(float)); hipMalloc((void **)&imgOutput, width * height * sizeof(float)); // ### COPY TO CUDA MEMORY ### hipMemcpy(imgInput, tempBuffer, width * height * sizeof(float), hipMemcpyHostToDevice); // Get number of blocks int gridSize = ceil( (float)(height*width) / (float)BLOCK_SIZE ); // Assign sizes dim3 blocks( gridSize ); dim3 threads( BLOCK_SIZE ); // Image scaling Kernel hipLaunchKernelGGL(( ImageScalingKernel), dim3(blocks), dim3(threads), 0, 0, imgBuffer, imgInput, width, height); hipDeviceSynchronize(); // Copy image buffer back to host memory (for ImageStretchLimit function) hipMemcpy(tempBuffer, imgBuffer, width * height * sizeof(float), hipMemcpyDeviceToHost); // find out the 1% pixel intensity value and set it to "low" // find out the 99% pixel intensiy valeu and set it to "high" //ImageStretchLimitCUDA(tempBuffer, width, height, 0.01f,0.99f,&lowin,&highin); ImageStretchLimitCUDA(tempBuffer, width, height, lowPerc,highPerc,&lowin,&highin); // Adjust image intensity float lowout = 0, highout = 1; float range = highin-lowin; float rangeout = highout-lowout; float scale = rangeout/range; printf("Adjusting image intensities on GPU (CUDA)...\n"); // Start timer init = clock(); // Call the adjust image intensity kernel hipLaunchKernelGGL(( AdjustImageIntensityKernel), dim3(blocks), dim3(threads), 0, 0, imgOutput, imgBuffer, width, height, lowin, lowout, scale); hipDeviceSynchronize(); // Copy the result back hipMemcpy(tempBuffer, imgOutput, width * height * sizeof(float), hipMemcpyDeviceToHost); // Take time final_gpu=clock()-init; printf("Time taken for imadjust on GPU (CUDA): %f sec\n", (double)final_gpu / ((double)CLOCKS_PER_SEC)); // convert it back 
to unsigned char for (int i =0; i< width*height; i++) { outImg[i] = (unsigned char) (tempBuffer[i]*255); } // Free memory hipFree(imgInput); hipFree(imgBuffer); hipFree(imgOutput); } // ##################### // #### ADJUSTGAMMA #### // ##################### __global__ void AdjustGammaKernel(float *imgOut, float *imgIn, int width, int height, float gamma, float minVal, float maxVal) { __shared__ float bufData[BLOCK_SIZE]; // Get the index of pixel const int index = blockIdx.x * BLOCK_SIZE + threadIdx.x; // Load data to shared variable bufData[threadIdx.x] = imgIn[index]; // Check that it's not out of bounds if (index < (height*width)) { // Find the according multiplier float tempLevel = ( bufData[threadIdx.x] - minVal) / maxVal; tempLevel = powf(tempLevel, (double)1/gamma); // Check that it's within required range if (tempLevel < 0) { bufData[threadIdx.x] = 0; } else if (tempLevel > 1) { bufData[threadIdx.x] = 1; } else { bufData[threadIdx.x] = tempLevel; } // Write data back imgOut[index] = bufData[threadIdx.x]; } // Synchronise threads to have the whole image fully processed for output __syncthreads(); } extern "C" void adjustGammaCUDA(unsigned char *inImg, unsigned char *outImg, int width, int height, float gamma) { const int grayLevels = 256; float lowin, highin; float *tempBuffer = new float[width*height]; float *imgInput, *imgOutput; clock_t init, final_gpu; float minVal = 1000, maxVal = 0; // Convert input image to float for (int i=0; i < (width*height); i++) { tempBuffer[i] = (float) inImg[i]; // Calculate min and max values in the image ## CAN BE ADDED TO DO ON CUDA LATER ON ## if (minVal > tempBuffer[i]) { minVal = tempBuffer[i]; } if (maxVal < tempBuffer[i]) { maxVal = tempBuffer[i]; } } // ### ALLOCATE CUDA ARRAYS ### hipMalloc((void **)&imgInput, width * height * sizeof(float)); hipMalloc((void **)&imgOutput, width * height * sizeof(float)); // ### COPY TO CUDA MEMORY ### hipMemcpy(imgInput, tempBuffer, width * height * sizeof(float), hipMemcpyHostToDevice); // Get number of blocks int gridSize = ceil( (float)(height*width) / (float)BLOCK_SIZE ); // Assign sizes dim3 blocks( gridSize ); dim3 threads( BLOCK_SIZE ); printf("Adjusting gamma on GPU (CUDA)...\n"); // Start timer init = clock(); // Image scaling Kernel hipLaunchKernelGGL(( AdjustGammaKernel), dim3(blocks), dim3(threads), 0, 0, imgOutput, imgInput, width, height, gamma, minVal, maxVal); hipDeviceSynchronize(); // Copy the result back hipMemcpy(tempBuffer, imgOutput, width * height * sizeof(float), hipMemcpyDeviceToHost); // Take time final_gpu=clock()-init; printf("Time taken for gamma adjustment on GPU (CUDA): %f sec\n", (double)final_gpu / ((double)CLOCKS_PER_SEC)); // convert it back to unsigned char for (int i =0; i< width*height; i++) { outImg[i] = (unsigned char) (tempBuffer[i]*255); } // Free memory hipFree(imgInput); hipFree(imgOutput); }
2ebda82901c600726d5d6ff0d5f314aa4fb04da3.cu
#include "hough2d.h" #include "main.h" #include <sm_11_atomic_functions.h> #define BLOCK_SIZE 512 #define BLOCK_SIZE_HOUGH 360 #define STEP_SIZE 5 #define NUMBER_OF_STEPS 360/STEP_SIZE // Circ mask kernel storage __constant__ int maskKernelX[NUMBER_OF_STEPS]; __constant__ int maskKernelY[NUMBER_OF_STEPS]; // Function to set precalculated relative coordinates for circle boundary coordinates extern "C" void setMaskKernel(int *maskX, int *maskY) { cudaMemcpyToSymbol(maskKernelX, maskX, NUMBER_OF_STEPS*sizeof(int)); cudaMemcpyToSymbol(maskKernelY, maskY, NUMBER_OF_STEPS*sizeof(int)); } // Kernel to set all pixel values to specified value __global__ void setAllValuesKernel(int* houghSpace, int height, int width, float value) { int const index = blockIdx.x * BLOCK_SIZE + threadIdx.x; if (index < height*width) { houghSpace[index] = value; } __syncthreads(); } extern "C" void setAllValuesToCUDA(int* houghSpace, int height, int width, float value) { //cout << "Setting all values to " << value << "..." << endl; dim3 dimGrid = (ceil((float)width*height/(float)BLOCK_SIZE)); dim3 dimBlock = (BLOCK_SIZE); setAllValuesKernel<<<dimGrid, dimBlock>>>(houghSpace, height, width, value); cudaThreadSynchronize(); } // Kernel to perform circular Hough transform __global__ void houghTransformKernel(int* cudaHough, float* img, int height, int width, int radius) { if (threadIdx.x < BLOCK_SIZE_HOUGH) { // Arrays to hold coordinates for circle pixels __shared__ float circVals[NUMBER_OF_STEPS]; // There are 10 hough pixels calculated in each block int whichPixel = (int)threadIdx.x / NUMBER_OF_STEPS; // Calculate position for pixel in hough space int cpixIDy = (int)( ((float)(blockIdx.x*STEP_SIZE+whichPixel)) / (float)(width-(radius*2))) + radius; int cpixIDx = (blockIdx.x*10+whichPixel) % (width-(radius*2)) + radius; // Load image pixel from circle edge int xVal = cpixIDx + maskKernelX[threadIdx.x % NUMBER_OF_STEPS]; int yVal = cpixIDy + maskKernelY[threadIdx.x % NUMBER_OF_STEPS]; // Get the pixel value from the image float pixVal = img[yVal*width + xVal]; //float pixVal = img[cpixIDy*width + cpixIDx]; // ## TO DELETE, INCORRECT //int houghVal = cudaHough[cpixIDy*width + cpixIDx]; __syncthreads(); if (pixVal > 0) { atomicAdd(cudaHough + cpixIDy*width + cpixIDx, 1); } __syncthreads(); } } // Calls the Hough transform kernel extern "C" void performHoughTransformCUDA(int* cudaHough, float* img, int height, int width, int radius) { // Define grid and block dimensions dim3 dimGrid = ( ceil((float)(width-(2*radius)) * (height-(2*radius)) / (float)STEP_SIZE) ); dim3 dimBlock = (BLOCK_SIZE_HOUGH); // Perform Hough transform and sync threads to get the final result houghTransformKernel<<<dimGrid, dimBlock>>>(cudaHough, img, height, width, radius); cudaThreadSynchronize(); } // Analyse the defined image area for circles using Hough Transform extern "C" void Hough2D_CUDA(float* img, int width, int height, int radMin, int radMax, int* posX, int* posY, int* maxVal, int* resRad) { int* houghSpace; houghSpace = (int*)malloc(width*height*sizeof(int)); // Arrays for results int *posxArray, *posyArray, *maxValArray, *radArray; // Allocate correct memory for arrays posxArray = (int*)malloc((radMax-radMin)*sizeof(int)); posyArray = (int*)malloc((radMax-radMin)*sizeof(int)); maxValArray = (int*)malloc((radMax-radMin)*sizeof(int)); radArray = (int*)malloc((radMax-radMin)*sizeof(int)); // Allocate memory for CUDA images and matrices float *cudaImg; int *cudaHough; cudaMalloc((void **)&cudaImg, width*height*sizeof(float)); cudaMalloc((void 
**)&cudaHough, width*height*sizeof(int)); // Copy image from host to device cudaMemcpy(cudaImg, img, width*height*sizeof(float), cudaMemcpyHostToDevice); int ctrArr = 0, radius; for (int i=radMin; i < radMax; i++) { // Set all elements to zero setAllValuesToCUDA(cudaHough, height, width, 0); // Precalculate relX and relY radius = i; int ctr = 0; int* relX, *relY; relX = (int*)malloc(NUMBER_OF_STEPS*sizeof(int)); relY = (int*)malloc(NUMBER_OF_STEPS*sizeof(int)); for (int theta=0; theta < 360; theta+=STEP_SIZE) { // Calculate x and y coordinates float angle = (theta*PI) / 180; relX[ctr] = (int)(-radius*cos(angle)); relY[ctr] = (int)(-radius*sin(angle)); ctr++; } // Set mask coordinates for circle setMaskKernel(relX, relY); // Free memory free(relX); free(relY); //performHoughTransformCUDA(cudaHough, cudaImg, height, width, radius, relX, relY, angleNum); performHoughTransformCUDA(cudaHough, cudaImg, height, width, radius); // Copy matrix from device to host cudaMemcpy(houghSpace, cudaHough, width*height*sizeof(float), cudaMemcpyDeviceToHost); // Find max value in the houghSpace *maxVal = 0; int index; int tempPosX, tempPosY, tempMaxVal = 0; for (int y=0; y < height; y++) { for (int x=0; x < width; x++) { //index = radius*width*height + y*width + x; index = y*width + x; if (tempMaxVal < houghSpace[index]) { tempMaxVal = houghSpace[index]; tempPosX = x; tempPosY = y; } } } // Write results to arrays posxArray[ctrArr] = tempPosX; posyArray[ctrArr] = tempPosY; maxValArray[ctrArr] = tempMaxVal; radArray[ctrArr] = i; cout << "Current (radius: " << i << ") MaxVal: " << maxValArray[ctrArr] << " ctr: " << ctrArr << endl; ctrArr++; } // end for // Find the maximum value from arrays *maxVal = 0; for (int j=0; j < (radMax-radMin); j++) { cout << "MaxValArray: " << maxValArray[j] << " " << posxArray[j] << " " << posyArray[j] << " " << radArray[j] << endl; if (*maxVal < maxValArray[j]) { *maxVal = maxValArray[j]; *posX = posxArray[j]; *posY = posyArray[j]; *resRad = radArray[j]; } } // Free cuda memory cudaFree(cudaImg); cudaFree(cudaHough); // Free array memory free(posxArray); free(posyArray); free(maxValArray); free(radArray); } // ################## // #### IMADJUST #### // ################## __global__ void AdjustImageIntensityKernel(float *imgOut, float *imgIn, int width, int height, float lowin, float lowout, float scale) { __shared__ float bufData[BLOCK_SIZE]; // Get the index of pixel const int index = blockIdx.x * BLOCK_SIZE + threadIdx.x; // Load data to shared variable bufData[threadIdx.x] = imgIn[index]; // Check that it's not out of bounds if (index < (height*width)) { // Find the according multiplier float tempLevel = ( bufData[threadIdx.x] - lowin)*scale + lowout; // Check that it's within required range if (tempLevel < 0) { bufData[threadIdx.x] = 0; } else if (tempLevel > 1) { bufData[threadIdx.x] = 1; } else { bufData[threadIdx.x] = tempLevel; } // Write data back imgOut[index] = bufData[threadIdx.x]; } // Synchronise threads to have the whole image fully processed for output __syncthreads(); } // Resize the image __global__ void ImageScalingKernel(float *imgOut, float *imgIn, int width, int height) { __shared__ float inData[BLOCK_SIZE]; // Get the index of pixel const int index = blockIdx.x * BLOCK_SIZE + threadIdx.x; // Load data to shared variable inData[threadIdx.x] = imgIn[index]; if ( index < (width*height) ) { imgOut[index] = inData[threadIdx.x] / (float)255; } __syncthreads(); } // the CUDA sample implementaiton can be used void ImageHistogramCUDA(float *pSrc, int width, int 
height, int * imghist) { const int GrayThres = 256; for (int i=0; i< GrayThres; i++) imghist[i] = 0; for (int i=0; i< width*height; i++) { int level = (int) (pSrc[i]*255); imghist[level]+=1; } } // Strech limit void ImageStretchLimitCUDA(float *pSrc, int width, int height, float tol_low, float tol_high, float *low, float *high ) { const int GrayThres = 256; int imghist[256]; double cdf[GrayThres], sum; int i; bool bLowFound=false, bHighFound=false; //histogram ImageHistogramCUDA(pSrc,width,height,imghist); // the below segment can be implemented on CPU only; //************************************************* // cdf cdf[0]=imghist[0]; for (i=1;i<GrayThres;i++) cdf[i] = cdf[i-1] + imghist[i]; sum = cdf[GrayThres-1]; for (i=0;i<GrayThres;i++) cdf[i] /= sum; // find low and high for (i=0;i<GrayThres;i++) { if (cdf[i]>=tol_low && (bLowFound == false)) { *low = (float)(i); bLowFound = true; } if (cdf[i]>=tol_high && (bHighFound == false)) { *high = (float)(i); bHighFound = true; } } // convert to range [0 1] *low /= (GrayThres-1); *high /= (GrayThres-1); } // Adjusts image intensity depending on the current gray levels of the image (histogram stretching) extern "C" void imadjustCUDA(unsigned char *inImg, unsigned char *outImg, int width, int height, float lowPerc, float highPerc) { const int grayLevels = 256; float lowin, highin; float *tempBuffer = new float[width*height]; float *imgInput, *imgBuffer, *imgOutput; clock_t init, final_gpu; // Convert input image to float for (int i=0; i < (width*height); i++) { tempBuffer[i] = (float) inImg[i]; } // ### ALLOCATE CUDA ARRAYS ### cudaMalloc((void **)&imgInput, width * height * sizeof(float)); cudaMalloc((void **)&imgBuffer, width * height * sizeof(float)); cudaMalloc((void **)&imgOutput, width * height * sizeof(float)); // ### COPY TO CUDA MEMORY ### cudaMemcpy(imgInput, tempBuffer, width * height * sizeof(float), cudaMemcpyHostToDevice); // Get number of blocks int gridSize = ceil( (float)(height*width) / (float)BLOCK_SIZE ); // Assign sizes dim3 blocks( gridSize ); dim3 threads( BLOCK_SIZE ); // Image scaling Kernel ImageScalingKernel<<<blocks, threads>>>(imgBuffer, imgInput, width, height); cudaThreadSynchronize(); // Copy image buffer back to host memory (for ImageStretchLimit function) cudaMemcpy(tempBuffer, imgBuffer, width * height * sizeof(float), cudaMemcpyDeviceToHost); // find out the 1% pixel intensity value and set it to "low" // find out the 99% pixel intensiy valeu and set it to "high" //ImageStretchLimitCUDA(tempBuffer, width, height, 0.01f,0.99f,&lowin,&highin); ImageStretchLimitCUDA(tempBuffer, width, height, lowPerc,highPerc,&lowin,&highin); // Adjust image intensity float lowout = 0, highout = 1; float range = highin-lowin; float rangeout = highout-lowout; float scale = rangeout/range; printf("Adjusting image intensities on GPU (CUDA)...\n"); // Start timer init = clock(); // Call the adjust image intensity kernel AdjustImageIntensityKernel<<<blocks, threads>>>(imgOutput, imgBuffer, width, height, lowin, lowout, scale); cudaThreadSynchronize(); // Copy the result back cudaMemcpy(tempBuffer, imgOutput, width * height * sizeof(float), cudaMemcpyDeviceToHost); // Take time final_gpu=clock()-init; printf("Time taken for imadjust on GPU (CUDA): %f sec\n", (double)final_gpu / ((double)CLOCKS_PER_SEC)); // convert it back to unsigned char for (int i =0; i< width*height; i++) { outImg[i] = (unsigned char) (tempBuffer[i]*255); } // Free memory cudaFree(imgInput); cudaFree(imgBuffer); cudaFree(imgOutput); } // ##################### // 
#### ADJUSTGAMMA #### // ##################### __global__ void AdjustGammaKernel(float *imgOut, float *imgIn, int width, int height, float gamma, float minVal, float maxVal) { __shared__ float bufData[BLOCK_SIZE]; // Get the index of pixel const int index = blockIdx.x * BLOCK_SIZE + threadIdx.x; // Load data to shared variable bufData[threadIdx.x] = imgIn[index]; // Check that it's not out of bounds if (index < (height*width)) { // Find the according multiplier float tempLevel = ( bufData[threadIdx.x] - minVal) / maxVal; tempLevel = powf(tempLevel, (double)1/gamma); // Check that it's within required range if (tempLevel < 0) { bufData[threadIdx.x] = 0; } else if (tempLevel > 1) { bufData[threadIdx.x] = 1; } else { bufData[threadIdx.x] = tempLevel; } // Write data back imgOut[index] = bufData[threadIdx.x]; } // Synchronise threads to have the whole image fully processed for output __syncthreads(); } extern "C" void adjustGammaCUDA(unsigned char *inImg, unsigned char *outImg, int width, int height, float gamma) { const int grayLevels = 256; float lowin, highin; float *tempBuffer = new float[width*height]; float *imgInput, *imgOutput; clock_t init, final_gpu; float minVal = 1000, maxVal = 0; // Convert input image to float for (int i=0; i < (width*height); i++) { tempBuffer[i] = (float) inImg[i]; // Calculate min and max values in the image ## CAN BE ADDED TO DO ON CUDA LATER ON ## if (minVal > tempBuffer[i]) { minVal = tempBuffer[i]; } if (maxVal < tempBuffer[i]) { maxVal = tempBuffer[i]; } } // ### ALLOCATE CUDA ARRAYS ### cudaMalloc((void **)&imgInput, width * height * sizeof(float)); cudaMalloc((void **)&imgOutput, width * height * sizeof(float)); // ### COPY TO CUDA MEMORY ### cudaMemcpy(imgInput, tempBuffer, width * height * sizeof(float), cudaMemcpyHostToDevice); // Get number of blocks int gridSize = ceil( (float)(height*width) / (float)BLOCK_SIZE ); // Assign sizes dim3 blocks( gridSize ); dim3 threads( BLOCK_SIZE ); printf("Adjusting gamma on GPU (CUDA)...\n"); // Start timer init = clock(); // Image scaling Kernel AdjustGammaKernel<<<blocks, threads>>>(imgOutput, imgInput, width, height, gamma, minVal, maxVal); cudaThreadSynchronize(); // Copy the result back cudaMemcpy(tempBuffer, imgOutput, width * height * sizeof(float), cudaMemcpyDeviceToHost); // Take time final_gpu=clock()-init; printf("Time taken for gamma adjustment on GPU (CUDA): %f sec\n", (double)final_gpu / ((double)CLOCKS_PER_SEC)); // convert it back to unsigned char for (int i =0; i< width*height; i++) { outImg[i] = (unsigned char) (tempBuffer[i]*255); } // Free memory cudaFree(imgInput); cudaFree(imgOutput); }
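The pair above shows the routine host-side substitutions hipify makes in this dataset: cudaMemcpyToSymbol, cudaMalloc and cudaMemcpy become their hip* counterparts, the deprecated cudaThreadSynchronize is mapped to hipDeviceSynchronize, and every triple-chevron launch becomes hipLaunchKernelGGL. The condensed sketch below restates those mappings on a reduced stand-in: the constant-array size, kernel body, and wrapper function names are assumptions for illustration, not the dataset code itself.

// Reduced stand-in illustrating the CUDA-to-HIP substitutions seen in the pair above.
#include <hip/hip_runtime.h>

#define BLOCK_SIZE 512
__constant__ int maskKernelX[8];             // stand-in for the real constant mask

__global__ void setAllValuesKernel(int *buf, int n, int value)
{
    int index = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    if (index < n) buf[index] = value;       // same bounds-checked fill pattern as above
}

void setMask(const int *maskX)
{
    // CUDA:  cudaMemcpyToSymbol(maskKernelX, maskX, 8 * sizeof(int));
    hipMemcpyToSymbol(maskKernelX, maskX, 8 * sizeof(int));
}

void clearOnDevice(int *buf, int n, int value)
{
    dim3 dimGrid((n + BLOCK_SIZE - 1) / BLOCK_SIZE);
    dim3 dimBlock(BLOCK_SIZE);
    // CUDA:  setAllValuesKernel<<<dimGrid, dimBlock>>>(buf, n, value);
    hipLaunchKernelGGL(setAllValuesKernel, dimGrid, dimBlock, 0, 0, buf, n, value);
    // CUDA:  cudaThreadSynchronize();  (deprecated; hipify emits the device-wide call)
    hipDeviceSynchronize();
}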
d68ad00cd17be121e5adaa18efe5daa864958590.hip
// !!! This is a file automatically generated by hipify!!! /* * _reg_cudaCommon.cpp * * * Created by Marc Modat on 25/03/2009. * Copyright (c) 2009, University College London. All rights reserved. * Centre for Medical Image Computing (CMIC) * See the LICENSE.txt file in the nifty_reg root folder * */ #ifndef _REG_CUDACOMMON_CPP #define _REG_CUDACOMMON_CPP #include "_reg_cudaCommon.h" /* ******************************** */ /* ******************************** */ template <class DTYPE, class NIFTI_TYPE> int cudaCommon_transferNiftiToArrayOnDevice1(DTYPE **array_d, nifti_image *img) { if(sizeof(DTYPE)!=sizeof(NIFTI_TYPE)){ fprintf(stderr, "ERROR:\tcudaCommon_transferNiftiToArrayOnDevice:\n"); fprintf(stderr, "ERROR:\tThe host and device arrays are of different types.\n"); return 1; } else{ const unsigned int memSize = img->dim[1] * img->dim[2] * img->dim[3] * sizeof(DTYPE); NIFTI_TYPE *array_h=static_cast<NIFTI_TYPE *>(img->data); NR_CUDA_SAFE_CALL(hipMemcpy(*array_d, array_h, memSize, hipMemcpyHostToDevice)); } return 0; } /* ******************************** */ template <class DTYPE> int cudaCommon_transferNiftiToArrayOnDevice(DTYPE **array_d, nifti_image *img) { if( sizeof(DTYPE)==sizeof(float4) ){ if( (img->datatype!=NIFTI_TYPE_FLOAT32) || (img->dim[5]<2) || (img->dim[4]>1)){ fprintf(stderr, "ERROR:\tcudaCommon_transferNiftiToDevice:\n"); fprintf(stderr, "ERROR:\tThe specified image is not a single precision deformation field image\n"); return 1; } float *niftiImgValues = static_cast<float *>(img->data); float4 *array_h=(float4 *)calloc(img->nx*img->ny*img->nz,sizeof(float4)); const int voxelNumber = img->nx*img->ny*img->nz; for(int i=0; i<voxelNumber; i++) array_h[i].x= *niftiImgValues++; if(img->dim[5]>=2){ for(int i=0; i<voxelNumber; i++) array_h[i].y= *niftiImgValues++; } if(img->dim[5]>=3){ for(int i=0; i<voxelNumber; i++) array_h[i].z= *niftiImgValues++; } if(img->dim[5]>=4){ for(int i=0; i<voxelNumber; i++) array_h[i].w= *niftiImgValues++; } NR_CUDA_SAFE_CALL(hipMemcpy(*array_d, array_h, img->nx*img->ny*img->nz*sizeof(float4), hipMemcpyHostToDevice)); free(array_h); } else{ // All these else could be removed but the nvcc compiler would warn for unreachable statement switch(img->datatype){ case NIFTI_TYPE_FLOAT32: return cudaCommon_transferNiftiToArrayOnDevice1<DTYPE,float>(array_d, img); default: fprintf(stderr, "ERROR:\tcudaCommon_transferNiftiToArrayOnDevice:\n"); fprintf(stderr, "ERROR:\tThe image data type %d is not supported\n",img->datatype); return 1; } } return 0; } template int cudaCommon_transferNiftiToArrayOnDevice<float>(float **, nifti_image *); template int cudaCommon_transferNiftiToArrayOnDevice<float4>(float4 **, nifti_image *); /* ******************************** */ template <class DTYPE, class NIFTI_TYPE> int cudaCommon_transferNiftiToArrayOnDevice1(DTYPE **array_d, DTYPE **array2_d, nifti_image *img) { if(sizeof(DTYPE)!=sizeof(NIFTI_TYPE)){ fprintf(stderr, "ERROR:\tcudaCommon_transferNiftiToArrayOnDevice:\n"); fprintf(stderr, "ERROR:\tThe host and device arrays are of different types.\n"); return 1; } else{ const unsigned int memSize = img->dim[1] * img->dim[2] * img->dim[3] * sizeof(DTYPE); NIFTI_TYPE *array_h=static_cast<NIFTI_TYPE *>(img->data); NIFTI_TYPE *array2_h=&array_h[img->dim[1] * img->dim[2] * img->dim[3]]; NR_CUDA_SAFE_CALL(hipMemcpy(*array_d, array_h, memSize, hipMemcpyHostToDevice)); NR_CUDA_SAFE_CALL(hipMemcpy(*array2_d, array2_h, memSize, hipMemcpyHostToDevice)); } return 0; } /* ******************************** */ template <class DTYPE> int 
cudaCommon_transferNiftiToArrayOnDevice(DTYPE **array_d, DTYPE **array2_d, nifti_image *img) { if( sizeof(DTYPE)==sizeof(float4) ){ if( (img->datatype!=NIFTI_TYPE_FLOAT32) || (img->dim[5]<2) || (img->dim[4]>1)){ fprintf(stderr, "ERROR:\tcudaCommon_transferNiftiToDevice:\n"); fprintf(stderr, "ERROR:\tThe specified image is not a single precision deformation field image\n"); return 1; } float *niftiImgValues = static_cast<float *>(img->data); float4 *array_h=(float4 *)calloc(img->nx*img->ny*img->nz,sizeof(float4)); float4 *array2_h=(float4 *)calloc(img->nx*img->ny*img->nz,sizeof(float4)); const int voxelNumber = img->nx*img->ny*img->nz; for(int i=0; i<voxelNumber; i++) array_h[i].x= *niftiImgValues++; for(int i=0; i<voxelNumber; i++) array2_h[i].x= *niftiImgValues++; if(img->dim[5]>=2){ for(int i=0; i<voxelNumber; i++) array_h[i].y= *niftiImgValues++; for(int i=0; i<voxelNumber; i++) array2_h[i].y= *niftiImgValues++; } if(img->dim[5]>=3){ for(int i=0; i<voxelNumber; i++) array_h[i].z= *niftiImgValues++; for(int i=0; i<voxelNumber; i++) array2_h[i].z= *niftiImgValues++; } if(img->dim[5]>=4){ for(int i=0; i<voxelNumber; i++) array_h[i].w= *niftiImgValues++; for(int i=0; i<voxelNumber; i++) array2_h[i].w= *niftiImgValues++; } NR_CUDA_SAFE_CALL(hipMemcpy(*array_d, array_h, img->nx*img->ny*img->nz*sizeof(float4), hipMemcpyHostToDevice)); NR_CUDA_SAFE_CALL(hipMemcpy(*array2_d, array2_h, img->nx*img->ny*img->nz*sizeof(float4), hipMemcpyHostToDevice)); free(array_h); free(array2_h); } else{ // All these else could be removed but the nvcc compiler would warn for unreachable statement switch(img->datatype){ case NIFTI_TYPE_FLOAT32: return cudaCommon_transferNiftiToArrayOnDevice1<DTYPE,float>(array_d, array2_d, img); default: fprintf(stderr, "ERROR:\tcudaCommon_transferNiftiToArrayOnDevice:\n"); fprintf(stderr, "ERROR:\tThe image data type %d is not supported\n",img->datatype); return 1; } } return 0; } template int cudaCommon_transferNiftiToArrayOnDevice<float>(float **,float **, nifti_image *); template int cudaCommon_transferNiftiToArrayOnDevice<float4>(float4 **,float4 **, nifti_image *); // for deformation field /* ******************************** */ /* ******************************** */ template <class DTYPE, class NIFTI_TYPE> int cudaCommon_transferNiftiToArrayOnDevice1(hipArray **cuArray_d, nifti_image *img) { if(sizeof(DTYPE)!=sizeof(NIFTI_TYPE)){ fprintf(stderr, "ERROR:\tcudaCommon_transferNiftiToArrayOnDevice:\n"); fprintf(stderr, "ERROR:\tThe host and device arrays are of different types.\n"); return 1; } else{ NIFTI_TYPE *array_h=static_cast<NIFTI_TYPE *>(img->data); hipMemcpy3DParms copyParams = {0}; copyParams.extent = make_hipExtent(img->dim[1], img->dim[2], img->dim[3]); copyParams.srcPtr = make_hipPitchedPtr((void *) array_h, copyParams.extent.width*sizeof(DTYPE), copyParams.extent.width, copyParams.extent.height); copyParams.dstArray = *cuArray_d; copyParams.kind = hipMemcpyHostToDevice; NR_CUDA_SAFE_CALL(hipMemcpy3D(&copyParams)); } return 0; } /* ******************************** */ template <class DTYPE> int cudaCommon_transferNiftiToArrayOnDevice(hipArray **cuArray_d, nifti_image *img) { if( sizeof(DTYPE)==sizeof(float4) ){ if( (img->datatype!=NIFTI_TYPE_FLOAT32) || (img->dim[5]<2) || (img->dim[4]>1) ){ fprintf(stderr, "ERROR:\tcudaCommon_transferNiftiToDevice:\n"); fprintf(stderr, "ERROR:\tThe specified image is not a single precision deformation field image\n"); return 1; } float *niftiImgValues = static_cast<float *>(img->data); float4 *array_h=(float4 
*)calloc(img->nx*img->ny*img->nz,sizeof(float4)); for(int i=0; i<img->nx*img->ny*img->nz; i++) array_h[i].x= *niftiImgValues++; if(img->dim[5]>=2){ for(int i=0; i<img->nx*img->ny*img->nz; i++) array_h[i].y= *niftiImgValues++; } if(img->dim[5]>=3){ for(int i=0; i<img->nx*img->ny*img->nz; i++) array_h[i].z= *niftiImgValues++; } if(img->dim[5]==3){ for(int i=0; i<img->nx*img->ny*img->nz; i++) array_h[i].w= *niftiImgValues++; } hipMemcpy3DParms copyParams = {0}; copyParams.extent = make_hipExtent(img->dim[1], img->dim[2], img->dim[3]); copyParams.srcPtr = make_hipPitchedPtr((void *) array_h, copyParams.extent.width*sizeof(DTYPE), copyParams.extent.width, copyParams.extent.height); copyParams.dstArray = *cuArray_d; copyParams.kind = hipMemcpyHostToDevice; NR_CUDA_SAFE_CALL(hipMemcpy3D(&copyParams)) free(array_h); } else{ // All these else could be removed but the nvcc compiler would warn for unreachable statement switch(img->datatype){ case NIFTI_TYPE_FLOAT32: return cudaCommon_transferNiftiToArrayOnDevice1<DTYPE,float>(cuArray_d, img); default: fprintf(stderr, "ERROR:\tcudaCommon_transferNiftiToArrayOnDevice:\n"); fprintf(stderr, "ERROR:\tThe image data type %d is not supported\n",img->datatype); return 1; } } return 0; } template int cudaCommon_transferNiftiToArrayOnDevice<float>(hipArray **, nifti_image *); template int cudaCommon_transferNiftiToArrayOnDevice<float4>(hipArray **, nifti_image *); // for deformation field /* ******************************** */ /* ******************************** */ template <class DTYPE, class NIFTI_TYPE> int cudaCommon_transferNiftiToArrayOnDevice1(hipArray **cuArray_d, hipArray **cuArray2_d, nifti_image *img) { if(sizeof(DTYPE)!=sizeof(NIFTI_TYPE)){ fprintf(stderr, "ERROR:\tcudaCommon_transferNiftiToArrayOnDevice:\n"); fprintf(stderr, "ERROR:\tThe host and device arrays are of different types.\n"); return 1; } else{ NIFTI_TYPE *array_h = static_cast<NIFTI_TYPE *>(img->data); NIFTI_TYPE *array2_h = &array_h[img->dim[1]*img->dim[2]*img->dim[3]]; hipMemcpy3DParms copyParams = {0}; copyParams.extent = make_hipExtent(img->dim[1], img->dim[2], img->dim[3]); copyParams.kind = hipMemcpyHostToDevice; // First timepoint copyParams.srcPtr = make_hipPitchedPtr((void *) array_h, copyParams.extent.width*sizeof(DTYPE), copyParams.extent.width, copyParams.extent.height); copyParams.dstArray = *cuArray_d; NR_CUDA_SAFE_CALL(hipMemcpy3D(&copyParams)); // Second timepoint copyParams.srcPtr = make_hipPitchedPtr((void *) array2_h, copyParams.extent.width*sizeof(DTYPE), copyParams.extent.width, copyParams.extent.height); copyParams.dstArray = *cuArray2_d; NR_CUDA_SAFE_CALL(hipMemcpy3D(&copyParams)); } return 0; } /* ******************************** */ template <class DTYPE> int cudaCommon_transferNiftiToArrayOnDevice(hipArray **cuArray_d, hipArray **cuArray2_d, nifti_image *img) { if( sizeof(DTYPE)==sizeof(float4) ){ if( (img->datatype!=NIFTI_TYPE_FLOAT32) || (img->dim[5]<2) || (img->dim[4]>1) ){ fprintf(stderr, "ERROR:\tcudaCommon_transferNiftiToDevice:\n"); fprintf(stderr, "ERROR:\tThe specified image is not a single precision deformation field image\n"); return 1; } float *niftiImgValues = static_cast<float *>(img->data); float4 *array_h=(float4 *)calloc(img->nx*img->ny*img->nz,sizeof(float4)); float4 *array2_h=(float4 *)calloc(img->nx*img->ny*img->nz,sizeof(float4)); for(int i=0; i<img->nx*img->ny*img->nz; i++) array_h[i].x= *niftiImgValues++; for(int i=0; i<img->nx*img->ny*img->nz; i++) array2_h[i].x= *niftiImgValues++; if(img->dim[5]>=2){ for(int i=0; 
i<img->nx*img->ny*img->nz; i++) array_h[i].y= *niftiImgValues++; for(int i=0; i<img->nx*img->ny*img->nz; i++) array2_h[i].y= *niftiImgValues++; } if(img->dim[5]>=3){ for(int i=0; i<img->nx*img->ny*img->nz; i++) array_h[i].z= *niftiImgValues++; for(int i=0; i<img->nx*img->ny*img->nz; i++) array2_h[i].z= *niftiImgValues++; } if(img->dim[5]==3){ for(int i=0; i<img->nx*img->ny*img->nz; i++) array_h[i].w= *niftiImgValues++; for(int i=0; i<img->nx*img->ny*img->nz; i++) array2_h[i].w= *niftiImgValues++; } hipMemcpy3DParms copyParams = {0}; copyParams.extent = make_hipExtent(img->dim[1], img->dim[2], img->dim[3]); copyParams.kind = hipMemcpyHostToDevice; // First timepoint copyParams.srcPtr = make_hipPitchedPtr((void *) array_h, copyParams.extent.width*sizeof(DTYPE), copyParams.extent.width, copyParams.extent.height); copyParams.dstArray = *cuArray_d; NR_CUDA_SAFE_CALL(hipMemcpy3D(&copyParams)); free(array_h); // Second timepoint copyParams.srcPtr = make_hipPitchedPtr((void *) array2_h, copyParams.extent.width*sizeof(DTYPE), copyParams.extent.width, copyParams.extent.height); copyParams.dstArray = *cuArray2_d; NR_CUDA_SAFE_CALL(hipMemcpy3D(&copyParams)); free(array2_h); } else{ // All these else could be removed but the nvcc compiler would warn for unreachable statement switch(img->datatype){ case NIFTI_TYPE_FLOAT32: return cudaCommon_transferNiftiToArrayOnDevice1<DTYPE,float>(cuArray_d, cuArray2_d, img); default: fprintf(stderr, "ERROR:\tcudaCommon_transferNiftiToArrayOnDevice:\n"); fprintf(stderr, "ERROR:\tThe image data type %d is not supported\n",img->datatype); return 1; } } return 0; } template int cudaCommon_transferNiftiToArrayOnDevice<float>(hipArray **, hipArray **, nifti_image *); template int cudaCommon_transferNiftiToArrayOnDevice<float4>(hipArray **, hipArray **, nifti_image *); // for deformation field /* ******************************** */ /* ******************************** */ template <class DTYPE> int cudaCommon_allocateArrayToDevice(hipArray **cuArray_d, int *dim) { const hipExtent volumeSize = make_hipExtent(dim[1], dim[2], dim[3]); hipChannelFormatDesc texDesc = hipCreateChannelDesc<DTYPE>(); NR_CUDA_SAFE_CALL(hipMalloc3DArray(cuArray_d, &texDesc, volumeSize)); return 0; } template int cudaCommon_allocateArrayToDevice<float>(hipArray **, int *); template int cudaCommon_allocateArrayToDevice<float4>(hipArray **, int *); // for deformation field /* ******************************** */ /* ******************************** */ template <class DTYPE> int cudaCommon_allocateArrayToDevice(hipArray **cuArray_d, hipArray **cuArray2_d, int *dim) { const hipExtent volumeSize = make_hipExtent(dim[1], dim[2], dim[3]); hipChannelFormatDesc texDesc = hipCreateChannelDesc<DTYPE>(); NR_CUDA_SAFE_CALL(hipMalloc3DArray(cuArray_d, &texDesc, volumeSize)); NR_CUDA_SAFE_CALL(hipMalloc3DArray(cuArray2_d, &texDesc, volumeSize)); return 0; } template int cudaCommon_allocateArrayToDevice<float>(hipArray **,hipArray **, int *); template int cudaCommon_allocateArrayToDevice<float4>(hipArray **,hipArray **, int *); // for deformation field /* ******************************** */ /* ******************************** */ template <class DTYPE> int cudaCommon_allocateArrayToDevice(DTYPE **array_d, int *dim) { const unsigned int memSize = dim[1] * dim[2] * dim[3] * sizeof(DTYPE); NR_CUDA_SAFE_CALL(hipMalloc(array_d, memSize)); return 0; } template int cudaCommon_allocateArrayToDevice<float>(float **, int *); template int cudaCommon_allocateArrayToDevice<float4>(float4 **, int *); // for deformation field /* 
******************************** */ /* ******************************** */ template <class DTYPE> int cudaCommon_allocateArrayToDevice(DTYPE **array_d, DTYPE **array2_d, int *dim) { const unsigned int memSize = dim[1] * dim[2] * dim[3] * sizeof(DTYPE); NR_CUDA_SAFE_CALL(hipMalloc(array_d, memSize)); NR_CUDA_SAFE_CALL(hipMalloc(array2_d, memSize)); return 0; } template int cudaCommon_allocateArrayToDevice<float>(float **, float **, int *); template int cudaCommon_allocateArrayToDevice<float4>(float4 **, float4 **, int *); // for deformation field /* ******************************** */ /* ******************************** */ template <class DTYPE, class NIFTI_TYPE> int cudaCommon_transferFromDeviceToNifti1(nifti_image *img, DTYPE **array_d) { if(sizeof(DTYPE)!=sizeof(NIFTI_TYPE)){ fprintf(stderr, "ERROR:\tcudaCommon_transferFromDeviceToNifti:\n"); fprintf(stderr, "ERROR:\tThe host and device arrays are of different types.\n"); return 1; } else{ NIFTI_TYPE *array_h=static_cast<NIFTI_TYPE *>(img->data); NR_CUDA_SAFE_CALL(hipMemcpy((void *)array_h, (void *)*array_d, img->nvox*sizeof(DTYPE), hipMemcpyDeviceToHost)); } return 0; } /* ******************************** */ template <class DTYPE> int cudaCommon_transferFromDeviceToNifti(nifti_image *img, DTYPE **array_d) { if(sizeof(DTYPE)==sizeof(float4)){ // A nifti 5D volume is expected if(img->dim[0]<5 || img->dim[4]>1 || img->dim[5]<2 || img->datatype!=NIFTI_TYPE_FLOAT32){ fprintf(stderr, "ERROR:\tcudaCommon_transferFromDeviceToNifti:\n"); fprintf(stderr, "ERROR:\tThe nifti image is not a 5D volume.\n"); return 1; } const int voxelNumber = img->nx*img->ny*img->nz; float4 *array_h; NR_CUDA_SAFE_CALL(hipHostMalloc(&array_h, voxelNumber*sizeof(float4))); NR_CUDA_SAFE_CALL(hipMemcpy((void *)array_h, (const void *)*array_d, voxelNumber*sizeof(float4), hipMemcpyDeviceToHost)); float *niftiImgValues = static_cast<float *>(img->data); for(int i=0; i<voxelNumber; i++) *niftiImgValues++ = array_h[i].x; if(img->dim[5]>=2){ for(int i=0; i<voxelNumber; i++) *niftiImgValues++ = array_h[i].y; } if(img->dim[5]>=3){ for(int i=0; i<voxelNumber; i++) *niftiImgValues++ = array_h[i].z; } if(img->dim[5]>=4){ for(int i=0; i<voxelNumber; i++) *niftiImgValues++ = array_h[i].w; } NR_CUDA_SAFE_CALL(hipHostFree(array_h)); return 0; } else{ switch(img->datatype){ case NIFTI_TYPE_FLOAT32: return cudaCommon_transferFromDeviceToNifti1<DTYPE,float>(img, array_d); default: fprintf(stderr, "ERROR:\tcudaCommon_transferFromDeviceToNifti:\n"); fprintf(stderr, "ERROR:\tThe image data type %d is not supported\n",img->datatype); return 1; } } } template int cudaCommon_transferFromDeviceToNifti<float>(nifti_image *, float **); template int cudaCommon_transferFromDeviceToNifti<float4>(nifti_image *, float4 **); // for deformation field /* ******************************** */ /* ******************************** */ template <class DTYPE, class NIFTI_TYPE> int cudaCommon_transferFromDeviceToNifti1(nifti_image *img, DTYPE **array_d, DTYPE **array2_d) { if(sizeof(DTYPE)!=sizeof(NIFTI_TYPE)){ fprintf(stderr, "ERROR:\tcudaCommon_transferFromDeviceToNifti:\n"); fprintf(stderr, "ERROR:\tThe host and device arrays are of different types.\n"); return 1; } else{ unsigned int voxelNumber=img->nx*img->ny*img->nz; NIFTI_TYPE *array_h=static_cast<NIFTI_TYPE *>(img->data); NIFTI_TYPE *array2_h=&array_h[voxelNumber]; NR_CUDA_SAFE_CALL(hipMemcpy((void *)array_h, (void *)*array_d, voxelNumber*sizeof(DTYPE), hipMemcpyDeviceToHost)); NR_CUDA_SAFE_CALL(hipMemcpy((void *)array2_h, (void *)*array2_d, 
voxelNumber*sizeof(DTYPE), hipMemcpyDeviceToHost)); } return 0; } /* ******************************** */ template <class DTYPE> int cudaCommon_transferFromDeviceToNifti(nifti_image *img, DTYPE **array_d, DTYPE **array2_d) { if(sizeof(DTYPE)==sizeof(float4)){ // A nifti 5D volume is expected if(img->dim[0]<5 || img->dim[4]>1 || img->dim[5]<2 || img->datatype!=NIFTI_TYPE_FLOAT32){ fprintf(stderr, "ERROR:\tcudaCommon_transferFromDeviceToNifti:\n"); fprintf(stderr, "ERROR:\tThe nifti image is not a 5D volume.\n"); return 1; } const int voxelNumber = img->nx*img->ny*img->nz; float4 *array_h=NULL; float4 *array2_h=NULL; NR_CUDA_SAFE_CALL(hipHostMalloc(&array_h, voxelNumber*sizeof(float4))); NR_CUDA_SAFE_CALL(hipHostMalloc(&array2_h, voxelNumber*sizeof(float4))); NR_CUDA_SAFE_CALL(hipMemcpy((void *)array_h, (const void *)*array_d, voxelNumber*sizeof(float4), hipMemcpyDeviceToHost)); NR_CUDA_SAFE_CALL(hipMemcpy((void *)array2_h, (const void *)*array2_d, voxelNumber*sizeof(float4), hipMemcpyDeviceToHost)); float *niftiImgValues = static_cast<float *>(img->data); for(int i=0; i<voxelNumber; i++){ *niftiImgValues++ = array_h[i].x; } for(int i=0; i<voxelNumber; i++){ *niftiImgValues++ = array2_h[i].x; } if(img->dim[5]>=2){ for(int i=0; i<voxelNumber; i++){ *niftiImgValues++ = array_h[i].y; } for(int i=0; i<voxelNumber; i++){ *niftiImgValues++ = array2_h[i].y; } } if(img->dim[5]>=3){ for(int i=0; i<voxelNumber; i++){ *niftiImgValues++ = array_h[i].z; } for(int i=0; i<voxelNumber; i++){ *niftiImgValues++ = array2_h[i].z; } } if(img->dim[5]>=4){ for(int i=0; i<voxelNumber; i++){ *niftiImgValues++ = array_h[i].w; } for(int i=0; i<voxelNumber; i++){ *niftiImgValues++ = array2_h[i].w; } } NR_CUDA_SAFE_CALL(hipHostFree(array_h)); NR_CUDA_SAFE_CALL(hipHostFree(array2_h)); return 0; } else{ switch(img->datatype){ case NIFTI_TYPE_FLOAT32: return cudaCommon_transferFromDeviceToNifti1<DTYPE,float>(img, array_d, array2_d); default: fprintf(stderr, "ERROR:\tcudaCommon_transferFromDeviceToNifti:\n"); fprintf(stderr, "ERROR:\tThe image data type %d is not supported\n",img->datatype); return 1; } } } template int cudaCommon_transferFromDeviceToNifti<float>(nifti_image *, float **, float **); template int cudaCommon_transferFromDeviceToNifti<float4>(nifti_image *, float4 **, float4 **); // for deformation field /* ******************************** */ /* ******************************** */ void cudaCommon_free(hipArray **cuArray_d){ NR_CUDA_SAFE_CALL(hipFreeArray(*cuArray_d)); return; } /* ******************************** */ /* ******************************** */ template <class DTYPE> void cudaCommon_free(DTYPE **array_d){ NR_CUDA_SAFE_CALL(hipFree(*array_d)); return; } template void cudaCommon_free<int>(int **); template void cudaCommon_free<float>(float **); template void cudaCommon_free<float4>(float4 **); /* ******************************** */ /* ******************************** */ #endif
d68ad00cd17be121e5adaa18efe5daa864958590.cu
/* * _reg_cudaCommon.cpp * * * Created by Marc Modat on 25/03/2009. * Copyright (c) 2009, University College London. All rights reserved. * Centre for Medical Image Computing (CMIC) * See the LICENSE.txt file in the nifty_reg root folder * */ #ifndef _REG_CUDACOMMON_CPP #define _REG_CUDACOMMON_CPP #include "_reg_cudaCommon.h" /* ******************************** */ /* ******************************** */ template <class DTYPE, class NIFTI_TYPE> int cudaCommon_transferNiftiToArrayOnDevice1(DTYPE **array_d, nifti_image *img) { if(sizeof(DTYPE)!=sizeof(NIFTI_TYPE)){ fprintf(stderr, "ERROR:\tcudaCommon_transferNiftiToArrayOnDevice:\n"); fprintf(stderr, "ERROR:\tThe host and device arrays are of different types.\n"); return 1; } else{ const unsigned int memSize = img->dim[1] * img->dim[2] * img->dim[3] * sizeof(DTYPE); NIFTI_TYPE *array_h=static_cast<NIFTI_TYPE *>(img->data); NR_CUDA_SAFE_CALL(cudaMemcpy(*array_d, array_h, memSize, cudaMemcpyHostToDevice)); } return 0; } /* ******************************** */ template <class DTYPE> int cudaCommon_transferNiftiToArrayOnDevice(DTYPE **array_d, nifti_image *img) { if( sizeof(DTYPE)==sizeof(float4) ){ if( (img->datatype!=NIFTI_TYPE_FLOAT32) || (img->dim[5]<2) || (img->dim[4]>1)){ fprintf(stderr, "ERROR:\tcudaCommon_transferNiftiToDevice:\n"); fprintf(stderr, "ERROR:\tThe specified image is not a single precision deformation field image\n"); return 1; } float *niftiImgValues = static_cast<float *>(img->data); float4 *array_h=(float4 *)calloc(img->nx*img->ny*img->nz,sizeof(float4)); const int voxelNumber = img->nx*img->ny*img->nz; for(int i=0; i<voxelNumber; i++) array_h[i].x= *niftiImgValues++; if(img->dim[5]>=2){ for(int i=0; i<voxelNumber; i++) array_h[i].y= *niftiImgValues++; } if(img->dim[5]>=3){ for(int i=0; i<voxelNumber; i++) array_h[i].z= *niftiImgValues++; } if(img->dim[5]>=4){ for(int i=0; i<voxelNumber; i++) array_h[i].w= *niftiImgValues++; } NR_CUDA_SAFE_CALL(cudaMemcpy(*array_d, array_h, img->nx*img->ny*img->nz*sizeof(float4), cudaMemcpyHostToDevice)); free(array_h); } else{ // All these else could be removed but the nvcc compiler would warn for unreachable statement switch(img->datatype){ case NIFTI_TYPE_FLOAT32: return cudaCommon_transferNiftiToArrayOnDevice1<DTYPE,float>(array_d, img); default: fprintf(stderr, "ERROR:\tcudaCommon_transferNiftiToArrayOnDevice:\n"); fprintf(stderr, "ERROR:\tThe image data type %d is not supported\n",img->datatype); return 1; } } return 0; } template int cudaCommon_transferNiftiToArrayOnDevice<float>(float **, nifti_image *); template int cudaCommon_transferNiftiToArrayOnDevice<float4>(float4 **, nifti_image *); /* ******************************** */ template <class DTYPE, class NIFTI_TYPE> int cudaCommon_transferNiftiToArrayOnDevice1(DTYPE **array_d, DTYPE **array2_d, nifti_image *img) { if(sizeof(DTYPE)!=sizeof(NIFTI_TYPE)){ fprintf(stderr, "ERROR:\tcudaCommon_transferNiftiToArrayOnDevice:\n"); fprintf(stderr, "ERROR:\tThe host and device arrays are of different types.\n"); return 1; } else{ const unsigned int memSize = img->dim[1] * img->dim[2] * img->dim[3] * sizeof(DTYPE); NIFTI_TYPE *array_h=static_cast<NIFTI_TYPE *>(img->data); NIFTI_TYPE *array2_h=&array_h[img->dim[1] * img->dim[2] * img->dim[3]]; NR_CUDA_SAFE_CALL(cudaMemcpy(*array_d, array_h, memSize, cudaMemcpyHostToDevice)); NR_CUDA_SAFE_CALL(cudaMemcpy(*array2_d, array2_h, memSize, cudaMemcpyHostToDevice)); } return 0; } /* ******************************** */ template <class DTYPE> int cudaCommon_transferNiftiToArrayOnDevice(DTYPE **array_d, 
DTYPE **array2_d, nifti_image *img) { if( sizeof(DTYPE)==sizeof(float4) ){ if( (img->datatype!=NIFTI_TYPE_FLOAT32) || (img->dim[5]<2) || (img->dim[4]>1)){ fprintf(stderr, "ERROR:\tcudaCommon_transferNiftiToDevice:\n"); fprintf(stderr, "ERROR:\tThe specified image is not a single precision deformation field image\n"); return 1; } float *niftiImgValues = static_cast<float *>(img->data); float4 *array_h=(float4 *)calloc(img->nx*img->ny*img->nz,sizeof(float4)); float4 *array2_h=(float4 *)calloc(img->nx*img->ny*img->nz,sizeof(float4)); const int voxelNumber = img->nx*img->ny*img->nz; for(int i=0; i<voxelNumber; i++) array_h[i].x= *niftiImgValues++; for(int i=0; i<voxelNumber; i++) array2_h[i].x= *niftiImgValues++; if(img->dim[5]>=2){ for(int i=0; i<voxelNumber; i++) array_h[i].y= *niftiImgValues++; for(int i=0; i<voxelNumber; i++) array2_h[i].y= *niftiImgValues++; } if(img->dim[5]>=3){ for(int i=0; i<voxelNumber; i++) array_h[i].z= *niftiImgValues++; for(int i=0; i<voxelNumber; i++) array2_h[i].z= *niftiImgValues++; } if(img->dim[5]>=4){ for(int i=0; i<voxelNumber; i++) array_h[i].w= *niftiImgValues++; for(int i=0; i<voxelNumber; i++) array2_h[i].w= *niftiImgValues++; } NR_CUDA_SAFE_CALL(cudaMemcpy(*array_d, array_h, img->nx*img->ny*img->nz*sizeof(float4), cudaMemcpyHostToDevice)); NR_CUDA_SAFE_CALL(cudaMemcpy(*array2_d, array2_h, img->nx*img->ny*img->nz*sizeof(float4), cudaMemcpyHostToDevice)); free(array_h); free(array2_h); } else{ // All these else could be removed but the nvcc compiler would warn for unreachable statement switch(img->datatype){ case NIFTI_TYPE_FLOAT32: return cudaCommon_transferNiftiToArrayOnDevice1<DTYPE,float>(array_d, array2_d, img); default: fprintf(stderr, "ERROR:\tcudaCommon_transferNiftiToArrayOnDevice:\n"); fprintf(stderr, "ERROR:\tThe image data type %d is not supported\n",img->datatype); return 1; } } return 0; } template int cudaCommon_transferNiftiToArrayOnDevice<float>(float **,float **, nifti_image *); template int cudaCommon_transferNiftiToArrayOnDevice<float4>(float4 **,float4 **, nifti_image *); // for deformation field /* ******************************** */ /* ******************************** */ template <class DTYPE, class NIFTI_TYPE> int cudaCommon_transferNiftiToArrayOnDevice1(cudaArray **cuArray_d, nifti_image *img) { if(sizeof(DTYPE)!=sizeof(NIFTI_TYPE)){ fprintf(stderr, "ERROR:\tcudaCommon_transferNiftiToArrayOnDevice:\n"); fprintf(stderr, "ERROR:\tThe host and device arrays are of different types.\n"); return 1; } else{ NIFTI_TYPE *array_h=static_cast<NIFTI_TYPE *>(img->data); cudaMemcpy3DParms copyParams = {0}; copyParams.extent = make_cudaExtent(img->dim[1], img->dim[2], img->dim[3]); copyParams.srcPtr = make_cudaPitchedPtr((void *) array_h, copyParams.extent.width*sizeof(DTYPE), copyParams.extent.width, copyParams.extent.height); copyParams.dstArray = *cuArray_d; copyParams.kind = cudaMemcpyHostToDevice; NR_CUDA_SAFE_CALL(cudaMemcpy3D(&copyParams)); } return 0; } /* ******************************** */ template <class DTYPE> int cudaCommon_transferNiftiToArrayOnDevice(cudaArray **cuArray_d, nifti_image *img) { if( sizeof(DTYPE)==sizeof(float4) ){ if( (img->datatype!=NIFTI_TYPE_FLOAT32) || (img->dim[5]<2) || (img->dim[4]>1) ){ fprintf(stderr, "ERROR:\tcudaCommon_transferNiftiToDevice:\n"); fprintf(stderr, "ERROR:\tThe specified image is not a single precision deformation field image\n"); return 1; } float *niftiImgValues = static_cast<float *>(img->data); float4 *array_h=(float4 *)calloc(img->nx*img->ny*img->nz,sizeof(float4)); for(int i=0; 
i<img->nx*img->ny*img->nz; i++) array_h[i].x= *niftiImgValues++; if(img->dim[5]>=2){ for(int i=0; i<img->nx*img->ny*img->nz; i++) array_h[i].y= *niftiImgValues++; } if(img->dim[5]>=3){ for(int i=0; i<img->nx*img->ny*img->nz; i++) array_h[i].z= *niftiImgValues++; } if(img->dim[5]==3){ for(int i=0; i<img->nx*img->ny*img->nz; i++) array_h[i].w= *niftiImgValues++; } cudaMemcpy3DParms copyParams = {0}; copyParams.extent = make_cudaExtent(img->dim[1], img->dim[2], img->dim[3]); copyParams.srcPtr = make_cudaPitchedPtr((void *) array_h, copyParams.extent.width*sizeof(DTYPE), copyParams.extent.width, copyParams.extent.height); copyParams.dstArray = *cuArray_d; copyParams.kind = cudaMemcpyHostToDevice; NR_CUDA_SAFE_CALL(cudaMemcpy3D(&copyParams)) free(array_h); } else{ // All these else could be removed but the nvcc compiler would warn for unreachable statement switch(img->datatype){ case NIFTI_TYPE_FLOAT32: return cudaCommon_transferNiftiToArrayOnDevice1<DTYPE,float>(cuArray_d, img); default: fprintf(stderr, "ERROR:\tcudaCommon_transferNiftiToArrayOnDevice:\n"); fprintf(stderr, "ERROR:\tThe image data type %d is not supported\n",img->datatype); return 1; } } return 0; } template int cudaCommon_transferNiftiToArrayOnDevice<float>(cudaArray **, nifti_image *); template int cudaCommon_transferNiftiToArrayOnDevice<float4>(cudaArray **, nifti_image *); // for deformation field /* ******************************** */ /* ******************************** */ template <class DTYPE, class NIFTI_TYPE> int cudaCommon_transferNiftiToArrayOnDevice1(cudaArray **cuArray_d, cudaArray **cuArray2_d, nifti_image *img) { if(sizeof(DTYPE)!=sizeof(NIFTI_TYPE)){ fprintf(stderr, "ERROR:\tcudaCommon_transferNiftiToArrayOnDevice:\n"); fprintf(stderr, "ERROR:\tThe host and device arrays are of different types.\n"); return 1; } else{ NIFTI_TYPE *array_h = static_cast<NIFTI_TYPE *>(img->data); NIFTI_TYPE *array2_h = &array_h[img->dim[1]*img->dim[2]*img->dim[3]]; cudaMemcpy3DParms copyParams = {0}; copyParams.extent = make_cudaExtent(img->dim[1], img->dim[2], img->dim[3]); copyParams.kind = cudaMemcpyHostToDevice; // First timepoint copyParams.srcPtr = make_cudaPitchedPtr((void *) array_h, copyParams.extent.width*sizeof(DTYPE), copyParams.extent.width, copyParams.extent.height); copyParams.dstArray = *cuArray_d; NR_CUDA_SAFE_CALL(cudaMemcpy3D(&copyParams)); // Second timepoint copyParams.srcPtr = make_cudaPitchedPtr((void *) array2_h, copyParams.extent.width*sizeof(DTYPE), copyParams.extent.width, copyParams.extent.height); copyParams.dstArray = *cuArray2_d; NR_CUDA_SAFE_CALL(cudaMemcpy3D(&copyParams)); } return 0; } /* ******************************** */ template <class DTYPE> int cudaCommon_transferNiftiToArrayOnDevice(cudaArray **cuArray_d, cudaArray **cuArray2_d, nifti_image *img) { if( sizeof(DTYPE)==sizeof(float4) ){ if( (img->datatype!=NIFTI_TYPE_FLOAT32) || (img->dim[5]<2) || (img->dim[4]>1) ){ fprintf(stderr, "ERROR:\tcudaCommon_transferNiftiToDevice:\n"); fprintf(stderr, "ERROR:\tThe specified image is not a single precision deformation field image\n"); return 1; } float *niftiImgValues = static_cast<float *>(img->data); float4 *array_h=(float4 *)calloc(img->nx*img->ny*img->nz,sizeof(float4)); float4 *array2_h=(float4 *)calloc(img->nx*img->ny*img->nz,sizeof(float4)); for(int i=0; i<img->nx*img->ny*img->nz; i++) array_h[i].x= *niftiImgValues++; for(int i=0; i<img->nx*img->ny*img->nz; i++) array2_h[i].x= *niftiImgValues++; if(img->dim[5]>=2){ for(int i=0; i<img->nx*img->ny*img->nz; i++) array_h[i].y= *niftiImgValues++; 
for(int i=0; i<img->nx*img->ny*img->nz; i++) array2_h[i].y= *niftiImgValues++; } if(img->dim[5]>=3){ for(int i=0; i<img->nx*img->ny*img->nz; i++) array_h[i].z= *niftiImgValues++; for(int i=0; i<img->nx*img->ny*img->nz; i++) array2_h[i].z= *niftiImgValues++; } if(img->dim[5]>=4){ for(int i=0; i<img->nx*img->ny*img->nz; i++) array_h[i].w= *niftiImgValues++; for(int i=0; i<img->nx*img->ny*img->nz; i++) array2_h[i].w= *niftiImgValues++; } cudaMemcpy3DParms copyParams = {0}; copyParams.extent = make_cudaExtent(img->dim[1], img->dim[2], img->dim[3]); copyParams.kind = cudaMemcpyHostToDevice; // First timepoint copyParams.srcPtr = make_cudaPitchedPtr((void *) array_h, copyParams.extent.width*sizeof(DTYPE), copyParams.extent.width, copyParams.extent.height); copyParams.dstArray = *cuArray_d; NR_CUDA_SAFE_CALL(cudaMemcpy3D(&copyParams)); free(array_h); // Second timepoint copyParams.srcPtr = make_cudaPitchedPtr((void *) array2_h, copyParams.extent.width*sizeof(DTYPE), copyParams.extent.width, copyParams.extent.height); copyParams.dstArray = *cuArray2_d; NR_CUDA_SAFE_CALL(cudaMemcpy3D(&copyParams)); free(array2_h); } else{ // All these else could be removed but the nvcc compiler would warn for unreachable statement switch(img->datatype){ case NIFTI_TYPE_FLOAT32: return cudaCommon_transferNiftiToArrayOnDevice1<DTYPE,float>(cuArray_d, cuArray2_d, img); default: fprintf(stderr, "ERROR:\tcudaCommon_transferNiftiToArrayOnDevice:\n"); fprintf(stderr, "ERROR:\tThe image data type %d is not supported\n",img->datatype); return 1; } } return 0; } template int cudaCommon_transferNiftiToArrayOnDevice<float>(cudaArray **, cudaArray **, nifti_image *); template int cudaCommon_transferNiftiToArrayOnDevice<float4>(cudaArray **, cudaArray **, nifti_image *); // for deformation field /* ******************************** */ /* ******************************** */ template <class DTYPE> int cudaCommon_allocateArrayToDevice(cudaArray **cuArray_d, int *dim) { const cudaExtent volumeSize = make_cudaExtent(dim[1], dim[2], dim[3]); cudaChannelFormatDesc texDesc = cudaCreateChannelDesc<DTYPE>(); NR_CUDA_SAFE_CALL(cudaMalloc3DArray(cuArray_d, &texDesc, volumeSize)); return 0; } template int cudaCommon_allocateArrayToDevice<float>(cudaArray **, int *); template int cudaCommon_allocateArrayToDevice<float4>(cudaArray **, int *); // for deformation field /* ******************************** */ /* ******************************** */ template <class DTYPE> int cudaCommon_allocateArrayToDevice(cudaArray **cuArray_d, cudaArray **cuArray2_d, int *dim) { const cudaExtent volumeSize = make_cudaExtent(dim[1], dim[2], dim[3]); cudaChannelFormatDesc texDesc = cudaCreateChannelDesc<DTYPE>(); NR_CUDA_SAFE_CALL(cudaMalloc3DArray(cuArray_d, &texDesc, volumeSize)); NR_CUDA_SAFE_CALL(cudaMalloc3DArray(cuArray2_d, &texDesc, volumeSize)); return 0; } template int cudaCommon_allocateArrayToDevice<float>(cudaArray **,cudaArray **, int *); template int cudaCommon_allocateArrayToDevice<float4>(cudaArray **,cudaArray **, int *); // for deformation field /* ******************************** */ /* ******************************** */ template <class DTYPE> int cudaCommon_allocateArrayToDevice(DTYPE **array_d, int *dim) { const unsigned int memSize = dim[1] * dim[2] * dim[3] * sizeof(DTYPE); NR_CUDA_SAFE_CALL(cudaMalloc(array_d, memSize)); return 0; } template int cudaCommon_allocateArrayToDevice<float>(float **, int *); template int cudaCommon_allocateArrayToDevice<float4>(float4 **, int *); // for deformation field /* ******************************** */ /*
******************************** */ template <class DTYPE> int cudaCommon_allocateArrayToDevice(DTYPE **array_d, DTYPE **array2_d, int *dim) { const unsigned int memSize = dim[1] * dim[2] * dim[3] * sizeof(DTYPE); NR_CUDA_SAFE_CALL(cudaMalloc(array_d, memSize)); NR_CUDA_SAFE_CALL(cudaMalloc(array2_d, memSize)); return 0; } template int cudaCommon_allocateArrayToDevice<float>(float **, float **, int *); template int cudaCommon_allocateArrayToDevice<float4>(float4 **, float4 **, int *); // for deformation field /* ******************************** */ /* ******************************** */ template <class DTYPE, class NIFTI_TYPE> int cudaCommon_transferFromDeviceToNifti1(nifti_image *img, DTYPE **array_d) { if(sizeof(DTYPE)!=sizeof(NIFTI_TYPE)){ fprintf(stderr, "ERROR:\tcudaCommon_transferFromDeviceToNifti:\n"); fprintf(stderr, "ERROR:\tThe host and device arrays are of different types.\n"); return 1; } else{ NIFTI_TYPE *array_h=static_cast<NIFTI_TYPE *>(img->data); NR_CUDA_SAFE_CALL(cudaMemcpy((void *)array_h, (void *)*array_d, img->nvox*sizeof(DTYPE), cudaMemcpyDeviceToHost)); } return 0; } /* ******************************** */ template <class DTYPE> int cudaCommon_transferFromDeviceToNifti(nifti_image *img, DTYPE **array_d) { if(sizeof(DTYPE)==sizeof(float4)){ // A nifti 5D volume is expected if(img->dim[0]<5 || img->dim[4]>1 || img->dim[5]<2 || img->datatype!=NIFTI_TYPE_FLOAT32){ fprintf(stderr, "ERROR:\tcudaCommon_transferFromDeviceToNifti:\n"); fprintf(stderr, "ERROR:\tThe nifti image is not a 5D volume.\n"); return 1; } const int voxelNumber = img->nx*img->ny*img->nz; float4 *array_h; NR_CUDA_SAFE_CALL(cudaMallocHost(&array_h, voxelNumber*sizeof(float4))); NR_CUDA_SAFE_CALL(cudaMemcpy((void *)array_h, (const void *)*array_d, voxelNumber*sizeof(float4), cudaMemcpyDeviceToHost)); float *niftiImgValues = static_cast<float *>(img->data); for(int i=0; i<voxelNumber; i++) *niftiImgValues++ = array_h[i].x; if(img->dim[5]>=2){ for(int i=0; i<voxelNumber; i++) *niftiImgValues++ = array_h[i].y; } if(img->dim[5]>=3){ for(int i=0; i<voxelNumber; i++) *niftiImgValues++ = array_h[i].z; } if(img->dim[5]>=4){ for(int i=0; i<voxelNumber; i++) *niftiImgValues++ = array_h[i].w; } NR_CUDA_SAFE_CALL(cudaFreeHost(array_h)); return 0; } else{ switch(img->datatype){ case NIFTI_TYPE_FLOAT32: return cudaCommon_transferFromDeviceToNifti1<DTYPE,float>(img, array_d); default: fprintf(stderr, "ERROR:\tcudaCommon_transferFromDeviceToNifti:\n"); fprintf(stderr, "ERROR:\tThe image data type %d is not supported\n",img->datatype); return 1; } } } template int cudaCommon_transferFromDeviceToNifti<float>(nifti_image *, float **); template int cudaCommon_transferFromDeviceToNifti<float4>(nifti_image *, float4 **); // for deformation field /* ******************************** */ /* ******************************** */ template <class DTYPE, class NIFTI_TYPE> int cudaCommon_transferFromDeviceToNifti1(nifti_image *img, DTYPE **array_d, DTYPE **array2_d) { if(sizeof(DTYPE)!=sizeof(NIFTI_TYPE)){ fprintf(stderr, "ERROR:\tcudaCommon_transferFromDeviceToNifti:\n"); fprintf(stderr, "ERROR:\tThe host and device arrays are of different types.\n"); return 1; } else{ unsigned int voxelNumber=img->nx*img->ny*img->nz; NIFTI_TYPE *array_h=static_cast<NIFTI_TYPE *>(img->data); NIFTI_TYPE *array2_h=&array_h[voxelNumber]; NR_CUDA_SAFE_CALL(cudaMemcpy((void *)array_h, (void *)*array_d, voxelNumber*sizeof(DTYPE), cudaMemcpyDeviceToHost)); NR_CUDA_SAFE_CALL(cudaMemcpy((void *)array2_h, (void *)*array2_d, voxelNumber*sizeof(DTYPE), 
cudaMemcpyDeviceToHost)); } return 0; } /* ******************************** */ template <class DTYPE> int cudaCommon_transferFromDeviceToNifti(nifti_image *img, DTYPE **array_d, DTYPE **array2_d) { if(sizeof(DTYPE)==sizeof(float4)){ // A nifti 5D volume is expected if(img->dim[0]<5 || img->dim[4]>1 || img->dim[5]<2 || img->datatype!=NIFTI_TYPE_FLOAT32){ fprintf(stderr, "ERROR:\tcudaCommon_transferFromDeviceToNifti:\n"); fprintf(stderr, "ERROR:\tThe nifti image is not a 5D volume.\n"); return 1; } const int voxelNumber = img->nx*img->ny*img->nz; float4 *array_h=NULL; float4 *array2_h=NULL; NR_CUDA_SAFE_CALL(cudaMallocHost(&array_h, voxelNumber*sizeof(float4))); NR_CUDA_SAFE_CALL(cudaMallocHost(&array2_h, voxelNumber*sizeof(float4))); NR_CUDA_SAFE_CALL(cudaMemcpy((void *)array_h, (const void *)*array_d, voxelNumber*sizeof(float4), cudaMemcpyDeviceToHost)); NR_CUDA_SAFE_CALL(cudaMemcpy((void *)array2_h, (const void *)*array2_d, voxelNumber*sizeof(float4), cudaMemcpyDeviceToHost)); float *niftiImgValues = static_cast<float *>(img->data); for(int i=0; i<voxelNumber; i++){ *niftiImgValues++ = array_h[i].x; } for(int i=0; i<voxelNumber; i++){ *niftiImgValues++ = array2_h[i].x; } if(img->dim[5]>=2){ for(int i=0; i<voxelNumber; i++){ *niftiImgValues++ = array_h[i].y; } for(int i=0; i<voxelNumber; i++){ *niftiImgValues++ = array2_h[i].y; } } if(img->dim[5]>=3){ for(int i=0; i<voxelNumber; i++){ *niftiImgValues++ = array_h[i].z; } for(int i=0; i<voxelNumber; i++){ *niftiImgValues++ = array2_h[i].z; } } if(img->dim[5]>=4){ for(int i=0; i<voxelNumber; i++){ *niftiImgValues++ = array_h[i].w; } for(int i=0; i<voxelNumber; i++){ *niftiImgValues++ = array2_h[i].w; } } NR_CUDA_SAFE_CALL(cudaFreeHost(array_h)); NR_CUDA_SAFE_CALL(cudaFreeHost(array2_h)); return 0; } else{ switch(img->datatype){ case NIFTI_TYPE_FLOAT32: return cudaCommon_transferFromDeviceToNifti1<DTYPE,float>(img, array_d, array2_d); default: fprintf(stderr, "ERROR:\tcudaCommon_transferFromDeviceToNifti:\n"); fprintf(stderr, "ERROR:\tThe image data type %d is not supported\n",img->datatype); return 1; } } } template int cudaCommon_transferFromDeviceToNifti<float>(nifti_image *, float **, float **); template int cudaCommon_transferFromDeviceToNifti<float4>(nifti_image *, float4 **, float4 **); // for deformation field /* ******************************** */ /* ******************************** */ void cudaCommon_free(cudaArray **cuArray_d){ NR_CUDA_SAFE_CALL(cudaFreeArray(*cuArray_d)); return; } /* ******************************** */ /* ******************************** */ template <class DTYPE> void cudaCommon_free(DTYPE **array_d){ NR_CUDA_SAFE_CALL(cudaFree(*array_d)); return; } template void cudaCommon_free<int>(int **); template void cudaCommon_free<float>(float **); template void cudaCommon_free<float4>(float4 **); /* ******************************** */ /* ******************************** */ #endif
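// Editor's note (illustrative sketch, not part of the archived file above; all names are
// hypothetical): the transfer helpers in this file pack an n-component NIfTI field stored as
// consecutive scalar planes (all x values, then y, then z, then w) into an interleaved float4
// host buffer before uploading it. The same packing-and-upload pattern, reduced to a minimal
// self-contained helper:
#include <cuda_runtime.h>
#include <cstdlib>
static int packAndUploadField(const float *planes, int voxelNumber, int nComponents, float4 **field_d)
{
    float4 *field_h = (float4 *)calloc(voxelNumber, sizeof(float4));
    if (field_h == NULL) return 1;
    const float *p = planes;
    for (int i = 0; i < voxelNumber; ++i) field_h[i].x = *p++;
    if (nComponents >= 2) for (int i = 0; i < voxelNumber; ++i) field_h[i].y = *p++;
    if (nComponents >= 3) for (int i = 0; i < voxelNumber; ++i) field_h[i].z = *p++;
    if (nComponents >= 4) for (int i = 0; i < voxelNumber; ++i) field_h[i].w = *p++;
    if (cudaMalloc((void **)field_d, voxelNumber * sizeof(float4)) != cudaSuccess) { free(field_h); return 1; }
    if (cudaMemcpy(*field_d, field_h, voxelNumber * sizeof(float4), cudaMemcpyHostToDevice) != cudaSuccess) {
        cudaFree(*field_d); free(field_h); return 1;
    }
    free(field_h);
    return 0;
}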
d5136b12d504c626be834043e671f213390572ab.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <itpp/itbase.h> #include "../libs/cpp/itpp_ext_math.cpp" #include <math.h> #include <tclap/CmdLine.h> #include <hip/device_functions.h> #include <hip/hip_runtime.h> #include "tools.cpp" #include "../libs/cpp/spinchain.cpp" #include "hip_functions.hip" #include "hip_utils.hip" #include "ev_routines.cu" int main(int argc,char* argv[]) { hipSetDevice(0); itpp::RNG_randomize(); int nqubits=3; int l=pow(2,nqubits); itpp::cvec state = itppextmath::RandomState(l); itpp::cvec cstate(l); for(int i=0;i<l;i++) { cstate(i)=state(i); } double ising=1.0; itpp::vec b(3); b(0)=0.0; b(1)=0; b(2)=0; double *dev_R,*dev_I; evcuda::itpp2cuda(state,&dev_R,&dev_I); evcuda::apply_floquet(state,dev_R,dev_I,ising,b); for(int i=0;i<nqubits;i++) { spinchain::apply_ising_z(cstate,ising,i,(i+1)%nqubits); } for(int i=0;i<nqubits;i++) { spinchain::apply_magnetic_kick(cstate,b,i); } evcuda::cuda2itpp(state,dev_R,dev_I); cout << state<<endl; cout << cstate; }
d5136b12d504c626be834043e671f213390572ab.cu
#include <iostream> #include <itpp/itbase.h> #include "../libs/cpp/itpp_ext_math.cpp" #include <math.h> #include <tclap/CmdLine.h> #include <device_functions.h> #include <cuda.h> #include "tools.cpp" #include "../libs/cpp/spinchain.cpp" #include "cuda_functions.cu" #include "cuda_utils.cu" #include "ev_routines.cu" int main(int argc,char* argv[]) { cudaSetDevice(0); itpp::RNG_randomize(); int nqubits=3; int l=pow(2,nqubits); itpp::cvec state = itppextmath::RandomState(l); itpp::cvec cstate(l); for(int i=0;i<l;i++) { cstate(i)=state(i); } double ising=1.0; itpp::vec b(3); b(0)=0.0; b(1)=0; b(2)=0; double *dev_R,*dev_I; evcuda::itpp2cuda(state,&dev_R,&dev_I); evcuda::apply_floquet(state,dev_R,dev_I,ising,b); for(int i=0;i<nqubits;i++) { spinchain::apply_ising_z(cstate,ising,i,(i+1)%nqubits); } for(int i=0;i<nqubits;i++) { spinchain::apply_magnetic_kick(cstate,b,i); } evcuda::cuda2itpp(state,dev_R,dev_I); cout << state<<endl; cout << cstate; }
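// Editor's note (illustrative, not part of the archived pair above): the .hip and .cu files of
// this pair differ only by the mechanical rewrites that hipify applies. For the calls used here
// the mapping is: <cuda.h> -> <hip/hip_runtime.h>, cudaSetDevice -> hipSetDevice, and a
// triple-chevron launch kernel<<<grid, block>>>(args) -> hipLaunchKernelGGL(kernel, dim3(grid),
// dim3(block), 0, 0, args). A hypothetical kernel makes the two launch spellings easy to compare:
__global__ void scaleKernel(double *v) { v[threadIdx.x] *= 2.0; }
// CUDA launch:              scaleKernel<<<1, 8>>>(v_d);
// hipify-generated launch:  hipLaunchKernelGGL(scaleKernel, dim3(1), dim3(8), 0, 0, v_d);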
0cde876aba16c0586401d5d6a93ccc714ddff137.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - **\ * @authors Aaron Oziel, Sean Blackbourn * * Fumitaka Kawasaki (5/3/14): * All functions were completed and working. Therefore, the followng comments * were removed. * * Aaron Wrote (2/3/14): * All comments are now tracking progress in conversion from old GpuSim_struct.cu * file to the new one here. This is a quick key to keep track of their meanings. * * TODO = Needs work and/or is blank. Used to indicate possibly problematic * functions. * DONE = Likely complete functions. Will still need to be checked for * variable continuity and proper arguments. * REMOVED = Deleted, likely due to it becoming unnecessary or not necessary * for GPU implementation. These functions will likely have to be * removed from the Model super class. * COPIED = These functions were in the original GpuSim_struct.cu file * and were directly copy-pasted across to this file. * \** - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - **/ #include "GPUSpikingModel.h" #ifdef PERFORMANCE_METRICS float g_time; hipEvent_t start, stop; #endif // PERFORMANCE_METRICS __constant__ int d_debug_mask[1]; // ---------------------------------------------------------------------------- GPUSpikingModel::GPUSpikingModel(Connections *conns, IAllNeurons *neurons, IAllSynapses *synapses, Layout *layout) : Model::Model(conns, neurons, synapses, layout), synapseIndexMapDevice(NULL), randNoise_d(NULL), m_allNeuronsDevice(NULL), m_allSynapsesDevice(NULL) { } GPUSpikingModel::~GPUSpikingModel() { //Let Model base class handle de-allocation } /* * Allocates and initializes memories on CUDA device. * * @param[out] allNeuronsDevice Memory loation of the pointer to the neurons list on device memory. * @param[out] allSynapsesDevice Memory loation of the pointer to the synapses list on device memory. * @param[in] sim_info Pointer to the simulation information. */ void GPUSpikingModel::allocDeviceStruct(void** allNeuronsDevice, void** allSynapsesDevice, SimulationInfo *sim_info) { // Allocate Neurons and Synapses strucs on GPU device memory m_neurons->allocNeuronDeviceStruct( allNeuronsDevice, sim_info ); m_synapses->allocSynapseDeviceStruct( allSynapsesDevice, sim_info ); // Allocate memory for random noise array int neuron_count = sim_info->totalNeurons; BGSIZE randNoise_d_size = neuron_count * sizeof (float); // size of random noise array HANDLE_ERROR( hipMalloc ( ( void ** ) &randNoise_d, randNoise_d_size ) ); // Copy host neuron and synapse arrays into GPU device m_neurons->copyNeuronHostToDevice( *allNeuronsDevice, sim_info ); m_synapses->copySynapseHostToDevice( *allSynapsesDevice, sim_info ); // allocate synapse inverse map in device memory allocSynapseImap( neuron_count ); } /* * Copies device memories to host memories and deallocaes them. * * @param[out] allNeuronsDevice Memory loation of the pointer to the neurons list on device memory. * @param[out] allSynapsesDevice Memory loation of the pointer to the synapses list on device memory. * @param[in] sim_info Pointer to the simulation information. 
*/ void GPUSpikingModel::deleteDeviceStruct(void** allNeuronsDevice, void** allSynapsesDevice, SimulationInfo *sim_info) { // copy device synapse and neuron structs to host memory m_neurons->copyNeuronDeviceToHost( *allNeuronsDevice, sim_info ); // Deallocate device memory m_neurons->deleteNeuronDeviceStruct( *allNeuronsDevice, sim_info ); // copy device synapse and neuron structs to host memory m_synapses->copySynapseDeviceToHost( *allSynapsesDevice, sim_info ); // Deallocate device memory m_synapses->deleteSynapseDeviceStruct( *allSynapsesDevice ); deleteSynapseImap(); HANDLE_ERROR( hipFree( randNoise_d ) ); } /* * Sets up the Simulation. * * @param sim_info SimulationInfo class to read information from. */ void GPUSpikingModel::setupSim(SimulationInfo *sim_info) { // Set device ID HANDLE_ERROR( hipSetDevice( g_deviceId ) ); // Set DEBUG flag HANDLE_ERROR( hipMemcpyToSymbol (d_debug_mask, &g_debug_mask, sizeof(int) ) ); Model::setupSim(sim_info); //initialize Mersenne Twister //assuming neuron_count >= 100 and is a multiple of 100. Note rng_mt_rng_count must be <= MT_RNG_COUNT int rng_blocks = 25; //# of blocks the kernel will use int rng_nPerRng = 4; //# of iterations per thread (thread granularity, # of rands generated per thread) int rng_mt_rng_count = sim_info->totalNeurons/rng_nPerRng; //# of threads to generate for neuron_count rand #s int rng_threads = rng_mt_rng_count/rng_blocks; //# threads per block needed initMTGPU(sim_info->seed, rng_blocks, rng_threads, rng_nPerRng, rng_mt_rng_count); #ifdef PERFORMANCE_METRICS hipEventCreate( &start ); hipEventCreate( &stop ); t_gpu_rndGeneration = 0.0f; t_gpu_advanceNeurons = 0.0f; t_gpu_advanceSynapses = 0.0f; t_gpu_calcSummation = 0.0f; #endif // PERFORMANCE_METRICS // allocates memories on CUDA device allocDeviceStruct((void **)&m_allNeuronsDevice, (void **)&m_allSynapsesDevice, sim_info); // set device summation points int neuron_count = sim_info->totalNeurons; const int threadsPerBlock = 256; int blocksPerGrid = ( neuron_count + threadsPerBlock - 1 ) / threadsPerBlock; hipLaunchKernelGGL(( setSynapseSummationPointDevice) , dim3(blocksPerGrid), dim3(threadsPerBlock) , 0, 0, neuron_count, m_allNeuronsDevice, m_allSynapsesDevice, sim_info->maxSynapsesPerNeuron, sim_info->width); // copy inverse map to the device memory copySynapseIndexMapHostToDevice(*m_synapseIndexMap, sim_info->totalNeurons); // set some parameters used for advanceNeuronsDevice m_neurons->setAdvanceNeuronsDeviceParams(*m_synapses); // set some parameters used for advanceSynapsesDevice m_synapses->setAdvanceSynapsesDeviceParams(); } /* * Begin terminating the simulator. * * @param sim_info SimulationInfo to refer. */ void GPUSpikingModel::cleanupSim(SimulationInfo *sim_info) { // deallocates memories on CUDA device deleteDeviceStruct((void**)&m_allNeuronsDevice, (void**)&m_allSynapsesDevice, sim_info); #ifdef PERFORMANCE_METRICS hipEventDestroy( start ); hipEventDestroy( stop ); #endif // PERFORMANCE_METRICS } /* * Loads the simulation based on istream input. * * @param input istream to read from. * @param sim_info used as a reference to set info for neurons and synapses. 
*/ void GPUSpikingModel::deserialize(istream& input, const SimulationInfo *sim_info) { Model::deserialize(input, sim_info); // copy inverse map to the device memory copySynapseIndexMapHostToDevice(*m_synapseIndexMap, sim_info->totalNeurons); // Reinitialize device struct - Copy host neuron and synapse arrays into GPU device m_neurons->copyNeuronHostToDevice( m_allNeuronsDevice, sim_info ); m_synapses->copySynapseHostToDevice( m_allSynapsesDevice, sim_info ); // set summation points int neuron_count = sim_info->totalNeurons; const int threadsPerBlock = 256; int blocksPerGrid = ( neuron_count + threadsPerBlock - 1 ) / threadsPerBlock; hipLaunchKernelGGL(( setSynapseSummationPointDevice) , dim3(blocksPerGrid), dim3(threadsPerBlock) , 0, 0, neuron_count, m_allNeuronsDevice, m_allSynapsesDevice, sim_info->maxSynapsesPerNeuron, sim_info->width); } /* * Advance everything in the model one time step. In this case, that * means calling all of the kernels that do the "micro step" updating * (i.e., NOT the stuff associated with growth). * * @param sim_info SimulationInfo class to read information from. */ void GPUSpikingModel::advance(const SimulationInfo *sim_info) { #ifdef PERFORMANCE_METRICS startTimer(); #endif // PERFORMANCE_METRICS normalMTGPU(randNoise_d); #ifdef PERFORMANCE_METRICS lapTime(t_gpu_rndGeneration); startTimer(); #endif // PERFORMANCE_METRICS // display running info to console // Advance neurons -------------> m_neurons->advanceNeurons(*m_synapses, m_allNeuronsDevice, m_allSynapsesDevice, sim_info, randNoise_d, synapseIndexMapDevice); #ifdef PERFORMANCE_METRICS lapTime(t_gpu_advanceNeurons); startTimer(); #endif // PERFORMANCE_METRICS // Advance synapses -------------> m_synapses->advanceSynapses(m_allSynapsesDevice, m_allNeuronsDevice, synapseIndexMapDevice, sim_info); #ifdef PERFORMANCE_METRICS lapTime(t_gpu_advanceSynapses); startTimer(); #endif // PERFORMANCE_METRICS // calculate summation point calcSummationMap(sim_info); #ifdef PERFORMANCE_METRICS lapTime(t_gpu_calcSummation); #endif // PERFORMANCE_METRICS } /* * Add psr of all incoming synapses to summation points. * * @param[in] sim_info Pointer to the simulation information. */ void GPUSpikingModel::calcSummationMap(const SimulationInfo *sim_info) { // CUDA parameters const int threadsPerBlock = 256; int blocksPerGrid = ( sim_info->totalNeurons + threadsPerBlock - 1 ) / threadsPerBlock; hipLaunchKernelGGL(( calcSummationMapDevice) , dim3(blocksPerGrid), dim3(threadsPerBlock) , 0, 0, sim_info->totalNeurons, synapseIndexMapDevice, m_allSynapsesDevice ); } /* * Update the connection of all the Neurons and Synapses of the simulation. * * @param sim_info SimulationInfo class to read information from. */ void GPUSpikingModel::updateConnections(const SimulationInfo *sim_info) { dynamic_cast<AllSpikingNeurons*>(m_neurons)->copyNeuronDeviceSpikeCountsToHost(m_allNeuronsDevice, sim_info); dynamic_cast<AllSpikingNeurons*>(m_neurons)->copyNeuronDeviceSpikeHistoryToHost(m_allNeuronsDevice, sim_info); // Update Connections data if (m_conns->updateConnections(*m_neurons, sim_info, m_layout)) { m_conns->updateSynapsesWeights(sim_info->totalNeurons, *m_neurons, *m_synapses, sim_info, m_allNeuronsDevice, m_allSynapsesDevice, m_layout); // create synapse inverse map m_synapses->createSynapseImap(m_synapseIndexMap, sim_info); // copy inverse map to the device memory copySynapseIndexMapHostToDevice(*m_synapseIndexMap, sim_info->totalNeurons); } } /* * Update the Neuron's history. * * @param sim_info SimulationInfo to refer from. 
*/ void GPUSpikingModel::updateHistory(const SimulationInfo *sim_info) { Model::updateHistory(sim_info); // clear spike count dynamic_cast<AllSpikingNeurons*>(m_neurons)->clearNeuronSpikeCounts(m_allNeuronsDevice, sim_info); } /* ------------------*\ |* # Helper Functions \* ------------------*/ /* * Allocate device memory for synapse inverse map. * @param count The number of neurons. */ void GPUSpikingModel::allocSynapseImap( int count ) { SynapseIndexMap synapseIndexMap; HANDLE_ERROR( hipMalloc( ( void ** ) &synapseIndexMap.incomingSynapse_begin, count * sizeof( int ) ) ); HANDLE_ERROR( hipMalloc( ( void ** ) &synapseIndexMap.synapseCount, count * sizeof( int ) ) ); HANDLE_ERROR( hipMemset(synapseIndexMap.incomingSynapse_begin, 0, count * sizeof( int ) ) ); HANDLE_ERROR( hipMemset(synapseIndexMap.synapseCount, 0, count * sizeof( int ) ) ); HANDLE_ERROR( hipMalloc( ( void ** ) &synapseIndexMapDevice, sizeof( SynapseIndexMap ) ) ); HANDLE_ERROR( hipMemcpy( synapseIndexMapDevice, &synapseIndexMap, sizeof( SynapseIndexMap ), hipMemcpyHostToDevice ) ); } /* * Deallocate device memory for synapse inverse map. */ void GPUSpikingModel::deleteSynapseImap( ) { SynapseIndexMap synapseIndexMap; HANDLE_ERROR( hipMemcpy ( &synapseIndexMap, synapseIndexMapDevice, sizeof( SynapseIndexMap ), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipFree( synapseIndexMap.incomingSynapse_begin ) ); HANDLE_ERROR( hipFree( synapseIndexMap.synapseCount ) ); HANDLE_ERROR( hipFree( synapseIndexMap.inverseIndex ) ); HANDLE_ERROR( hipFree( synapseIndexMap.activeSynapseIndex ) ); HANDLE_ERROR( hipFree( synapseIndexMapDevice ) ); } /* * Copy SynapseIndexMap in host memory to SynapseIndexMap in device memory. * * @param synapseIndexMapHost Reference to the SynapseIndexMap in host memory. * @param neuron_count The number of neurons. 
*/ void GPUSpikingModel::copySynapseIndexMapHostToDevice(SynapseIndexMap &synapseIndexMapHost, int neuron_count) { int total_synapse_counts = dynamic_cast<AllSynapses*>(m_synapses)->total_synapse_counts; if (total_synapse_counts == 0) return; SynapseIndexMap synapseIndexMap; HANDLE_ERROR( hipMemcpy ( &synapseIndexMap, synapseIndexMapDevice, sizeof( SynapseIndexMap ), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy ( synapseIndexMap.incomingSynapse_begin, synapseIndexMapHost.incomingSynapse_begin, neuron_count * sizeof( int ), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy ( synapseIndexMap.synapseCount, synapseIndexMapHost.synapseCount, neuron_count * sizeof( int ), hipMemcpyHostToDevice ) ); // the number of synapses may change, so we reallocate the memory if (synapseIndexMap.inverseIndex != NULL) { HANDLE_ERROR( hipFree( synapseIndexMap.inverseIndex ) ); } HANDLE_ERROR( hipMalloc( ( void ** ) &synapseIndexMap.inverseIndex, total_synapse_counts * sizeof( BGSIZE ) ) ); HANDLE_ERROR( hipMemcpy ( synapseIndexMap.inverseIndex, synapseIndexMapHost.inverseIndex, total_synapse_counts * sizeof( BGSIZE ), hipMemcpyHostToDevice ) ); if (synapseIndexMap.activeSynapseIndex != NULL) { HANDLE_ERROR( hipFree( synapseIndexMap.activeSynapseIndex ) ); } HANDLE_ERROR( hipMalloc( ( void ** ) &synapseIndexMap.activeSynapseIndex, total_synapse_counts * sizeof( BGSIZE ) ) ); HANDLE_ERROR( hipMemcpy ( synapseIndexMap.activeSynapseIndex, synapseIndexMapHost.activeSynapseIndex, total_synapse_counts * sizeof( BGSIZE ), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy ( synapseIndexMapDevice, &synapseIndexMap, sizeof( SynapseIndexMap ), hipMemcpyHostToDevice ) ); } /* ------------------*\ |* # Global Functions \* ------------------*/ /* * Set the summation points in device memory * * @param[in] num_neurons Number of neurons. * @param[in] allNeuronsDevice Pointer to the Neuron structures in device memory. * @param[in] allSynapsesDevice Pointer to the Synapse structures in device memory. * @param[in] max_synapses Maximum number of synapses per neuron. * @param[in] width Width of neuron map (assumes square). */ __global__ void setSynapseSummationPointDevice(int num_neurons, AllSpikingNeuronsDeviceProperties* allNeuronsDevice, AllSpikingSynapsesDeviceProperties* allSynapsesDevice, int max_synapses, int width) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if ( idx >= num_neurons ) return; int src_neuron = idx; int n_inUse = 0; for (int syn_index = 0; n_inUse < allSynapsesDevice->synapse_counts[src_neuron]; syn_index++) { if (allSynapsesDevice->in_use[max_synapses * src_neuron + syn_index] == true) { int dest_neuron = allSynapsesDevice->destNeuronIndex[max_synapses * src_neuron + syn_index]; allSynapsesDevice->summationPoint[max_synapses * src_neuron + syn_index] = &( allNeuronsDevice->summation_map[dest_neuron] ); n_inUse++; } } } /* * @param[in] totalNeurons Number of neurons. * @param[in] synapseIndexMap Inverse map, which is a table indexed by an input neuron and maps to the synapses that provide input to that neuron. * @param[in] allSynapsesDevice Pointer to Synapse structures in device memory. 
*/ __global__ void calcSummationMapDevice( int totalNeurons, SynapseIndexMap* synapseIndexMapDevice, AllSpikingSynapsesDeviceProperties* allSynapsesDevice ) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if ( idx >= totalNeurons ) return; BGSIZE iCount = synapseIndexMapDevice->synapseCount[idx]; if (iCount != 0) { int beginIndex = synapseIndexMapDevice->incomingSynapse_begin[idx]; BGSIZE* inverseMap_begin = &( synapseIndexMapDevice->inverseIndex[beginIndex] ); BGFLOAT sum = 0.0; BGSIZE syn_i = inverseMap_begin[0]; BGFLOAT &summationPoint = *( allSynapsesDevice->summationPoint[syn_i] ); for ( BGSIZE i = 0; i < iCount; i++ ) { syn_i = inverseMap_begin[i]; sum += allSynapsesDevice->psr[syn_i]; } summationPoint = sum; } }
0cde876aba16c0586401d5d6a93ccc714ddff137.cu
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - **\ * @authors Aaron Oziel, Sean Blackbourn * * Fumitaka Kawasaki (5/3/14): * All functions were completed and working. Therefore, the followng comments * were removed. * * Aaron Wrote (2/3/14): * All comments are now tracking progress in conversion from old GpuSim_struct.cu * file to the new one here. This is a quick key to keep track of their meanings. * * TODO = Needs work and/or is blank. Used to indicate possibly problematic * functions. * DONE = Likely complete functions. Will still need to be checked for * variable continuity and proper arguments. * REMOVED = Deleted, likely due to it becoming unnecessary or not necessary * for GPU implementation. These functions will likely have to be * removed from the Model super class. * COPIED = These functions were in the original GpuSim_struct.cu file * and were directly copy-pasted across to this file. * \** - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - **/ #include "GPUSpikingModel.h" #ifdef PERFORMANCE_METRICS float g_time; cudaEvent_t start, stop; #endif // PERFORMANCE_METRICS __constant__ int d_debug_mask[1]; // ---------------------------------------------------------------------------- GPUSpikingModel::GPUSpikingModel(Connections *conns, IAllNeurons *neurons, IAllSynapses *synapses, Layout *layout) : Model::Model(conns, neurons, synapses, layout), synapseIndexMapDevice(NULL), randNoise_d(NULL), m_allNeuronsDevice(NULL), m_allSynapsesDevice(NULL) { } GPUSpikingModel::~GPUSpikingModel() { //Let Model base class handle de-allocation } /* * Allocates and initializes memories on CUDA device. * * @param[out] allNeuronsDevice Memory loation of the pointer to the neurons list on device memory. * @param[out] allSynapsesDevice Memory loation of the pointer to the synapses list on device memory. * @param[in] sim_info Pointer to the simulation information. */ void GPUSpikingModel::allocDeviceStruct(void** allNeuronsDevice, void** allSynapsesDevice, SimulationInfo *sim_info) { // Allocate Neurons and Synapses strucs on GPU device memory m_neurons->allocNeuronDeviceStruct( allNeuronsDevice, sim_info ); m_synapses->allocSynapseDeviceStruct( allSynapsesDevice, sim_info ); // Allocate memory for random noise array int neuron_count = sim_info->totalNeurons; BGSIZE randNoise_d_size = neuron_count * sizeof (float); // size of random noise array HANDLE_ERROR( cudaMalloc ( ( void ** ) &randNoise_d, randNoise_d_size ) ); // Copy host neuron and synapse arrays into GPU device m_neurons->copyNeuronHostToDevice( *allNeuronsDevice, sim_info ); m_synapses->copySynapseHostToDevice( *allSynapsesDevice, sim_info ); // allocate synapse inverse map in device memory allocSynapseImap( neuron_count ); } /* * Copies device memories to host memories and deallocaes them. * * @param[out] allNeuronsDevice Memory loation of the pointer to the neurons list on device memory. * @param[out] allSynapsesDevice Memory loation of the pointer to the synapses list on device memory. * @param[in] sim_info Pointer to the simulation information. 
*/ void GPUSpikingModel::deleteDeviceStruct(void** allNeuronsDevice, void** allSynapsesDevice, SimulationInfo *sim_info) { // copy device synapse and neuron structs to host memory m_neurons->copyNeuronDeviceToHost( *allNeuronsDevice, sim_info ); // Deallocate device memory m_neurons->deleteNeuronDeviceStruct( *allNeuronsDevice, sim_info ); // copy device synapse and neuron structs to host memory m_synapses->copySynapseDeviceToHost( *allSynapsesDevice, sim_info ); // Deallocate device memory m_synapses->deleteSynapseDeviceStruct( *allSynapsesDevice ); deleteSynapseImap(); HANDLE_ERROR( cudaFree( randNoise_d ) ); } /* * Sets up the Simulation. * * @param sim_info SimulationInfo class to read information from. */ void GPUSpikingModel::setupSim(SimulationInfo *sim_info) { // Set device ID HANDLE_ERROR( cudaSetDevice( g_deviceId ) ); // Set DEBUG flag HANDLE_ERROR( cudaMemcpyToSymbol (d_debug_mask, &g_debug_mask, sizeof(int) ) ); Model::setupSim(sim_info); //initialize Mersenne Twister //assuming neuron_count >= 100 and is a multiple of 100. Note rng_mt_rng_count must be <= MT_RNG_COUNT int rng_blocks = 25; //# of blocks the kernel will use int rng_nPerRng = 4; //# of iterations per thread (thread granularity, # of rands generated per thread) int rng_mt_rng_count = sim_info->totalNeurons/rng_nPerRng; //# of threads to generate for neuron_count rand #s int rng_threads = rng_mt_rng_count/rng_blocks; //# threads per block needed initMTGPU(sim_info->seed, rng_blocks, rng_threads, rng_nPerRng, rng_mt_rng_count); #ifdef PERFORMANCE_METRICS cudaEventCreate( &start ); cudaEventCreate( &stop ); t_gpu_rndGeneration = 0.0f; t_gpu_advanceNeurons = 0.0f; t_gpu_advanceSynapses = 0.0f; t_gpu_calcSummation = 0.0f; #endif // PERFORMANCE_METRICS // allocates memories on CUDA device allocDeviceStruct((void **)&m_allNeuronsDevice, (void **)&m_allSynapsesDevice, sim_info); // set device summation points int neuron_count = sim_info->totalNeurons; const int threadsPerBlock = 256; int blocksPerGrid = ( neuron_count + threadsPerBlock - 1 ) / threadsPerBlock; setSynapseSummationPointDevice <<< blocksPerGrid, threadsPerBlock >>> (neuron_count, m_allNeuronsDevice, m_allSynapsesDevice, sim_info->maxSynapsesPerNeuron, sim_info->width); // copy inverse map to the device memory copySynapseIndexMapHostToDevice(*m_synapseIndexMap, sim_info->totalNeurons); // set some parameters used for advanceNeuronsDevice m_neurons->setAdvanceNeuronsDeviceParams(*m_synapses); // set some parameters used for advanceSynapsesDevice m_synapses->setAdvanceSynapsesDeviceParams(); } /* * Begin terminating the simulator. * * @param sim_info SimulationInfo to refer. */ void GPUSpikingModel::cleanupSim(SimulationInfo *sim_info) { // deallocates memories on CUDA device deleteDeviceStruct((void**)&m_allNeuronsDevice, (void**)&m_allSynapsesDevice, sim_info); #ifdef PERFORMANCE_METRICS cudaEventDestroy( start ); cudaEventDestroy( stop ); #endif // PERFORMANCE_METRICS } /* * Loads the simulation based on istream input. * * @param input istream to read from. * @param sim_info used as a reference to set info for neurons and synapses. 
*/ void GPUSpikingModel::deserialize(istream& input, const SimulationInfo *sim_info) { Model::deserialize(input, sim_info); // copy inverse map to the device memory copySynapseIndexMapHostToDevice(*m_synapseIndexMap, sim_info->totalNeurons); // Reinitialize device struct - Copy host neuron and synapse arrays into GPU device m_neurons->copyNeuronHostToDevice( m_allNeuronsDevice, sim_info ); m_synapses->copySynapseHostToDevice( m_allSynapsesDevice, sim_info ); // set summation points int neuron_count = sim_info->totalNeurons; const int threadsPerBlock = 256; int blocksPerGrid = ( neuron_count + threadsPerBlock - 1 ) / threadsPerBlock; setSynapseSummationPointDevice <<< blocksPerGrid, threadsPerBlock >>> (neuron_count, m_allNeuronsDevice, m_allSynapsesDevice, sim_info->maxSynapsesPerNeuron, sim_info->width); } /* * Advance everything in the model one time step. In this case, that * means calling all of the kernels that do the "micro step" updating * (i.e., NOT the stuff associated with growth). * * @param sim_info SimulationInfo class to read information from. */ void GPUSpikingModel::advance(const SimulationInfo *sim_info) { #ifdef PERFORMANCE_METRICS startTimer(); #endif // PERFORMANCE_METRICS normalMTGPU(randNoise_d); #ifdef PERFORMANCE_METRICS lapTime(t_gpu_rndGeneration); startTimer(); #endif // PERFORMANCE_METRICS // display running info to console // Advance neurons -------------> m_neurons->advanceNeurons(*m_synapses, m_allNeuronsDevice, m_allSynapsesDevice, sim_info, randNoise_d, synapseIndexMapDevice); #ifdef PERFORMANCE_METRICS lapTime(t_gpu_advanceNeurons); startTimer(); #endif // PERFORMANCE_METRICS // Advance synapses -------------> m_synapses->advanceSynapses(m_allSynapsesDevice, m_allNeuronsDevice, synapseIndexMapDevice, sim_info); #ifdef PERFORMANCE_METRICS lapTime(t_gpu_advanceSynapses); startTimer(); #endif // PERFORMANCE_METRICS // calculate summation point calcSummationMap(sim_info); #ifdef PERFORMANCE_METRICS lapTime(t_gpu_calcSummation); #endif // PERFORMANCE_METRICS } /* * Add psr of all incoming synapses to summation points. * * @param[in] sim_info Pointer to the simulation information. */ void GPUSpikingModel::calcSummationMap(const SimulationInfo *sim_info) { // CUDA parameters const int threadsPerBlock = 256; int blocksPerGrid = ( sim_info->totalNeurons + threadsPerBlock - 1 ) / threadsPerBlock; calcSummationMapDevice <<< blocksPerGrid, threadsPerBlock >>> ( sim_info->totalNeurons, synapseIndexMapDevice, m_allSynapsesDevice ); } /* * Update the connection of all the Neurons and Synapses of the simulation. * * @param sim_info SimulationInfo class to read information from. */ void GPUSpikingModel::updateConnections(const SimulationInfo *sim_info) { dynamic_cast<AllSpikingNeurons*>(m_neurons)->copyNeuronDeviceSpikeCountsToHost(m_allNeuronsDevice, sim_info); dynamic_cast<AllSpikingNeurons*>(m_neurons)->copyNeuronDeviceSpikeHistoryToHost(m_allNeuronsDevice, sim_info); // Update Connections data if (m_conns->updateConnections(*m_neurons, sim_info, m_layout)) { m_conns->updateSynapsesWeights(sim_info->totalNeurons, *m_neurons, *m_synapses, sim_info, m_allNeuronsDevice, m_allSynapsesDevice, m_layout); // create synapse inverse map m_synapses->createSynapseImap(m_synapseIndexMap, sim_info); // copy inverse map to the device memory copySynapseIndexMapHostToDevice(*m_synapseIndexMap, sim_info->totalNeurons); } } /* * Update the Neuron's history. * * @param sim_info SimulationInfo to refer from. 
*/ void GPUSpikingModel::updateHistory(const SimulationInfo *sim_info) { Model::updateHistory(sim_info); // clear spike count dynamic_cast<AllSpikingNeurons*>(m_neurons)->clearNeuronSpikeCounts(m_allNeuronsDevice, sim_info); } /* ------------------*\ |* # Helper Functions \* ------------------*/ /* * Allocate device memory for synapse inverse map. * @param count The number of neurons. */ void GPUSpikingModel::allocSynapseImap( int count ) { SynapseIndexMap synapseIndexMap; HANDLE_ERROR( cudaMalloc( ( void ** ) &synapseIndexMap.incomingSynapse_begin, count * sizeof( int ) ) ); HANDLE_ERROR( cudaMalloc( ( void ** ) &synapseIndexMap.synapseCount, count * sizeof( int ) ) ); HANDLE_ERROR( cudaMemset(synapseIndexMap.incomingSynapse_begin, 0, count * sizeof( int ) ) ); HANDLE_ERROR( cudaMemset(synapseIndexMap.synapseCount, 0, count * sizeof( int ) ) ); HANDLE_ERROR( cudaMalloc( ( void ** ) &synapseIndexMapDevice, sizeof( SynapseIndexMap ) ) ); HANDLE_ERROR( cudaMemcpy( synapseIndexMapDevice, &synapseIndexMap, sizeof( SynapseIndexMap ), cudaMemcpyHostToDevice ) ); } /* * Deallocate device memory for synapse inverse map. */ void GPUSpikingModel::deleteSynapseImap( ) { SynapseIndexMap synapseIndexMap; HANDLE_ERROR( cudaMemcpy ( &synapseIndexMap, synapseIndexMapDevice, sizeof( SynapseIndexMap ), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaFree( synapseIndexMap.incomingSynapse_begin ) ); HANDLE_ERROR( cudaFree( synapseIndexMap.synapseCount ) ); HANDLE_ERROR( cudaFree( synapseIndexMap.inverseIndex ) ); HANDLE_ERROR( cudaFree( synapseIndexMap.activeSynapseIndex ) ); HANDLE_ERROR( cudaFree( synapseIndexMapDevice ) ); } /* * Copy SynapseIndexMap in host memory to SynapseIndexMap in device memory. * * @param synapseIndexMapHost Reference to the SynapseIndexMap in host memory. * @param neuron_count The number of neurons. 
*/ void GPUSpikingModel::copySynapseIndexMapHostToDevice(SynapseIndexMap &synapseIndexMapHost, int neuron_count) { int total_synapse_counts = dynamic_cast<AllSynapses*>(m_synapses)->total_synapse_counts; if (total_synapse_counts == 0) return; SynapseIndexMap synapseIndexMap; HANDLE_ERROR( cudaMemcpy ( &synapseIndexMap, synapseIndexMapDevice, sizeof( SynapseIndexMap ), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy ( synapseIndexMap.incomingSynapse_begin, synapseIndexMapHost.incomingSynapse_begin, neuron_count * sizeof( int ), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy ( synapseIndexMap.synapseCount, synapseIndexMapHost.synapseCount, neuron_count * sizeof( int ), cudaMemcpyHostToDevice ) ); // the number of synapses may change, so we reallocate the memory if (synapseIndexMap.inverseIndex != NULL) { HANDLE_ERROR( cudaFree( synapseIndexMap.inverseIndex ) ); } HANDLE_ERROR( cudaMalloc( ( void ** ) &synapseIndexMap.inverseIndex, total_synapse_counts * sizeof( BGSIZE ) ) ); HANDLE_ERROR( cudaMemcpy ( synapseIndexMap.inverseIndex, synapseIndexMapHost.inverseIndex, total_synapse_counts * sizeof( BGSIZE ), cudaMemcpyHostToDevice ) ); if (synapseIndexMap.activeSynapseIndex != NULL) { HANDLE_ERROR( cudaFree( synapseIndexMap.activeSynapseIndex ) ); } HANDLE_ERROR( cudaMalloc( ( void ** ) &synapseIndexMap.activeSynapseIndex, total_synapse_counts * sizeof( BGSIZE ) ) ); HANDLE_ERROR( cudaMemcpy ( synapseIndexMap.activeSynapseIndex, synapseIndexMapHost.activeSynapseIndex, total_synapse_counts * sizeof( BGSIZE ), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy ( synapseIndexMapDevice, &synapseIndexMap, sizeof( SynapseIndexMap ), cudaMemcpyHostToDevice ) ); } /* ------------------*\ |* # Global Functions \* ------------------*/ /* * Set the summation points in device memory * * @param[in] num_neurons Number of neurons. * @param[in] allNeuronsDevice Pointer to the Neuron structures in device memory. * @param[in] allSynapsesDevice Pointer to the Synapse structures in device memory. * @param[in] max_synapses Maximum number of synapses per neuron. * @param[in] width Width of neuron map (assumes square). */ __global__ void setSynapseSummationPointDevice(int num_neurons, AllSpikingNeuronsDeviceProperties* allNeuronsDevice, AllSpikingSynapsesDeviceProperties* allSynapsesDevice, int max_synapses, int width) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if ( idx >= num_neurons ) return; int src_neuron = idx; int n_inUse = 0; for (int syn_index = 0; n_inUse < allSynapsesDevice->synapse_counts[src_neuron]; syn_index++) { if (allSynapsesDevice->in_use[max_synapses * src_neuron + syn_index] == true) { int dest_neuron = allSynapsesDevice->destNeuronIndex[max_synapses * src_neuron + syn_index]; allSynapsesDevice->summationPoint[max_synapses * src_neuron + syn_index] = &( allNeuronsDevice->summation_map[dest_neuron] ); n_inUse++; } } } /* * @param[in] totalNeurons Number of neurons. * @param[in] synapseIndexMap Inverse map, which is a table indexed by an input neuron and maps to the synapses that provide input to that neuron. * @param[in] allSynapsesDevice Pointer to Synapse structures in device memory. 
*/ __global__ void calcSummationMapDevice( int totalNeurons, SynapseIndexMap* synapseIndexMapDevice, AllSpikingSynapsesDeviceProperties* allSynapsesDevice ) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if ( idx >= totalNeurons ) return; BGSIZE iCount = synapseIndexMapDevice->synapseCount[idx]; if (iCount != 0) { int beginIndex = synapseIndexMapDevice->incomingSynapse_begin[idx]; BGSIZE* inverseMap_begin = &( synapseIndexMapDevice->inverseIndex[beginIndex] ); BGFLOAT sum = 0.0; BGSIZE syn_i = inverseMap_begin[0]; BGFLOAT &summationPoint = *( allSynapsesDevice->summationPoint[syn_i] ); for ( BGSIZE i = 0; i < iCount; i++ ) { syn_i = inverseMap_begin[i]; sum += allSynapsesDevice->psr[syn_i]; } summationPoint = sum; } }
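// Editor's note (illustrative sketch, not part of the archived file above; array names are
// hypothetical and flattened from the SynapseIndexMap struct): calcSummationMapDevice is an
// inverse-map gather in which each thread owns one neuron, walks that neuron's list of incoming
// synapse indices, and accumulates their post-synaptic responses. The same pattern in isolation:
__global__ void gatherSumSketch(int nNeurons,
                                const int *incomingBegin,   // start offset of each neuron's index list
                                const int *incomingCount,   // number of incoming synapses per neuron
                                const int *inverseIndex,    // flattened list of synapse indices
                                const float *psr,           // per-synapse post-synaptic response
                                float *summation)           // per-neuron output
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= nNeurons) return;
    float sum = 0.0f;
    const int begin = incomingBegin[idx];
    const int count = incomingCount[idx];
    for (int i = 0; i < count; ++i)
        sum += psr[inverseIndex[begin + i]];
    summation[idx] = sum;
}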
c02e3176d7d5293bd952dedfed96f157de375d48.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @author Mark Gates @author Azzam Haidar @author Ichitaro Yamazaki @generated from magmablas/zlacpy_sym_out.cu, normal z -> d, Thu Oct 8 23:05:33 2020 */ #include "magma_internal.h" #define BLK_X 64 #define BLK_Y 32 /******************************************************************************/ /* Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks. Each block has BLK_X threads. Each thread loops across one row, updating BLK_Y entries. Code similar to dlaset, dlacpy, dlag2s, clag2z, dgeadd. */ static __device__ void dlacpy_sym_out_full_device( int m, int n, const double *dA, int ldda, double *dB, int lddb ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column */ bool full = (iby + BLK_Y <= n); /* do only rows inside matrix */ if ( ind < m ) { dA += ind + iby*ldda; dB += ind + iby*lddb; if ( full ) { // full block-column #pragma unroll for( int j=0; j < BLK_Y; ++j ) { dB[j*lddb] = dA[j*ldda]; } } else { // partial block-column for( int j=0; j < BLK_Y && iby+j < n; ++j ) { dB[j*lddb] = dA[j*ldda]; } } } } /******************************************************************************/ /* Similar to dlacpy_full, but updates only the diagonal and below. Blocks that are fully above the diagonal exit immediately. Code similar to dlaset, dlacpy, zlat2c, clat2z. */ static __device__ void dlacpy_sym_out_lower_device( int m, int n, magma_int_t *rows, magma_int_t *perm, const double *dA, int ldda, double *dB, int lddb ) { int ind = blockIdx.x*BLK_X + threadIdx.x; // row int iby = blockIdx.y*BLK_Y; // col /* check if full block-column && (below diag) */ bool full = (iby + BLK_Y <= n); for (int jj=0; jj < n; jj++) { perm[rows[2*jj+1]] = rows[2*jj+1]; } /* do only rows inside matrix, and blocks not above diag */ if ( ind < m ) { if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int jj=0; jj < BLK_Y; ++jj ) { int j = rows[2*(iby+jj)+1]; if (ind <= j) dB[j + ind*ldda] = MAGMA_D_CONJ( dA[ind + (iby+jj)*lddb] ); else dB[ind + j*ldda] = dA[ind + (iby+jj)*lddb]; } } else { // either partial block-column or diagonal block for( int jj=0; jj < BLK_Y && iby+jj < n; ++jj ) { int j = rows[2*(iby+jj)+1]; if (ind <= j) dB[j + ind*ldda] = MAGMA_D_CONJ( dA[ind + (iby+jj)*lddb] ); else dB[ind + j*ldda] = dA[ind + (iby+jj)*lddb]; } } } } /******************************************************************************/ /* Similar to dlacpy_full, but updates only the diagonal and above. Blocks that are fully below the diagonal exit immediately. Code similar to dlaset, dlacpy, zlat2c, clat2z. 
*/ static __device__ void dlacpy_sym_out_upper_device( int m, int n, const double *dA, int ldda, double *dB, int lddb ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (above diag) */ bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby)); /* do only rows inside matrix, and blocks not below diag */ if ( ind < m && ind < iby + BLK_Y ) { dA += ind + iby*ldda; dB += ind + iby*lddb; if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int j=0; j < BLK_Y; ++j ) { dB[j*lddb] = dA[j*ldda]; } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n; ++j ) { if ( ind <= iby+j ) { dB[j*lddb] = dA[j*ldda]; } } } } } /******************************************************************************/ /* kernel wrappers to call the device functions. */ __global__ void dlacpy_sym_out_full_kernel( int m, int n, const double *dA, int ldda, double *dB, int lddb ) { dlacpy_sym_out_full_device(m, n, dA, ldda, dB, lddb); } __global__ void dlacpy_sym_out_lower_kernel( int m, int n, magma_int_t *rows, magma_int_t *perm, const double *dA, int ldda, double *dB, int lddb ) { dlacpy_sym_out_lower_device(m, n, rows, perm, dA, ldda, dB, lddb); } __global__ void dlacpy_sym_out_upper_kernel( int m, int n, const double *dA, int ldda, double *dB, int lddb ) { dlacpy_sym_out_upper_device(m, n, dA, ldda, dB, lddb); } /***************************************************************************//** Purpose ------- DLACPY_SYM_OUT copies all or part of a two-dimensional matrix dA to another matrix dB. This is the same as DLACPY, but adds queue argument. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of the matrix dA to be copied to dB. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part - = MagmaFull: All of the matrix dA @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] rows INTEGER array, on GPU, dimension (2*n) On entry, it stores the new pivots such that rows[i]-th and rows[n+i]-th rows are swapped. @param[in,out] perm INTEGER array, on GPU, dimension (m) On entry, it stores the permutation array such that i-th row will be the original perm[i]-th row after the pivots are applied. On exit, it is restored to be identity permutation. @param[in,out] dA DOUBLE PRECISION array, dimension (LDDA,N) The M-by-N matrix dA. If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed; if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed. On exit, the matrix after the symmetric pivoting is applied. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1,M). @param[in] dB DOUBLE PRECISION array, dimension (LDDB,N) The M-by-N matrix dB. On entry, dB stores the columns after row pivoting is applied. @param[in] lddb INTEGER The leading dimension of the array dB. LDDB >= max(1,M). @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_lacpy *******************************************************************************/ extern "C" void magmablas_dlacpy_sym_out( magma_uplo_t uplo, magma_int_t m, magma_int_t n, magma_int_t *rows, magma_int_t *perm, magmaDouble_const_ptr dA, magma_int_t ldda, magmaDouble_ptr dB, magma_int_t lddb, magma_queue_t queue ) { magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull ) info = -1; else if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( ldda < max(1,m)) info = -5; else if ( lddb < max(1,m)) info = -7; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } if ( m == 0 || n == 0 ) { return; } dim3 threads( BLK_X, 1 ); dim3 grid( magma_ceildiv(m, BLK_X), magma_ceildiv(n, BLK_Y) ); if ( uplo == MagmaLower ) { hipLaunchKernelGGL(( dlacpy_sym_out_lower_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, rows, perm, dA, ldda, dB, lddb ); } else if ( uplo == MagmaUpper ) { hipLaunchKernelGGL(( dlacpy_sym_out_upper_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dA, ldda, dB, lddb ); } else { hipLaunchKernelGGL(( dlacpy_sym_out_full_kernel) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dA, ldda, dB, lddb ); } }
c02e3176d7d5293bd952dedfed96f157de375d48.cu
/* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @author Mark Gates @author Azzam Haidar @author Ichitaro Yamazaki @generated from magmablas/zlacpy_sym_out.cu, normal z -> d, Thu Oct 8 23:05:33 2020 */ #include "magma_internal.h" #define BLK_X 64 #define BLK_Y 32 /******************************************************************************/ /* Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks. Each block has BLK_X threads. Each thread loops across one row, updating BLK_Y entries. Code similar to dlaset, dlacpy, dlag2s, clag2z, dgeadd. */ static __device__ void dlacpy_sym_out_full_device( int m, int n, const double *dA, int ldda, double *dB, int lddb ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column */ bool full = (iby + BLK_Y <= n); /* do only rows inside matrix */ if ( ind < m ) { dA += ind + iby*ldda; dB += ind + iby*lddb; if ( full ) { // full block-column #pragma unroll for( int j=0; j < BLK_Y; ++j ) { dB[j*lddb] = dA[j*ldda]; } } else { // partial block-column for( int j=0; j < BLK_Y && iby+j < n; ++j ) { dB[j*lddb] = dA[j*ldda]; } } } } /******************************************************************************/ /* Similar to dlacpy_full, but updates only the diagonal and below. Blocks that are fully above the diagonal exit immediately. Code similar to dlaset, dlacpy, zlat2c, clat2z. */ static __device__ void dlacpy_sym_out_lower_device( int m, int n, magma_int_t *rows, magma_int_t *perm, const double *dA, int ldda, double *dB, int lddb ) { int ind = blockIdx.x*BLK_X + threadIdx.x; // row int iby = blockIdx.y*BLK_Y; // col /* check if full block-column && (below diag) */ bool full = (iby + BLK_Y <= n); for (int jj=0; jj < n; jj++) { perm[rows[2*jj+1]] = rows[2*jj+1]; } /* do only rows inside matrix, and blocks not above diag */ if ( ind < m ) { if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int jj=0; jj < BLK_Y; ++jj ) { int j = rows[2*(iby+jj)+1]; if (ind <= j) dB[j + ind*ldda] = MAGMA_D_CONJ( dA[ind + (iby+jj)*lddb] ); else dB[ind + j*ldda] = dA[ind + (iby+jj)*lddb]; } } else { // either partial block-column or diagonal block for( int jj=0; jj < BLK_Y && iby+jj < n; ++jj ) { int j = rows[2*(iby+jj)+1]; if (ind <= j) dB[j + ind*ldda] = MAGMA_D_CONJ( dA[ind + (iby+jj)*lddb] ); else dB[ind + j*ldda] = dA[ind + (iby+jj)*lddb]; } } } } /******************************************************************************/ /* Similar to dlacpy_full, but updates only the diagonal and above. Blocks that are fully below the diagonal exit immediately. Code similar to dlaset, dlacpy, zlat2c, clat2z. 
*/ static __device__ void dlacpy_sym_out_upper_device( int m, int n, const double *dA, int ldda, double *dB, int lddb ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (above diag) */ bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby)); /* do only rows inside matrix, and blocks not below diag */ if ( ind < m && ind < iby + BLK_Y ) { dA += ind + iby*ldda; dB += ind + iby*lddb; if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int j=0; j < BLK_Y; ++j ) { dB[j*lddb] = dA[j*ldda]; } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n; ++j ) { if ( ind <= iby+j ) { dB[j*lddb] = dA[j*ldda]; } } } } } /******************************************************************************/ /* kernel wrappers to call the device functions. */ __global__ void dlacpy_sym_out_full_kernel( int m, int n, const double *dA, int ldda, double *dB, int lddb ) { dlacpy_sym_out_full_device(m, n, dA, ldda, dB, lddb); } __global__ void dlacpy_sym_out_lower_kernel( int m, int n, magma_int_t *rows, magma_int_t *perm, const double *dA, int ldda, double *dB, int lddb ) { dlacpy_sym_out_lower_device(m, n, rows, perm, dA, ldda, dB, lddb); } __global__ void dlacpy_sym_out_upper_kernel( int m, int n, const double *dA, int ldda, double *dB, int lddb ) { dlacpy_sym_out_upper_device(m, n, dA, ldda, dB, lddb); } /***************************************************************************//** Purpose ------- DLACPY_SYM_OUT copies all or part of a two-dimensional matrix dA to another matrix dB. This is the same as DLACPY, but adds queue argument. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of the matrix dA to be copied to dB. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part - = MagmaFull: All of the matrix dA @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] rows INTEGER array, on GPU, dimension (2*n) On entry, it stores the new pivots such that rows[i]-th and rows[n+i]-th rows are swapped. @param[in,out] perm INTEGER array, on GPU, dimension (m) On entry, it stores the permutation array such that i-th row will be the original perm[i]-th row after the pivots are applied. On exit, it is restored to be identity permutation. @param[in,out] dA DOUBLE PRECISION array, dimension (LDDA,N) The M-by-N matrix dA. If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed; if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed. On exit, the matrix after the symmetric pivoting is applied. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1,M). @param[in] dB DOUBLE PRECISION array, dimension (LDDB,N) The M-by-N matrix dB. On entry, dB stores the columns after row pivoting is applied. @param[in] lddb INTEGER The leading dimension of the array dB. LDDB >= max(1,M). @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_lacpy *******************************************************************************/ extern "C" void magmablas_dlacpy_sym_out( magma_uplo_t uplo, magma_int_t m, magma_int_t n, magma_int_t *rows, magma_int_t *perm, magmaDouble_const_ptr dA, magma_int_t ldda, magmaDouble_ptr dB, magma_int_t lddb, magma_queue_t queue ) { magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull ) info = -1; else if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( ldda < max(1,m)) info = -5; else if ( lddb < max(1,m)) info = -7; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } if ( m == 0 || n == 0 ) { return; } dim3 threads( BLK_X, 1 ); dim3 grid( magma_ceildiv(m, BLK_X), magma_ceildiv(n, BLK_Y) ); if ( uplo == MagmaLower ) { dlacpy_sym_out_lower_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, rows, perm, dA, ldda, dB, lddb ); } else if ( uplo == MagmaUpper ) { dlacpy_sym_out_upper_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, dA, ldda, dB, lddb ); } else { dlacpy_sym_out_full_kernel <<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, dA, ldda, dB, lddb ); } }
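// Editor's note (illustrative sketch, not part of the archived pair above): the lacpy kernels use
// a BLK_X x BLK_Y blocking in which each thread owns one row of a block-column and loops over up
// to BLK_Y columns, with column-major indexing through the leading dimensions ldda/lddb. A minimal
// full-matrix copy with the same indexing, outside of MAGMA (kernel name and constants are hypothetical):
__global__ void copyTiledSketch(int m, int n, const double *dA, int ldda, double *dB, int lddb)
{
    const int TILE_X = 64;   // must match the threads-per-block used at launch
    const int TILE_Y = 32;   // columns handled by each block
    int row  = blockIdx.x * TILE_X + threadIdx.x;
    int col0 = blockIdx.y * TILE_Y;
    if (row >= m) return;
    for (int j = 0; j < TILE_Y && col0 + j < n; ++j)
        dB[row + (col0 + j) * lddb] = dA[row + (col0 + j) * ldda];
}
// Matching launch geometry (host side): dim3 threads(64, 1); dim3 grid((m + 63) / 64, (n + 31) / 32);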
b40e6dbd073427a474c86e3d6d751a42cf334f6f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <time.h> #include <string.h> #include "scalarProd_kernel.cuh" #include <helper_functions.h> #include <helper_cuda.h> const int VECTOR_N = 256; //Number of elements per vector; arbitrary, //but strongly preferred to be a multiple of warp size //to meet memory coalescing constraints const int ELEMENT_N = 4096; //Total number of data elements const int DATA_N = VECTOR_N * ELEMENT_N; const int DATA_SZ = DATA_N * sizeof(float); const int RESULT_SZ = VECTOR_N * sizeof(float); float RandFloat(float low, float high) { float t = (float)rand() / (float)RAND_MAX; return (1.0f - t) * low + t * high; } __global__ void msfdc(){ __shared__ int wtf[4]; printf(" msfdc: %lp\n", wtf); __syncthreads(); } int main(int argc, char **argv){ float *h_A, *h_B, *h_C_CPU, *h_C_GPU; float *d_A, *d_B, *d_C; double delta, ref, sum_delta, sum_ref, L1norm; StopWatchInterface *hTimer = NULL; int i; hipLaunchKernelGGL(( msfdc), dim3(1),dim3(1), 0, 0, ); hipDeviceSynchronize(); printf("%s Starting...\n\n", argv[0]); // use command-line specified CUDA device, otherwise use device with highest Gflops/s // findCudaDevice(argc, (const char **)argv); sdkCreateTimer(&hTimer); printf("Initializing data...\n"); printf("...allocating CPU memory.\n"); h_A = (float *)malloc(DATA_SZ); h_B = (float *)malloc(DATA_SZ); h_C_CPU = (float *)malloc(RESULT_SZ); h_C_GPU = (float *)malloc(RESULT_SZ); printf("...allocating GPU memory.\n"); checkCudaErrors(hipMalloc((void **)&d_C, RESULT_SZ )); checkCudaErrors(hipMalloc((void **)&d_B, DATA_SZ )); checkCudaErrors(hipMalloc((void **)&d_A, DATA_SZ )); printf("...generating input data in CPU mem.\n"); srand(123); //Generating input data on CPU for (i = 0; i < DATA_N; i++) { h_A[i] = RandFloat(0.0f, 1.0f); h_B[i] = RandFloat(0.0f, 1.0f); } printf("...copying input data to GPU mem.\n"); //Copy options data to GPU memory for further processing checkCudaErrors(hipMemcpy(d_A, h_A, DATA_SZ, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_B, h_B, DATA_SZ, hipMemcpyHostToDevice)); printf("Data init done.\n"); hipLaunchKernelGGL(( scalarProdGPU), dim3(128),dim3(256), 0, 0, d_C, d_A, d_B, VECTOR_N , ELEMENT_N ); checkCudaErrors(hipDeviceSynchronize() ); }
b40e6dbd073427a474c86e3d6d751a42cf334f6f.cu
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <string.h> #include "scalarProd_kernel.cuh" #include <helper_functions.h> #include <helper_cuda.h> const int VECTOR_N = 256; //Number of elements per vector; arbitrary, //but strongly preferred to be a multiple of warp size //to meet memory coalescing constraints const int ELEMENT_N = 4096; //Total number of data elements const int DATA_N = VECTOR_N * ELEMENT_N; const int DATA_SZ = DATA_N * sizeof(float); const int RESULT_SZ = VECTOR_N * sizeof(float); float RandFloat(float low, float high) { float t = (float)rand() / (float)RAND_MAX; return (1.0f - t) * low + t * high; } __global__ void msfdc(){ __shared__ int wtf[4]; printf(" msfdc: %lp\n", wtf); __syncthreads(); } int main(int argc, char **argv){ float *h_A, *h_B, *h_C_CPU, *h_C_GPU; float *d_A, *d_B, *d_C; double delta, ref, sum_delta, sum_ref, L1norm; StopWatchInterface *hTimer = NULL; int i; msfdc<<<1,1>>>(); cudaDeviceSynchronize(); printf("%s Starting...\n\n", argv[0]); // use command-line specified CUDA device, otherwise use device with highest Gflops/s // findCudaDevice(argc, (const char **)argv); sdkCreateTimer(&hTimer); printf("Initializing data...\n"); printf("...allocating CPU memory.\n"); h_A = (float *)malloc(DATA_SZ); h_B = (float *)malloc(DATA_SZ); h_C_CPU = (float *)malloc(RESULT_SZ); h_C_GPU = (float *)malloc(RESULT_SZ); printf("...allocating GPU memory.\n"); checkCudaErrors(cudaMalloc((void **)&d_C, RESULT_SZ )); checkCudaErrors(cudaMalloc((void **)&d_B, DATA_SZ )); checkCudaErrors(cudaMalloc((void **)&d_A, DATA_SZ )); printf("...generating input data in CPU mem.\n"); srand(123); //Generating input data on CPU for (i = 0; i < DATA_N; i++) { h_A[i] = RandFloat(0.0f, 1.0f); h_B[i] = RandFloat(0.0f, 1.0f); } printf("...copying input data to GPU mem.\n"); //Copy options data to GPU memory for further processing checkCudaErrors(cudaMemcpy(d_A, h_A, DATA_SZ, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_B, h_B, DATA_SZ, cudaMemcpyHostToDevice)); printf("Data init done.\n"); scalarProdGPU<<<128,256>>>( d_C, d_A, d_B, VECTOR_N , ELEMENT_N ); checkCudaErrors(cudaDeviceSynchronize() ); }
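The CUDA listing above stops right after the kernel launch: h_C_CPU, h_C_GPU and the accumulators for the L1 error are declared but never used, the result is never copied back, and nothing is freed. The fragment below is a hedged sketch of the verification and cleanup one might append inside main(); it assumes that scalarProdGPU (whose header is not shown here) writes one dot product per vector into d_C, with vector i occupying the ELEMENT_N consecutive elements starting at i*ELEMENT_N, and that <math.h> is available for fabs.

    // Copy the VECTOR_N results back and compare against a CPU reference.
    checkCudaErrors(cudaMemcpy(h_C_GPU, d_C, RESULT_SZ, cudaMemcpyDeviceToHost));

    sum_delta = 0;
    sum_ref   = 0;
    for (i = 0; i < VECTOR_N; i++)
    {
        double dotCPU = 0;
        for (int j = 0; j < ELEMENT_N; j++)
            dotCPU += (double)h_A[i * ELEMENT_N + j] * (double)h_B[i * ELEMENT_N + j];
        h_C_CPU[i] = (float)dotCPU;

        delta      = fabs(h_C_CPU[i] - h_C_GPU[i]);
        ref        = fabs(h_C_CPU[i]);
        sum_delta += delta;
        sum_ref   += ref;
    }
    L1norm = sum_delta / sum_ref;
    printf("L1 error: %E (%s)\n", L1norm, (L1norm < 1e-6) ? "OK" : "FAILED");

    // Release GPU and CPU resources.
    checkCudaErrors(cudaFree(d_C));
    checkCudaErrors(cudaFree(d_B));
    checkCudaErrors(cudaFree(d_A));
    free(h_C_GPU);
    free(h_C_CPU);
    free(h_B);
    free(h_A);
    sdkDeleteTimer(&hTimer);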
2477fcac66c37dc58fc7dbd0299d594c48654e0b.hip
// !!! This is a file automatically generated by hipify!!! // PFOR and PFOR-DELTA Compression and decompression routines #include <stdio.h> #include <fstream> #include <iomanip> #include <exception> #include <thrust/device_vector.h> #include <thrust/iterator/discard_iterator.h> #include <thrust/extrema.h> // YOU NEED TO MODIFY THE CUDPP PATH !!!!! #ifdef _WIN64 #include "C:\Users\anton\Favorites\Downloads\cudpp_src_2.0\cudpp_src_2.0\include\cudpp_hash.h" #else #include "./cudpp_src_2.0/include/cudpp_hash.h" #endif #include "sorts.hip" using namespace std; unsigned long long int* raw_decomp = NULL; unsigned int raw_decomp_length = 0; std::map<string, unsigned int> cnt_counts; string curr_file; struct bool_to_int { __host__ __device__ unsigned int operator()(const bool x) { return (unsigned int)x; } }; struct ui_to_ll { __host__ __device__ long long int operator()(const unsigned int x) { return (long long int)x; } }; template<typename T> struct nz { __host__ __device__ bool operator()(const T x) { return (x != 0); } }; struct compress_functor_int { const int_type * source; unsigned long long int * dest; const long long int * start_val; const unsigned int * vals; compress_functor_int(const int_type * _source, unsigned long long int * _dest, const long long int * _start_val, const unsigned int * _vals): source(_source), dest(_dest), start_val(_start_val), vals(_vals) {} template <typename IndexType> __host__ __device__ void operator()(const IndexType & i) { long long int val = source[i] - start_val[0];; unsigned int shifted = vals[2] - vals[0] - (i%vals[1])*vals[0]; dest[i] = val << shifted; } }; struct compress_functor_float { const long long int * source; unsigned long long int * dest; const long long int * start_val; const unsigned int * vals; compress_functor_float(const long long int * _source, unsigned long long int * _dest, const long long int * _start_val, const unsigned int * _vals): source(_source), dest(_dest), start_val(_start_val), vals(_vals) {} template <typename IndexType> __host__ __device__ void operator()(const IndexType & i) { long long int val; unsigned int bits = vals[0]; unsigned int fit_count = vals[1]; unsigned int int_sz = vals[2]; val = source[i] - start_val[0]; unsigned int z = i%fit_count; unsigned int shifted = int_sz - bits - z*bits; dest[i] = val << shifted; } }; struct decompress_functor_int { const unsigned long long int * source; int_type * dest; const long long int * start_val; const unsigned int * vals; decompress_functor_int(const unsigned long long int * _source, int_type * _dest, const long long int * _start_val, const unsigned int * _vals): source(_source), dest(_dest), start_val(_start_val), vals(_vals) {} template <typename IndexType> __host__ __device__ void operator()(const IndexType & i) { unsigned int bits = vals[0]; unsigned int fit_count = vals[1]; unsigned int int_sz = vals[2]; //find the source index unsigned int src_idx = i/fit_count; // find the exact location unsigned int src_loc = i%fit_count; //right shift the values unsigned int shifted = int_sz - bits - src_loc*bits; unsigned long long int tmp = source[src_idx] >> shifted; // set the rest of bits to 0 tmp = tmp << (int_sz - bits); tmp = tmp >> (int_sz - bits); dest[i] = tmp + start_val[0]; } }; struct decompress_functor_float { const unsigned long long int * source; long long int * dest; const long long int * start_val; const unsigned int * vals; decompress_functor_float(const unsigned long long int * _source, long long int * _dest, const long long int * _start_val, const unsigned int * 
_vals): source(_source), dest(_dest), start_val(_start_val), vals(_vals) {} template <typename IndexType> __host__ __device__ void operator()(const IndexType & i) { unsigned int bits = vals[0]; unsigned int fit_count = vals[1]; unsigned int int_sz = vals[2]; //find the source index unsigned int src_idx = i/fit_count; // find the exact location unsigned int src_loc = i%fit_count; //right shift the values unsigned int shifted = int_sz - bits - src_loc*bits; unsigned long long int tmp = source[src_idx] >> shifted; // set the rest of bits to 0 tmp = tmp << (int_sz - bits); tmp = tmp >> (int_sz - bits); dest[i] = tmp + start_val[0]; } }; long long int pfor_dict_decompress(void* compressed, std::vector<thrust::host_vector<char> >& h_columns, std::vector<thrust::device_vector<char> >& d_columns, unsigned int* mRecCount, unsigned int mColumnCount, unsigned int offset, void* d_v, void* s_v) { unsigned int bits, cnt, fit_count, orig_recCount, grp_count; long long int orig_lower_val; unsigned int bit_count = 64; cnt = ((unsigned int*)compressed)[0]; grp_count = ((unsigned int*)((char*)compressed + 8*cnt + 12))[0]; orig_recCount = ((unsigned int*)((char*)compressed + 8*cnt +8))[0]; bits = ((unsigned int*)((char*)compressed + 8*cnt + mColumnCount*grp_count + 28))[0]; orig_lower_val = ((long long int*)((char*)compressed + 8*cnt + mColumnCount*grp_count + 32))[0]; fit_count = ((unsigned int*)((char*)compressed + 8*cnt + mColumnCount*grp_count + 40))[0]; *mRecCount = orig_recCount; //cout << "DICT Decomp Header " << cnt << " " << grp_count << " " << orig_recCount << " " << bits << " " << orig_lower_val << " " << fit_count << " " << endl; if(raw_decomp_length < cnt*8) { if(raw_decomp != NULL) { hipFree(raw_decomp); }; hipMalloc((void **) &raw_decomp, cnt*8); raw_decomp_length = cnt*8; }; hipMemcpy( (void*)raw_decomp, (void*)((unsigned int*)compressed + 1), cnt*8, hipMemcpyHostToDevice); thrust::device_ptr<unsigned int> dd_v((unsigned int*)d_v); thrust::device_ptr<long long int> dd_sv((long long int*)s_v); dd_sv[0] = orig_lower_val; dd_v[0] = bits; dd_v[1] = fit_count; dd_v[2] = bit_count; thrust::device_ptr<unsigned long long int> dest = thrust::device_malloc<unsigned long long int>(orig_recCount); thrust::counting_iterator<unsigned int, thrust::device_space_tag> begin(0); decompress_functor_int ff1(raw_decomp,(int_type*)thrust::raw_pointer_cast(dest), (long long int*)s_v, (unsigned int*)d_v); thrust::for_each(begin, begin + orig_recCount, ff1); thrust::device_ptr<char> dict = thrust::device_malloc<char>(grp_count); for(unsigned int i = 0; i < mColumnCount; i++) { hipMemcpy( (void*)thrust::raw_pointer_cast(dict), (void*)((char*)compressed + 8*cnt + 16 + i*grp_count) , grp_count, hipMemcpyHostToDevice); thrust::gather(dest, dest+orig_recCount,dict, d_columns[i].begin() + offset); } thrust::device_free(dict); thrust::device_free(dest); return 1; } long long int pfor_decompress(void* destination, void* host, unsigned int* mRecCount, void* d_v, void* s_v) { unsigned int bits, cnt, fit_count, orig_recCount; long long int orig_lower_val; unsigned int bit_count = 64; unsigned int comp_type; long long int start_val; cnt = ((unsigned int*)host)[0]; orig_recCount = ((unsigned int*)host + cnt*2)[7]; bits = ((unsigned int*)host + cnt*2)[8]; orig_lower_val = ((long long int*)((unsigned int*)host + cnt*2 + 9))[0]; fit_count = ((unsigned int*)host + cnt*2)[11]; start_val = ((long long int*)((unsigned int*)host + cnt*2 + 12))[0]; comp_type = ((unsigned int*)host + cnt*2)[14]; *mRecCount = orig_recCount; //cout << "Decomp 
Header " << orig_recCount << " " << bits << " " << orig_lower_val << " " << cnt << " " << fit_count << " " << comp_type << endl; if(raw_decomp_length < cnt*8) { if(raw_decomp != NULL) { hipFree(raw_decomp); }; hipMalloc((void **) &raw_decomp, cnt*8); raw_decomp_length = cnt*8; }; hipMemcpy( (void*)raw_decomp, (void*)((unsigned int*)host + 5), cnt*8, hipMemcpyHostToDevice); thrust::device_ptr<unsigned int> dd_v((unsigned int*)d_v); thrust::device_ptr<long long int> dd_sv((long long int*)s_v); dd_sv[0] = orig_lower_val; dd_v[0] = bits; dd_v[1] = fit_count; dd_v[2] = bit_count; thrust::counting_iterator<unsigned int, thrust::device_space_tag> begin(0); decompress_functor_int ff1(raw_decomp,(int_type*)destination, (long long int*)s_v, (unsigned int*)d_v); thrust::for_each(begin, begin + orig_recCount, ff1); if(comp_type == 1) { thrust::device_ptr<int_type> d_int((int_type*)destination); d_int[0] = start_val; thrust::inclusive_scan(d_int, d_int + orig_recCount, d_int); }; return 1; } template< typename T> unsigned long long int pfor_delta_compress(void* source, unsigned int source_len, char* file_name, thrust::host_vector<T>& host, bool tp, unsigned long long int sz) { long long int orig_lower_val, orig_upper_val, start_val, real_lower, real_upper; unsigned int bits, recCount; unsigned int bit_count = 8*8; unsigned int fit_count; unsigned int comp_type = 1; // FOR-DELTA if(tp == 0) recCount = source_len/int_size; else recCount = source_len/float_size; void* ss; CUDA_SAFE_CALL(hipMalloc((void **) &ss, recCount*float_size)); if (tp == 0) { thrust::device_ptr<int_type> s((int_type*)source); thrust::device_ptr<int_type> d_ss((int_type*)ss); thrust::adjacent_difference(s, s+recCount, d_ss); start_val = d_ss[0]; if(recCount > 1) d_ss[0] = d_ss[1]; orig_lower_val = *(thrust::min_element(d_ss, d_ss + recCount)); orig_upper_val = *(thrust::max_element(d_ss, d_ss + recCount)); real_lower = s[0]; real_upper = s[recCount-1]; //cout << "orig " << orig_upper_val << " " << orig_lower_val << endl; //cout << "We need for delta " << (unsigned int)ceil(log2((double)((orig_upper_val-orig_lower_val)+1))) << " bits to encode " << orig_upper_val-orig_lower_val << " values " << endl; bits = (unsigned int)ceil(log2((double)((orig_upper_val-orig_lower_val)+1))); if (bits == 0) bits = 1; } else { thrust::device_ptr<long long int> s((long long int*)source); thrust::device_ptr<long long int> d_ss((long long int*)ss); thrust::adjacent_difference(s, s+recCount, d_ss); start_val = d_ss[0]; if(recCount > 1) d_ss[0] = d_ss[1]; orig_lower_val = *(thrust::min_element(d_ss, d_ss + recCount)); orig_upper_val = *(thrust::max_element(d_ss, d_ss + recCount)); real_lower = s[0]; real_upper = s[recCount-1]; //cout << "orig " << orig_upper_val << " " << orig_lower_val << endl; //cout << "We need for delta " << (unsigned int)ceil(log2((double)((orig_upper_val-orig_lower_val)+1))) << " bits to encode " << orig_upper_val-orig_lower_val << " values" << endl; bits = (unsigned int)ceil(log2((double)((orig_upper_val-orig_lower_val)+1))); if (bits == 0) bits = 1; }; thrust::counting_iterator<unsigned int, thrust::device_space_tag> begin(0); fit_count = bit_count/bits; void* d_v1; CUDA_SAFE_CALL(hipMalloc((void **) &d_v1, 12)); thrust::device_ptr<unsigned int> dd_v((unsigned int*)d_v1); void* s_v1; CUDA_SAFE_CALL(hipMalloc((void **) &s_v1, 8)); thrust::device_ptr<long long int> dd_sv((long long int*)s_v1); dd_sv[0] = orig_lower_val; dd_v[0] = bits; dd_v[1] = fit_count; dd_v[2] = bit_count; //void* d; //CUDA_SAFE_CALL(hipMalloc((void **) &d, 
recCount*float_size)); thrust::device_ptr<char> dd((char*)source); thrust::fill(dd, dd+source_len,0); //cout << "FF " << orig_lower_val << " " << bits << " " << fit_count << " " << bit_count << endl; if (tp == 0) { compress_functor_int ff((int_type*)ss,(unsigned long long int*)source, (long long int*)s_v1, (unsigned int*)d_v1); thrust::for_each(begin, begin + recCount, ff); } else { compress_functor_float ff((long long int*)ss,(unsigned long long int*)source, (long long int*)s_v1, (unsigned int*)d_v1); thrust::for_each(begin, begin + recCount, ff); }; thrust::device_ptr<unsigned long long int> s_copy1((unsigned long long int*)source); // make an addition sequence thrust::device_ptr<unsigned long long int> add_seq((unsigned long long int*)ss); thrust::constant_iterator<unsigned long long int> iter(fit_count); thrust::sequence(add_seq, add_seq + recCount, 0, 1); thrust::transform(add_seq, add_seq + recCount, iter, add_seq, thrust::divides<unsigned long long int>()); unsigned int cnt = (recCount)/fit_count; if (recCount%fit_count > 0) cnt++; thrust::device_ptr<unsigned long long int> fin_seq = thrust::device_malloc<unsigned long long int>(cnt); thrust::reduce_by_key(add_seq, add_seq+recCount,s_copy1,thrust::make_discard_iterator(), fin_seq); //for(int i = 0; i < 10;i++) // cout << "FIN " << fin_seq[i] << endl; // copy fin_seq to host unsigned long long int * raw_src = thrust::raw_pointer_cast(fin_seq); if(file_name) { hipMemcpy( host.data(), (void *)raw_src, cnt*8, hipMemcpyDeviceToHost); fstream binary_file(file_name,ios::out|ios::binary|ios::app); binary_file.write((char *)&cnt, 4); binary_file.write((char *)&real_lower, 8); binary_file.write((char *)&real_upper, 8); binary_file.write((char *)host.data(),cnt*8); binary_file.write((char *)&comp_type, 4); binary_file.write((char *)&cnt, 4); binary_file.write((char *)&recCount, 4); binary_file.write((char *)&bits, 4); binary_file.write((char *)&orig_lower_val, 8); binary_file.write((char *)&fit_count, 4); binary_file.write((char *)&start_val, 8); binary_file.write((char *)&comp_type, 4); binary_file.write((char *)&comp_type, 4); //filler binary_file.close(); if(cnt_counts[curr_file] < cnt) cnt_counts[curr_file] = cnt; } else { char* hh; //resize_compressed(host, sz, cnt*8 + 15*4, 0); host.resize(sz+cnt+8); hh = (char*)(host.data() + sz); ((unsigned int*)hh)[0] = cnt; ((long long int*)(hh+4))[0] = real_lower; ((long long int*)(hh+12))[0] = real_upper; hipMemcpy( hh + 20, (void *)raw_src, cnt*8, hipMemcpyDeviceToHost); ((unsigned int*)hh)[5+cnt*2] = comp_type; ((unsigned int*)hh)[6+cnt*2] = cnt; ((unsigned int*)hh)[7+cnt*2] = recCount; ((unsigned int*)hh)[8+cnt*2] = bits; ((long long int*)((char*)hh+36+cnt*8))[0] = orig_lower_val; ((unsigned int*)hh)[11+cnt*2] = fit_count; ((long long int*)((char*)hh+48+cnt*8))[0] = start_val; ((unsigned int*)hh)[14+cnt*2] = comp_type; }; thrust::device_free(fin_seq); hipFree(ss); hipFree(d_v1); hipFree(s_v1); return sz + cnt + 8; } unsigned long long int pfor_dict_compress(std::vector<thrust::device_vector<char> >& d_columns, unsigned int mColumnCount, char* file_name, unsigned int source_len, thrust::host_vector<char>& host, unsigned long long int sz) { unsigned int comp_type = 2; // DICT long long int start_val = 0; long long int orig_lower_val; thrust::device_ptr<unsigned int> permutation = thrust::device_malloc<unsigned int>(source_len); thrust::sequence(permutation, permutation+source_len); unsigned int* raw_ptr = thrust::raw_pointer_cast(permutation); void* temp; CUDA_SAFE_CALL(hipMalloc((void **) &temp, 
source_len)); for(int j=mColumnCount-1; j>=0 ; j--) update_permutation(d_columns[j], raw_ptr, source_len, "ASC", (char*)temp); for(int j=mColumnCount-1; j>=0 ; j--) apply_permutation(d_columns[j], raw_ptr, source_len, (char*)temp); hipFree(temp); // group by the vectors bool *grp; CUDA_SAFE_CALL(hipMalloc((void **) &grp, source_len * sizeof(bool))); thrust::device_ptr<bool> d_grp(grp); thrust::sequence(d_grp, d_grp+source_len, 0, 0); thrust::device_ptr<bool> d_group = thrust::device_malloc<bool>(source_len); d_group[source_len-1] = 1; for(unsigned int j=0; j < mColumnCount; j++) { //thrust::device_ptr<char> d_col(d_columns[j]); thrust::transform(d_columns[j].begin(), d_columns[j].begin() + source_len - 1, d_columns[j].begin()+1, d_group, thrust::not_equal_to<char>()); thrust::transform(d_group, d_group+source_len, d_grp, d_grp, thrust::logical_or<int>()); }; thrust::device_free(d_group); thrust::device_ptr<unsigned int> d_grp_int = thrust::device_malloc<unsigned int>(source_len); thrust::transform(d_grp, d_grp+source_len, d_grp_int, bool_to_int()); //thrust::device_free(d_grp); unsigned int grp_count = thrust::reduce(d_grp_int, d_grp_int+source_len); if(grp_count == 1) grp_count++; //if(grp_count < source_len) // cout << "Compressable to " << grp_count << endl; // cout << "grp count " << grp_count << endl; unsigned int bits = (unsigned int)log2((double)(grp_count))+1; thrust::device_ptr<int_type> permutation_final = thrust::device_malloc<int_type>(source_len); thrust::exclusive_scan(d_grp_int, d_grp_int+source_len, d_grp_int, 0); thrust::scatter(d_grp_int, d_grp_int+source_len, permutation, permutation_final); thrust::device_free(permutation); // for(int z = 0; z < 10; z++) // cout << "RES " << permutation_final[z] << endl; unsigned int fit_count = 64/bits; void* d_v1; CUDA_SAFE_CALL(hipMalloc((void **) &d_v1, 12)); thrust::device_ptr<unsigned int> dd_v((unsigned int*)d_v1); void* s_v1; CUDA_SAFE_CALL(hipMalloc((void **) &s_v1, 8)); thrust::device_ptr<long long int> dd_sv((long long int*)s_v1); dd_sv[0] = 0; dd_v[0] = bits; dd_v[1] = fit_count; dd_v[2] = 64; thrust::counting_iterator<unsigned int, thrust::device_space_tag> begin(0); void* d; CUDA_SAFE_CALL(hipMalloc((void **) &d, source_len*float_size)); thrust::device_ptr<char> dd((char*)d); thrust::fill(dd, dd+source_len,0); compress_functor_int ff(thrust::raw_pointer_cast(permutation_final),(unsigned long long int*)d, (long long int*)s_v1, (unsigned int*)d_v1); thrust::for_each(begin, begin + source_len, ff); hipFree(d_v1); hipFree(s_v1); thrust::device_ptr<unsigned long long int> s_copy1((unsigned long long int*)d); // make an addition sequence thrust::constant_iterator< long long int> iter(fit_count); thrust::sequence(permutation_final, permutation_final + source_len, 0, 1); thrust::transform(permutation_final, permutation_final + source_len, iter, permutation_final, thrust::divides<long long int>()); unsigned int cnt = (source_len)/fit_count; if (source_len%fit_count > 0) cnt++; thrust::device_ptr<unsigned long long int> fin_seq = thrust::device_malloc<unsigned long long int>(cnt); //cout << "fin seq " << cnt << " " << source_len << endl; thrust::reduce_by_key(permutation_final, permutation_final+source_len,s_copy1,thrust::make_discard_iterator(), fin_seq); orig_lower_val = 0; if (file_name) { hipMemcpy( host.data(), (void *)thrust::raw_pointer_cast(fin_seq), cnt*8, hipMemcpyDeviceToHost); //thrust::copy(fin_seq, fin_seq+cnt,host.begin()); thrust::device_free(fin_seq); fstream binary_file(file_name,ios::out|ios::binary|ios::app); 
binary_file.write((char *)&cnt, 4); binary_file.write((char *)host.data(),cnt*8); binary_file.write((char *)&comp_type, 4); binary_file.write((char *)&source_len, 4); // write a dictionary binary_file.write((char *)&grp_count, 4); // create dictionary thrust::device_ptr<char> dict = thrust::device_malloc<char>(grp_count); for(unsigned int j=0; j < mColumnCount; j++) { thrust::transform(d_grp, d_grp+source_len, d_grp_int, bool_to_int()); thrust::copy_if(d_columns[j].begin(),d_columns[j].begin()+source_len,d_grp_int, dict, nz<unsigned int>()); hipMemcpy( host.data(), (void *)thrust::raw_pointer_cast(dict), grp_count, hipMemcpyDeviceToHost); binary_file.write((char *)host.data(),grp_count); }; thrust::device_free(dict); binary_file.write((char *)&grp_count, 4); binary_file.write((char *)&cnt, 4); binary_file.write((char *)&source_len, 4); binary_file.write((char *)&bits, 4); binary_file.write((char *)&orig_lower_val, 8); binary_file.write((char *)&fit_count, 4); binary_file.write((char *)&start_val, 8); binary_file.write((char *)&comp_type, 4); binary_file.close(); if(cnt_counts[curr_file] < (cnt*8 + 14*4 + grp_count*mColumnCount)) cnt_counts[curr_file] = (cnt*8 + 14*4 + grp_count*mColumnCount); } else { char* hh; host.resize(sz+cnt*8 + mColumnCount*grp_count + 14*4); hh = (host.data() + sz); ((unsigned int*)hh)[0] = cnt; hipMemcpy( (unsigned int*)hh + 1, (void *)thrust::raw_pointer_cast(fin_seq), cnt*8, hipMemcpyDeviceToHost); thrust::device_free(fin_seq); ((unsigned int*)hh)[1+cnt*2] = comp_type; ((unsigned int*)hh)[2+cnt*2] = source_len; // write a dictionary ((unsigned int*)hh)[3+cnt*2] = grp_count; // create dictionary thrust::device_ptr<char> dict = thrust::device_malloc<char>(grp_count); for(unsigned int j=0; j < mColumnCount; j++) { thrust::transform(d_grp, d_grp+source_len, d_grp_int, bool_to_int()); thrust::copy_if(d_columns[j].begin(),d_columns[j].begin()+source_len,d_grp_int, dict, nz<unsigned int>()); hipMemcpy( (void*)(hh+16+cnt*8+j*grp_count), (void *)thrust::raw_pointer_cast(dict), grp_count, hipMemcpyDeviceToHost); }; thrust::device_free(dict); ((unsigned int*)(hh+16+cnt*8+mColumnCount*grp_count))[0] = grp_count; ((unsigned int*)(hh+20+cnt*8+mColumnCount*grp_count))[0] = cnt; ((unsigned int*)(hh+24+cnt*8+mColumnCount*grp_count))[0] = source_len; ((unsigned int*)(hh+28+cnt*8+mColumnCount*grp_count))[0] = bits; ((long long int*)(hh+32+cnt*8+mColumnCount*grp_count))[0] = orig_lower_val; ((unsigned int*)(hh+40+cnt*8+mColumnCount*grp_count))[0] = fit_count; ((long long int*)(hh+44+cnt*8+mColumnCount*grp_count))[0] = start_val; ((unsigned int*)(hh+52+cnt*8+mColumnCount*grp_count))[0] = comp_type; }; thrust::device_free(permutation_final); thrust::device_free(d_grp_int); hipFree(d); thrust::device_free(d_grp); return sz + cnt*8 + mColumnCount*grp_count + 14*4; } template< typename T> unsigned long long int pfor_compress(void* source, unsigned int source_len, char* file_name, thrust::host_vector<T>& host, bool tp, unsigned long long int sz) { unsigned int recCount; long long int orig_lower_val; long long int orig_upper_val; unsigned int bits; unsigned int bit_count = 8*8; unsigned int fit_count; unsigned int comp_type = 0; // FOR long long int start_val = 0; bool sorted = 0; if(tp == 0) recCount = source_len/int_size; else recCount = source_len/float_size; // check if sorted if (tp == 0) { thrust::device_ptr<int_type> s((int_type*)source); sorted = thrust::is_sorted(s, s+recCount); } else { thrust::device_ptr<long long int> s((long long int*)source); sorted = thrust::is_sorted(s, 
s+recCount); }; //cout << "file " << file_name << " is sorted " << sorted << endl; if(sorted) return pfor_delta_compress(source, source_len, file_name, host, tp, sz); // sort the sequence if (tp == 0) { thrust::device_ptr<int_type> s((int_type*)source); orig_lower_val = *(thrust::min_element(s, s + recCount)); orig_upper_val = *(thrust::max_element(s, s + recCount)); //cout << "We need " << (unsigned int)ceil(log2((double)((orig_upper_val - orig_lower_val) + 1))) << " bits to encode original range of " << orig_lower_val << " to " << orig_upper_val << endl; bits = (unsigned int)ceil(log2((double)((orig_upper_val - orig_lower_val) + 1))); } else { thrust::device_ptr<long long int> s((long long int*)source); orig_lower_val = *(thrust::min_element(s, s + recCount)); orig_upper_val = *(thrust::max_element(s, s + recCount)); //cout << "We need " << (unsigned int)ceil(log2((double)((orig_upper_val - orig_lower_val) + 1))) << " bits to encode original range of " << orig_lower_val << " to " << orig_upper_val << endl; bits = (unsigned int)ceil(log2((double)((orig_upper_val - orig_lower_val) + 1))); }; thrust::counting_iterator<unsigned int, thrust::device_space_tag> begin(0); fit_count = bit_count/bits; void* d_v1; CUDA_SAFE_CALL(hipMalloc((void **) &d_v1, 12)); thrust::device_ptr<unsigned int> dd_v((unsigned int*)d_v1); void* s_v1; CUDA_SAFE_CALL(hipMalloc((void **) &s_v1, 8)); thrust::device_ptr<long long int> dd_sv((long long int*)s_v1); dd_sv[0] = orig_lower_val; dd_v[0] = bits; dd_v[1] = fit_count; dd_v[2] = bit_count; void* d; CUDA_SAFE_CALL(hipMalloc((void **) &d, recCount*float_size)); thrust::device_ptr<char> dd((char*)d); thrust::fill(dd, dd+source_len,0); if (tp == 0) { compress_functor_int ff((int_type*)source,(unsigned long long int*)d, (long long int*)s_v1, (unsigned int*)d_v1); thrust::for_each(begin, begin + recCount, ff); } else { compress_functor_float ff((long long int*)source,(unsigned long long int*)d, (long long int*)s_v1, (unsigned int*)d_v1); thrust::for_each(begin, begin + recCount, ff); }; thrust::device_ptr<unsigned long long int> s_copy1((unsigned long long int*)d); // make an addition sequence thrust::device_ptr<unsigned int> add_seq = thrust::device_malloc<unsigned int>(recCount); thrust::constant_iterator<unsigned int> iter(fit_count); thrust::sequence(add_seq, add_seq + recCount, 0, 1); thrust::transform(add_seq, add_seq + recCount, iter, add_seq, thrust::divides<unsigned int>()); unsigned int cnt = (recCount)/fit_count; if(cnt == 0) cnt = 1; // need at least 1 if (recCount%fit_count > 0) cnt++; //thrust::device_ptr<unsigned long long int> fin_seq = thrust::device_malloc<unsigned long long int>(cnt); thrust::device_ptr<unsigned long long int> fin_seq((unsigned long long int*)source); thrust::reduce_by_key(add_seq, add_seq+recCount,s_copy1,thrust::make_discard_iterator(), fin_seq); // copy fin_seq to host unsigned long long int * raw_src = thrust::raw_pointer_cast(fin_seq); //cout << file_name << " CNT " << cnt << endl; if(file_name) { hipMemcpy( host.data(), (void *)raw_src, cnt*8, hipMemcpyDeviceToHost); fstream binary_file(file_name,ios::out|ios::binary|ios::app); binary_file.write((char *)&cnt, 4); binary_file.write((char *)&orig_lower_val, 8); binary_file.write((char *)&orig_upper_val, 8); binary_file.write((char *)host.data(),cnt*8); binary_file.write((char *)&comp_type, 4); binary_file.write((char *)&cnt, 4); binary_file.write((char *)&recCount, 4); binary_file.write((char *)&bits, 4); binary_file.write((char *)&orig_lower_val, 8); binary_file.write((char 
*)&fit_count, 4); binary_file.write((char *)&start_val, 8); binary_file.write((char *)&comp_type, 4); binary_file.write((char *)&comp_type, 4); //filler binary_file.close(); if(cnt_counts[curr_file] < cnt) cnt_counts[curr_file] = cnt; } else { char* hh; // resize host to sz + cnt*8 + 15 host.resize(sz+cnt+8); hh = (char*)(host.data() + sz); ((unsigned int*)hh)[0] = cnt; ((long long int*)(hh+4))[0] = orig_lower_val; ((long long int*)(hh+12))[0] = orig_upper_val; hipMemcpy( hh + 20, (void *)raw_src, cnt*8, hipMemcpyDeviceToHost); ((unsigned int*)hh)[5+cnt*2] = comp_type; ((unsigned int*)hh)[6+cnt*2] = cnt; ((unsigned int*)hh)[7+cnt*2] = recCount; ((unsigned int*)hh)[8+cnt*2] = bits; ((long long int*)(hh+36+cnt*8))[0] = orig_lower_val; ((unsigned int*)hh)[11+cnt*2] = fit_count; ((long long int*)(hh+48+cnt*8))[0] = start_val; ((unsigned int*)hh)[14+cnt*2] = comp_type; }; thrust::device_free(add_seq); hipFree(d); hipFree(d_v1); hipFree(s_v1); return sz + cnt + 8; }
2477fcac66c37dc58fc7dbd0299d594c48654e0b.cu
// PFOR and PFOR-DELTA Compression and decompression routines #include <stdio.h> #include <fstream> #include <iomanip> #include <exception> #include <thrust/device_vector.h> #include <thrust/iterator/discard_iterator.h> #include <thrust/extrema.h> // YOU NEED TO MODIFY THE CUDPP PATH !!!!! #ifdef _WIN64 #include "C:\Users\anton\Favorites\Downloads\cudpp_src_2.0\cudpp_src_2.0\include\cudpp_hash.h" #else #include "./cudpp_src_2.0/include/cudpp_hash.h" #endif #include "sorts.cu" using namespace std; unsigned long long int* raw_decomp = NULL; unsigned int raw_decomp_length = 0; std::map<string, unsigned int> cnt_counts; string curr_file; struct bool_to_int { __host__ __device__ unsigned int operator()(const bool x) { return (unsigned int)x; } }; struct ui_to_ll { __host__ __device__ long long int operator()(const unsigned int x) { return (long long int)x; } }; template<typename T> struct nz { __host__ __device__ bool operator()(const T x) { return (x != 0); } }; struct compress_functor_int { const int_type * source; unsigned long long int * dest; const long long int * start_val; const unsigned int * vals; compress_functor_int(const int_type * _source, unsigned long long int * _dest, const long long int * _start_val, const unsigned int * _vals): source(_source), dest(_dest), start_val(_start_val), vals(_vals) {} template <typename IndexType> __host__ __device__ void operator()(const IndexType & i) { long long int val = source[i] - start_val[0];; unsigned int shifted = vals[2] - vals[0] - (i%vals[1])*vals[0]; dest[i] = val << shifted; } }; struct compress_functor_float { const long long int * source; unsigned long long int * dest; const long long int * start_val; const unsigned int * vals; compress_functor_float(const long long int * _source, unsigned long long int * _dest, const long long int * _start_val, const unsigned int * _vals): source(_source), dest(_dest), start_val(_start_val), vals(_vals) {} template <typename IndexType> __host__ __device__ void operator()(const IndexType & i) { long long int val; unsigned int bits = vals[0]; unsigned int fit_count = vals[1]; unsigned int int_sz = vals[2]; val = source[i] - start_val[0]; unsigned int z = i%fit_count; unsigned int shifted = int_sz - bits - z*bits; dest[i] = val << shifted; } }; struct decompress_functor_int { const unsigned long long int * source; int_type * dest; const long long int * start_val; const unsigned int * vals; decompress_functor_int(const unsigned long long int * _source, int_type * _dest, const long long int * _start_val, const unsigned int * _vals): source(_source), dest(_dest), start_val(_start_val), vals(_vals) {} template <typename IndexType> __host__ __device__ void operator()(const IndexType & i) { unsigned int bits = vals[0]; unsigned int fit_count = vals[1]; unsigned int int_sz = vals[2]; //find the source index unsigned int src_idx = i/fit_count; // find the exact location unsigned int src_loc = i%fit_count; //right shift the values unsigned int shifted = int_sz - bits - src_loc*bits; unsigned long long int tmp = source[src_idx] >> shifted; // set the rest of bits to 0 tmp = tmp << (int_sz - bits); tmp = tmp >> (int_sz - bits); dest[i] = tmp + start_val[0]; } }; struct decompress_functor_float { const unsigned long long int * source; long long int * dest; const long long int * start_val; const unsigned int * vals; decompress_functor_float(const unsigned long long int * _source, long long int * _dest, const long long int * _start_val, const unsigned int * _vals): source(_source), dest(_dest), 
start_val(_start_val), vals(_vals) {} template <typename IndexType> __host__ __device__ void operator()(const IndexType & i) { unsigned int bits = vals[0]; unsigned int fit_count = vals[1]; unsigned int int_sz = vals[2]; //find the source index unsigned int src_idx = i/fit_count; // find the exact location unsigned int src_loc = i%fit_count; //right shift the values unsigned int shifted = int_sz - bits - src_loc*bits; unsigned long long int tmp = source[src_idx] >> shifted; // set the rest of bits to 0 tmp = tmp << (int_sz - bits); tmp = tmp >> (int_sz - bits); dest[i] = tmp + start_val[0]; } }; long long int pfor_dict_decompress(void* compressed, std::vector<thrust::host_vector<char> >& h_columns, std::vector<thrust::device_vector<char> >& d_columns, unsigned int* mRecCount, unsigned int mColumnCount, unsigned int offset, void* d_v, void* s_v) { unsigned int bits, cnt, fit_count, orig_recCount, grp_count; long long int orig_lower_val; unsigned int bit_count = 64; cnt = ((unsigned int*)compressed)[0]; grp_count = ((unsigned int*)((char*)compressed + 8*cnt + 12))[0]; orig_recCount = ((unsigned int*)((char*)compressed + 8*cnt +8))[0]; bits = ((unsigned int*)((char*)compressed + 8*cnt + mColumnCount*grp_count + 28))[0]; orig_lower_val = ((long long int*)((char*)compressed + 8*cnt + mColumnCount*grp_count + 32))[0]; fit_count = ((unsigned int*)((char*)compressed + 8*cnt + mColumnCount*grp_count + 40))[0]; *mRecCount = orig_recCount; //cout << "DICT Decomp Header " << cnt << " " << grp_count << " " << orig_recCount << " " << bits << " " << orig_lower_val << " " << fit_count << " " << endl; if(raw_decomp_length < cnt*8) { if(raw_decomp != NULL) { cudaFree(raw_decomp); }; cudaMalloc((void **) &raw_decomp, cnt*8); raw_decomp_length = cnt*8; }; cudaMemcpy( (void*)raw_decomp, (void*)((unsigned int*)compressed + 1), cnt*8, cudaMemcpyHostToDevice); thrust::device_ptr<unsigned int> dd_v((unsigned int*)d_v); thrust::device_ptr<long long int> dd_sv((long long int*)s_v); dd_sv[0] = orig_lower_val; dd_v[0] = bits; dd_v[1] = fit_count; dd_v[2] = bit_count; thrust::device_ptr<unsigned long long int> dest = thrust::device_malloc<unsigned long long int>(orig_recCount); thrust::counting_iterator<unsigned int, thrust::device_space_tag> begin(0); decompress_functor_int ff1(raw_decomp,(int_type*)thrust::raw_pointer_cast(dest), (long long int*)s_v, (unsigned int*)d_v); thrust::for_each(begin, begin + orig_recCount, ff1); thrust::device_ptr<char> dict = thrust::device_malloc<char>(grp_count); for(unsigned int i = 0; i < mColumnCount; i++) { cudaMemcpy( (void*)thrust::raw_pointer_cast(dict), (void*)((char*)compressed + 8*cnt + 16 + i*grp_count) , grp_count, cudaMemcpyHostToDevice); thrust::gather(dest, dest+orig_recCount,dict, d_columns[i].begin() + offset); } thrust::device_free(dict); thrust::device_free(dest); return 1; } long long int pfor_decompress(void* destination, void* host, unsigned int* mRecCount, void* d_v, void* s_v) { unsigned int bits, cnt, fit_count, orig_recCount; long long int orig_lower_val; unsigned int bit_count = 64; unsigned int comp_type; long long int start_val; cnt = ((unsigned int*)host)[0]; orig_recCount = ((unsigned int*)host + cnt*2)[7]; bits = ((unsigned int*)host + cnt*2)[8]; orig_lower_val = ((long long int*)((unsigned int*)host + cnt*2 + 9))[0]; fit_count = ((unsigned int*)host + cnt*2)[11]; start_val = ((long long int*)((unsigned int*)host + cnt*2 + 12))[0]; comp_type = ((unsigned int*)host + cnt*2)[14]; *mRecCount = orig_recCount; //cout << "Decomp Header " << orig_recCount << " " 
<< bits << " " << orig_lower_val << " " << cnt << " " << fit_count << " " << comp_type << endl; if(raw_decomp_length < cnt*8) { if(raw_decomp != NULL) { cudaFree(raw_decomp); }; cudaMalloc((void **) &raw_decomp, cnt*8); raw_decomp_length = cnt*8; }; cudaMemcpy( (void*)raw_decomp, (void*)((unsigned int*)host + 5), cnt*8, cudaMemcpyHostToDevice); thrust::device_ptr<unsigned int> dd_v((unsigned int*)d_v); thrust::device_ptr<long long int> dd_sv((long long int*)s_v); dd_sv[0] = orig_lower_val; dd_v[0] = bits; dd_v[1] = fit_count; dd_v[2] = bit_count; thrust::counting_iterator<unsigned int, thrust::device_space_tag> begin(0); decompress_functor_int ff1(raw_decomp,(int_type*)destination, (long long int*)s_v, (unsigned int*)d_v); thrust::for_each(begin, begin + orig_recCount, ff1); if(comp_type == 1) { thrust::device_ptr<int_type> d_int((int_type*)destination); d_int[0] = start_val; thrust::inclusive_scan(d_int, d_int + orig_recCount, d_int); }; return 1; } template< typename T> unsigned long long int pfor_delta_compress(void* source, unsigned int source_len, char* file_name, thrust::host_vector<T>& host, bool tp, unsigned long long int sz) { long long int orig_lower_val, orig_upper_val, start_val, real_lower, real_upper; unsigned int bits, recCount; unsigned int bit_count = 8*8; unsigned int fit_count; unsigned int comp_type = 1; // FOR-DELTA if(tp == 0) recCount = source_len/int_size; else recCount = source_len/float_size; void* ss; CUDA_SAFE_CALL(cudaMalloc((void **) &ss, recCount*float_size)); if (tp == 0) { thrust::device_ptr<int_type> s((int_type*)source); thrust::device_ptr<int_type> d_ss((int_type*)ss); thrust::adjacent_difference(s, s+recCount, d_ss); start_val = d_ss[0]; if(recCount > 1) d_ss[0] = d_ss[1]; orig_lower_val = *(thrust::min_element(d_ss, d_ss + recCount)); orig_upper_val = *(thrust::max_element(d_ss, d_ss + recCount)); real_lower = s[0]; real_upper = s[recCount-1]; //cout << "orig " << orig_upper_val << " " << orig_lower_val << endl; //cout << "We need for delta " << (unsigned int)ceil(log2((double)((orig_upper_val-orig_lower_val)+1))) << " bits to encode " << orig_upper_val-orig_lower_val << " values " << endl; bits = (unsigned int)ceil(log2((double)((orig_upper_val-orig_lower_val)+1))); if (bits == 0) bits = 1; } else { thrust::device_ptr<long long int> s((long long int*)source); thrust::device_ptr<long long int> d_ss((long long int*)ss); thrust::adjacent_difference(s, s+recCount, d_ss); start_val = d_ss[0]; if(recCount > 1) d_ss[0] = d_ss[1]; orig_lower_val = *(thrust::min_element(d_ss, d_ss + recCount)); orig_upper_val = *(thrust::max_element(d_ss, d_ss + recCount)); real_lower = s[0]; real_upper = s[recCount-1]; //cout << "orig " << orig_upper_val << " " << orig_lower_val << endl; //cout << "We need for delta " << (unsigned int)ceil(log2((double)((orig_upper_val-orig_lower_val)+1))) << " bits to encode " << orig_upper_val-orig_lower_val << " values" << endl; bits = (unsigned int)ceil(log2((double)((orig_upper_val-orig_lower_val)+1))); if (bits == 0) bits = 1; }; thrust::counting_iterator<unsigned int, thrust::device_space_tag> begin(0); fit_count = bit_count/bits; void* d_v1; CUDA_SAFE_CALL(cudaMalloc((void **) &d_v1, 12)); thrust::device_ptr<unsigned int> dd_v((unsigned int*)d_v1); void* s_v1; CUDA_SAFE_CALL(cudaMalloc((void **) &s_v1, 8)); thrust::device_ptr<long long int> dd_sv((long long int*)s_v1); dd_sv[0] = orig_lower_val; dd_v[0] = bits; dd_v[1] = fit_count; dd_v[2] = bit_count; //void* d; //CUDA_SAFE_CALL(cudaMalloc((void **) &d, recCount*float_size)); 
thrust::device_ptr<char> dd((char*)source); thrust::fill(dd, dd+source_len,0); //cout << "FF " << orig_lower_val << " " << bits << " " << fit_count << " " << bit_count << endl; if (tp == 0) { compress_functor_int ff((int_type*)ss,(unsigned long long int*)source, (long long int*)s_v1, (unsigned int*)d_v1); thrust::for_each(begin, begin + recCount, ff); } else { compress_functor_float ff((long long int*)ss,(unsigned long long int*)source, (long long int*)s_v1, (unsigned int*)d_v1); thrust::for_each(begin, begin + recCount, ff); }; thrust::device_ptr<unsigned long long int> s_copy1((unsigned long long int*)source); // make an addition sequence thrust::device_ptr<unsigned long long int> add_seq((unsigned long long int*)ss); thrust::constant_iterator<unsigned long long int> iter(fit_count); thrust::sequence(add_seq, add_seq + recCount, 0, 1); thrust::transform(add_seq, add_seq + recCount, iter, add_seq, thrust::divides<unsigned long long int>()); unsigned int cnt = (recCount)/fit_count; if (recCount%fit_count > 0) cnt++; thrust::device_ptr<unsigned long long int> fin_seq = thrust::device_malloc<unsigned long long int>(cnt); thrust::reduce_by_key(add_seq, add_seq+recCount,s_copy1,thrust::make_discard_iterator(), fin_seq); //for(int i = 0; i < 10;i++) // cout << "FIN " << fin_seq[i] << endl; // copy fin_seq to host unsigned long long int * raw_src = thrust::raw_pointer_cast(fin_seq); if(file_name) { cudaMemcpy( host.data(), (void *)raw_src, cnt*8, cudaMemcpyDeviceToHost); fstream binary_file(file_name,ios::out|ios::binary|ios::app); binary_file.write((char *)&cnt, 4); binary_file.write((char *)&real_lower, 8); binary_file.write((char *)&real_upper, 8); binary_file.write((char *)host.data(),cnt*8); binary_file.write((char *)&comp_type, 4); binary_file.write((char *)&cnt, 4); binary_file.write((char *)&recCount, 4); binary_file.write((char *)&bits, 4); binary_file.write((char *)&orig_lower_val, 8); binary_file.write((char *)&fit_count, 4); binary_file.write((char *)&start_val, 8); binary_file.write((char *)&comp_type, 4); binary_file.write((char *)&comp_type, 4); //filler binary_file.close(); if(cnt_counts[curr_file] < cnt) cnt_counts[curr_file] = cnt; } else { char* hh; //resize_compressed(host, sz, cnt*8 + 15*4, 0); host.resize(sz+cnt+8); hh = (char*)(host.data() + sz); ((unsigned int*)hh)[0] = cnt; ((long long int*)(hh+4))[0] = real_lower; ((long long int*)(hh+12))[0] = real_upper; cudaMemcpy( hh + 20, (void *)raw_src, cnt*8, cudaMemcpyDeviceToHost); ((unsigned int*)hh)[5+cnt*2] = comp_type; ((unsigned int*)hh)[6+cnt*2] = cnt; ((unsigned int*)hh)[7+cnt*2] = recCount; ((unsigned int*)hh)[8+cnt*2] = bits; ((long long int*)((char*)hh+36+cnt*8))[0] = orig_lower_val; ((unsigned int*)hh)[11+cnt*2] = fit_count; ((long long int*)((char*)hh+48+cnt*8))[0] = start_val; ((unsigned int*)hh)[14+cnt*2] = comp_type; }; thrust::device_free(fin_seq); cudaFree(ss); cudaFree(d_v1); cudaFree(s_v1); return sz + cnt + 8; } unsigned long long int pfor_dict_compress(std::vector<thrust::device_vector<char> >& d_columns, unsigned int mColumnCount, char* file_name, unsigned int source_len, thrust::host_vector<char>& host, unsigned long long int sz) { unsigned int comp_type = 2; // DICT long long int start_val = 0; long long int orig_lower_val; thrust::device_ptr<unsigned int> permutation = thrust::device_malloc<unsigned int>(source_len); thrust::sequence(permutation, permutation+source_len); unsigned int* raw_ptr = thrust::raw_pointer_cast(permutation); void* temp; CUDA_SAFE_CALL(cudaMalloc((void **) &temp, source_len)); 
for(int j=mColumnCount-1; j>=0 ; j--) update_permutation(d_columns[j], raw_ptr, source_len, "ASC", (char*)temp); for(int j=mColumnCount-1; j>=0 ; j--) apply_permutation(d_columns[j], raw_ptr, source_len, (char*)temp); cudaFree(temp); // group by the vectors bool *grp; CUDA_SAFE_CALL(cudaMalloc((void **) &grp, source_len * sizeof(bool))); thrust::device_ptr<bool> d_grp(grp); thrust::sequence(d_grp, d_grp+source_len, 0, 0); thrust::device_ptr<bool> d_group = thrust::device_malloc<bool>(source_len); d_group[source_len-1] = 1; for(unsigned int j=0; j < mColumnCount; j++) { //thrust::device_ptr<char> d_col(d_columns[j]); thrust::transform(d_columns[j].begin(), d_columns[j].begin() + source_len - 1, d_columns[j].begin()+1, d_group, thrust::not_equal_to<char>()); thrust::transform(d_group, d_group+source_len, d_grp, d_grp, thrust::logical_or<int>()); }; thrust::device_free(d_group); thrust::device_ptr<unsigned int> d_grp_int = thrust::device_malloc<unsigned int>(source_len); thrust::transform(d_grp, d_grp+source_len, d_grp_int, bool_to_int()); //thrust::device_free(d_grp); unsigned int grp_count = thrust::reduce(d_grp_int, d_grp_int+source_len); if(grp_count == 1) grp_count++; //if(grp_count < source_len) // cout << "Compressable to " << grp_count << endl; // cout << "grp count " << grp_count << endl; unsigned int bits = (unsigned int)log2((double)(grp_count))+1; thrust::device_ptr<int_type> permutation_final = thrust::device_malloc<int_type>(source_len); thrust::exclusive_scan(d_grp_int, d_grp_int+source_len, d_grp_int, 0); thrust::scatter(d_grp_int, d_grp_int+source_len, permutation, permutation_final); thrust::device_free(permutation); // for(int z = 0; z < 10; z++) // cout << "RES " << permutation_final[z] << endl; unsigned int fit_count = 64/bits; void* d_v1; CUDA_SAFE_CALL(cudaMalloc((void **) &d_v1, 12)); thrust::device_ptr<unsigned int> dd_v((unsigned int*)d_v1); void* s_v1; CUDA_SAFE_CALL(cudaMalloc((void **) &s_v1, 8)); thrust::device_ptr<long long int> dd_sv((long long int*)s_v1); dd_sv[0] = 0; dd_v[0] = bits; dd_v[1] = fit_count; dd_v[2] = 64; thrust::counting_iterator<unsigned int, thrust::device_space_tag> begin(0); void* d; CUDA_SAFE_CALL(cudaMalloc((void **) &d, source_len*float_size)); thrust::device_ptr<char> dd((char*)d); thrust::fill(dd, dd+source_len,0); compress_functor_int ff(thrust::raw_pointer_cast(permutation_final),(unsigned long long int*)d, (long long int*)s_v1, (unsigned int*)d_v1); thrust::for_each(begin, begin + source_len, ff); cudaFree(d_v1); cudaFree(s_v1); thrust::device_ptr<unsigned long long int> s_copy1((unsigned long long int*)d); // make an addition sequence thrust::constant_iterator< long long int> iter(fit_count); thrust::sequence(permutation_final, permutation_final + source_len, 0, 1); thrust::transform(permutation_final, permutation_final + source_len, iter, permutation_final, thrust::divides<long long int>()); unsigned int cnt = (source_len)/fit_count; if (source_len%fit_count > 0) cnt++; thrust::device_ptr<unsigned long long int> fin_seq = thrust::device_malloc<unsigned long long int>(cnt); //cout << "fin seq " << cnt << " " << source_len << endl; thrust::reduce_by_key(permutation_final, permutation_final+source_len,s_copy1,thrust::make_discard_iterator(), fin_seq); orig_lower_val = 0; if (file_name) { cudaMemcpy( host.data(), (void *)thrust::raw_pointer_cast(fin_seq), cnt*8, cudaMemcpyDeviceToHost); //thrust::copy(fin_seq, fin_seq+cnt,host.begin()); thrust::device_free(fin_seq); fstream binary_file(file_name,ios::out|ios::binary|ios::app); 
binary_file.write((char *)&cnt, 4); binary_file.write((char *)host.data(),cnt*8); binary_file.write((char *)&comp_type, 4); binary_file.write((char *)&source_len, 4); // write a dictionary binary_file.write((char *)&grp_count, 4); // create dictionary thrust::device_ptr<char> dict = thrust::device_malloc<char>(grp_count); for(unsigned int j=0; j < mColumnCount; j++) { thrust::transform(d_grp, d_grp+source_len, d_grp_int, bool_to_int()); thrust::copy_if(d_columns[j].begin(),d_columns[j].begin()+source_len,d_grp_int, dict, nz<unsigned int>()); cudaMemcpy( host.data(), (void *)thrust::raw_pointer_cast(dict), grp_count, cudaMemcpyDeviceToHost); binary_file.write((char *)host.data(),grp_count); }; thrust::device_free(dict); binary_file.write((char *)&grp_count, 4); binary_file.write((char *)&cnt, 4); binary_file.write((char *)&source_len, 4); binary_file.write((char *)&bits, 4); binary_file.write((char *)&orig_lower_val, 8); binary_file.write((char *)&fit_count, 4); binary_file.write((char *)&start_val, 8); binary_file.write((char *)&comp_type, 4); binary_file.close(); if(cnt_counts[curr_file] < (cnt*8 + 14*4 + grp_count*mColumnCount)) cnt_counts[curr_file] = (cnt*8 + 14*4 + grp_count*mColumnCount); } else { char* hh; host.resize(sz+cnt*8 + mColumnCount*grp_count + 14*4); hh = (host.data() + sz); ((unsigned int*)hh)[0] = cnt; cudaMemcpy( (unsigned int*)hh + 1, (void *)thrust::raw_pointer_cast(fin_seq), cnt*8, cudaMemcpyDeviceToHost); thrust::device_free(fin_seq); ((unsigned int*)hh)[1+cnt*2] = comp_type; ((unsigned int*)hh)[2+cnt*2] = source_len; // write a dictionary ((unsigned int*)hh)[3+cnt*2] = grp_count; // create dictionary thrust::device_ptr<char> dict = thrust::device_malloc<char>(grp_count); for(unsigned int j=0; j < mColumnCount; j++) { thrust::transform(d_grp, d_grp+source_len, d_grp_int, bool_to_int()); thrust::copy_if(d_columns[j].begin(),d_columns[j].begin()+source_len,d_grp_int, dict, nz<unsigned int>()); cudaMemcpy( (void*)(hh+16+cnt*8+j*grp_count), (void *)thrust::raw_pointer_cast(dict), grp_count, cudaMemcpyDeviceToHost); }; thrust::device_free(dict); ((unsigned int*)(hh+16+cnt*8+mColumnCount*grp_count))[0] = grp_count; ((unsigned int*)(hh+20+cnt*8+mColumnCount*grp_count))[0] = cnt; ((unsigned int*)(hh+24+cnt*8+mColumnCount*grp_count))[0] = source_len; ((unsigned int*)(hh+28+cnt*8+mColumnCount*grp_count))[0] = bits; ((long long int*)(hh+32+cnt*8+mColumnCount*grp_count))[0] = orig_lower_val; ((unsigned int*)(hh+40+cnt*8+mColumnCount*grp_count))[0] = fit_count; ((long long int*)(hh+44+cnt*8+mColumnCount*grp_count))[0] = start_val; ((unsigned int*)(hh+52+cnt*8+mColumnCount*grp_count))[0] = comp_type; }; thrust::device_free(permutation_final); thrust::device_free(d_grp_int); cudaFree(d); thrust::device_free(d_grp); return sz + cnt*8 + mColumnCount*grp_count + 14*4; } template< typename T> unsigned long long int pfor_compress(void* source, unsigned int source_len, char* file_name, thrust::host_vector<T>& host, bool tp, unsigned long long int sz) { unsigned int recCount; long long int orig_lower_val; long long int orig_upper_val; unsigned int bits; unsigned int bit_count = 8*8; unsigned int fit_count; unsigned int comp_type = 0; // FOR long long int start_val = 0; bool sorted = 0; if(tp == 0) recCount = source_len/int_size; else recCount = source_len/float_size; // check if sorted if (tp == 0) { thrust::device_ptr<int_type> s((int_type*)source); sorted = thrust::is_sorted(s, s+recCount); } else { thrust::device_ptr<long long int> s((long long int*)source); sorted = 
thrust::is_sorted(s, s+recCount); }; //cout << "file " << file_name << " is sorted " << sorted << endl; if(sorted) return pfor_delta_compress(source, source_len, file_name, host, tp, sz); // sort the sequence if (tp == 0) { thrust::device_ptr<int_type> s((int_type*)source); orig_lower_val = *(thrust::min_element(s, s + recCount)); orig_upper_val = *(thrust::max_element(s, s + recCount)); //cout << "We need " << (unsigned int)ceil(log2((double)((orig_upper_val - orig_lower_val) + 1))) << " bits to encode original range of " << orig_lower_val << " to " << orig_upper_val << endl; bits = (unsigned int)ceil(log2((double)((orig_upper_val - orig_lower_val) + 1))); } else { thrust::device_ptr<long long int> s((long long int*)source); orig_lower_val = *(thrust::min_element(s, s + recCount)); orig_upper_val = *(thrust::max_element(s, s + recCount)); //cout << "We need " << (unsigned int)ceil(log2((double)((orig_upper_val - orig_lower_val) + 1))) << " bits to encode original range of " << orig_lower_val << " to " << orig_upper_val << endl; bits = (unsigned int)ceil(log2((double)((orig_upper_val - orig_lower_val) + 1))); }; thrust::counting_iterator<unsigned int, thrust::device_space_tag> begin(0); fit_count = bit_count/bits; void* d_v1; CUDA_SAFE_CALL(cudaMalloc((void **) &d_v1, 12)); thrust::device_ptr<unsigned int> dd_v((unsigned int*)d_v1); void* s_v1; CUDA_SAFE_CALL(cudaMalloc((void **) &s_v1, 8)); thrust::device_ptr<long long int> dd_sv((long long int*)s_v1); dd_sv[0] = orig_lower_val; dd_v[0] = bits; dd_v[1] = fit_count; dd_v[2] = bit_count; void* d; CUDA_SAFE_CALL(cudaMalloc((void **) &d, recCount*float_size)); thrust::device_ptr<char> dd((char*)d); thrust::fill(dd, dd+source_len,0); if (tp == 0) { compress_functor_int ff((int_type*)source,(unsigned long long int*)d, (long long int*)s_v1, (unsigned int*)d_v1); thrust::for_each(begin, begin + recCount, ff); } else { compress_functor_float ff((long long int*)source,(unsigned long long int*)d, (long long int*)s_v1, (unsigned int*)d_v1); thrust::for_each(begin, begin + recCount, ff); }; thrust::device_ptr<unsigned long long int> s_copy1((unsigned long long int*)d); // make an addition sequence thrust::device_ptr<unsigned int> add_seq = thrust::device_malloc<unsigned int>(recCount); thrust::constant_iterator<unsigned int> iter(fit_count); thrust::sequence(add_seq, add_seq + recCount, 0, 1); thrust::transform(add_seq, add_seq + recCount, iter, add_seq, thrust::divides<unsigned int>()); unsigned int cnt = (recCount)/fit_count; if(cnt == 0) cnt = 1; // need at least 1 if (recCount%fit_count > 0) cnt++; //thrust::device_ptr<unsigned long long int> fin_seq = thrust::device_malloc<unsigned long long int>(cnt); thrust::device_ptr<unsigned long long int> fin_seq((unsigned long long int*)source); thrust::reduce_by_key(add_seq, add_seq+recCount,s_copy1,thrust::make_discard_iterator(), fin_seq); // copy fin_seq to host unsigned long long int * raw_src = thrust::raw_pointer_cast(fin_seq); //cout << file_name << " CNT " << cnt << endl; if(file_name) { cudaMemcpy( host.data(), (void *)raw_src, cnt*8, cudaMemcpyDeviceToHost); fstream binary_file(file_name,ios::out|ios::binary|ios::app); binary_file.write((char *)&cnt, 4); binary_file.write((char *)&orig_lower_val, 8); binary_file.write((char *)&orig_upper_val, 8); binary_file.write((char *)host.data(),cnt*8); binary_file.write((char *)&comp_type, 4); binary_file.write((char *)&cnt, 4); binary_file.write((char *)&recCount, 4); binary_file.write((char *)&bits, 4); binary_file.write((char *)&orig_lower_val, 8); 
binary_file.write((char *)&fit_count, 4); binary_file.write((char *)&start_val, 8); binary_file.write((char *)&comp_type, 4); binary_file.write((char *)&comp_type, 4); //filler binary_file.close(); if(cnt_counts[curr_file] < cnt) cnt_counts[curr_file] = cnt; } else { char* hh; // resize host to sz + cnt*8 + 15 host.resize(sz+cnt+8); hh = (char*)(host.data() + sz); ((unsigned int*)hh)[0] = cnt; ((long long int*)(hh+4))[0] = orig_lower_val; ((long long int*)(hh+12))[0] = orig_upper_val; cudaMemcpy( hh + 20, (void *)raw_src, cnt*8, cudaMemcpyDeviceToHost); ((unsigned int*)hh)[5+cnt*2] = comp_type; ((unsigned int*)hh)[6+cnt*2] = cnt; ((unsigned int*)hh)[7+cnt*2] = recCount; ((unsigned int*)hh)[8+cnt*2] = bits; ((long long int*)(hh+36+cnt*8))[0] = orig_lower_val; ((unsigned int*)hh)[11+cnt*2] = fit_count; ((long long int*)(hh+48+cnt*8))[0] = start_val; ((unsigned int*)hh)[14+cnt*2] = comp_type; }; thrust::device_free(add_seq); cudaFree(d); cudaFree(d_v1); cudaFree(s_v1); return sz + cnt + 8; }
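Because the compressed block layout is only visible implicitly in the header reads and writes above, a round-trip sketch may make the calling convention clearer. This is a hypothetical example, not part of the original source: it assumes int_type is the project's 8-byte integer type with int_size == sizeof(int_type) (both defined in the headers pulled in through sorts.cu), that the output file does not already exist (pfor_compress opens it in append mode), and that the d_v / s_v scratch buffers only need the 12 and 8 bytes the decompressor actually touches.

// Hypothetical round trip through the file-based path of pfor_compress /
// pfor_decompress.  Error handling is omitted for brevity.
void pfor_roundtrip_example(char* fname, unsigned int recCount)
{
    // Source data on the device (sorted, so the FOR-DELTA path will be taken).
    thrust::device_vector<int_type> d_src(recCount);
    thrust::sequence(d_src.begin(), d_src.end(), 1000);

    // Host scratch vector used by pfor_compress for its device->host copy.
    thrust::host_vector<int_type> h_scratch(recCount);

    pfor_compress(thrust::raw_pointer_cast(d_src.data()),
                  recCount * int_size, fname, h_scratch, 0, 0);

    // Read the whole compressed block back; pfor_decompress parses it from host memory.
    ifstream f(fname, ios::binary | ios::ate);
    size_t len = (size_t)f.tellg();
    thrust::host_vector<char> blob(len);
    f.seekg(0);
    f.read(blob.data(), len);

    // Small device scratch buffers the decompressor expects (3 uints + 1 long long).
    void *d_v, *s_v;
    cudaMalloc(&d_v, 12);
    cudaMalloc(&s_v, 8);

    thrust::device_vector<int_type> d_out(recCount);
    unsigned int outCount = 0;
    pfor_decompress(thrust::raw_pointer_cast(d_out.data()), blob.data(), &outCount, d_v, s_v);

    cudaFree(d_v);
    cudaFree(s_v);
}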
2e9401f08b996f493e65e6d7b49b56c281813403.hip
// !!! This is a file automatically generated by hipify!!! // includes, system #include <algorithm> // includes CUDA #include <hip/hip_runtime.h> #include "floyd.h" #define MAX_MAT_SIZE 4096 #define MAX_BLOCK_SIZE 1024 using namespace std; __device__ inline void update_distance(const int size_mat, const int i, const int j, const int k, int *mat_global, int row_shared[], int col_shared[]) { int i0 = i * size_mat + j; // int i1 = i * size_mat + k; // int i2 = k * size_mat + j; if (col_shared[i] != -1 && row_shared[j] != -1) { int sum = (col_shared[i] + row_shared[j]); if (mat_global[i0] == -1 || sum < mat_global[i0]) mat_global[i0] = sum; } } __global__ void update_mat_on_k(const int size_mat, const int k, int *mat_global) { __shared__ int row_shared[MAX_MAT_SIZE]; __shared__ int col_shared[MAX_MAT_SIZE]; const int index = blockIdx.x * blockDim.x + threadIdx.x; const int i = index % size_mat; const int j = index / size_mat; if (j == 0) { row_shared[threadIdx.x] = mat_global[k * size_mat + i]; col_shared[threadIdx.x] = mat_global[i * size_mat + k]; } __syncthreads(); update_distance(size_mat, i, j, k, mat_global, row_shared, col_shared); __syncthreads(); } void PL_APSP(int *mat, const size_t size_mat) { int *mat_global; int num_node = size_mat * size_mat; int block_size = min(size_mat, (size_t) MAX_BLOCK_SIZE); int num_block = num_node / block_size; hipMalloc(&mat_global, sizeof(int) * num_node); hipMemcpy(mat_global, mat, sizeof(int) * num_node, hipMemcpyHostToDevice); dim3 dimGrid(num_block, 1, 1); dim3 dimBlock(block_size, 1, 1); for (int k = 0; k < size_mat; k++) { hipLaunchKernelGGL(( update_mat_on_k), dim3(dimGrid), dim3(dimBlock), 0, 0, size_mat, k, mat_global); } hipMemcpy(mat, mat_global, sizeof(int) * num_node, hipMemcpyDeviceToHost); }
2e9401f08b996f493e65e6d7b49b56c281813403.cu
// includes, system
#include <algorithm>

// includes CUDA
#include <cuda_runtime.h>

#include "floyd.h"

#define MAX_MAT_SIZE 4096
#define MAX_BLOCK_SIZE 1024

using namespace std;

__device__ inline void update_distance(const int size_mat, const int i, const int j, const int k,
                                        int *mat_global, int row_shared[], int col_shared[])
{
    int i0 = i * size_mat + j;
    // int i1 = i * size_mat + k;
    // int i2 = k * size_mat + j;
    if (col_shared[i] != -1 && row_shared[j] != -1) {
        int sum = (col_shared[i] + row_shared[j]);
        if (mat_global[i0] == -1 || sum < mat_global[i0])
            mat_global[i0] = sum;
    }
}

__global__ void update_mat_on_k(const int size_mat, const int k, int *mat_global)
{
    __shared__ int row_shared[MAX_MAT_SIZE];
    __shared__ int col_shared[MAX_MAT_SIZE];

    const int index = blockIdx.x * blockDim.x + threadIdx.x;
    const int i = index % size_mat;
    const int j = index / size_mat;

    if (j == 0) {
        row_shared[threadIdx.x] = mat_global[k * size_mat + i];
        col_shared[threadIdx.x] = mat_global[i * size_mat + k];
    }
    __syncthreads();

    update_distance(size_mat, i, j, k, mat_global, row_shared, col_shared);
    __syncthreads();
}

void PL_APSP(int *mat, const size_t size_mat)
{
    int *mat_global;
    int num_node = size_mat * size_mat;
    int block_size = min(size_mat, (size_t) MAX_BLOCK_SIZE);
    int num_block = num_node / block_size;

    cudaMalloc(&mat_global, sizeof(int) * num_node);
    cudaMemcpy(mat_global, mat, sizeof(int) * num_node, cudaMemcpyHostToDevice);

    dim3 dimGrid(num_block, 1, 1);
    dim3 dimBlock(block_size, 1, 1);

    for (int k = 0; k < size_mat; k++) {
        update_mat_on_k<<<dimGrid, dimBlock>>>(size_mat, k, mat_global);
    }

    cudaMemcpy(mat, mat_global, sizeof(int) * num_node, cudaMemcpyDeviceToHost);
}
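A minimal host-side driver for the PL_APSP entry point above might look like the following sketch. It assumes floyd.h declares PL_APSP, that mat is a row-major size_mat x size_mat distance matrix, and that -1 marks a missing edge (the convention tested in update_distance); it is illustrative only.

#include <cstdio>
#include "floyd.h"   // assumed to declare: void PL_APSP(int *mat, const size_t size_mat);

int main() {
    const size_t n = 4;
    // -1 = no edge, 0 on the diagonal, flattened row-major
    int mat[n * n] = {
         0,  3, -1,  7,
         8,  0,  2, -1,
         5, -1,  0,  1,
         2, -1, -1,  0
    };

    PL_APSP(mat, n);   // launches update_mat_on_k once per intermediate node k

    for (size_t i = 0; i < n; ++i) {
        for (size_t j = 0; j < n; ++j)
            printf("%3d ", mat[i * n + j]);
        printf("\n");
    }
    return 0;
}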
9a9320b30f3fa61ce4ff5b107a30a3cf39b04571.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // // Copyright (c) 2010, Paul Furgale, Chi Hay Tong // // The original code was written by Paul Furgale and Chi Hay Tong // and later optimized and prepared for integration into OpenCV by Itseez. 
// //M*/ #if !defined CUDA_DISABLER #include "opencv2/gpu/device/common.hpp" #include "opencv2/gpu/device/limits.hpp" #include "opencv2/gpu/device/saturate_cast.hpp" #include "opencv2/gpu/device/reduce.hpp" #include "opencv2/gpu/device/utility.hpp" #include "opencv2/gpu/device/functional.hpp" #include "opencv2/gpu/device/filters.hpp" namespace cv { namespace gpu { namespace device { namespace surf { void loadGlobalConstants(int maxCandidates, int maxFeatures, int img_rows, int img_cols, int nOctaveLayers, float hessianThreshold); void loadOctaveConstants(int octave, int layer_rows, int layer_cols); void bindImgTex(PtrStepSzb img); size_t bindSumTex(PtrStepSz<unsigned int> sum); size_t bindMaskSumTex(PtrStepSz<unsigned int> maskSum); void icvCalcLayerDetAndTrace_gpu(const PtrStepf& det, const PtrStepf& trace, int img_rows, int img_cols, int octave, int nOctaveLayer); void icvFindMaximaInLayer_gpu(const PtrStepf& det, const PtrStepf& trace, int4* maxPosBuffer, unsigned int* maxCounter, int img_rows, int img_cols, int octave, bool use_mask, int nLayers); void icvInterpolateKeypoint_gpu(const PtrStepf& det, const int4* maxPosBuffer, unsigned int maxCounter, float* featureX, float* featureY, int* featureLaplacian, int* featureOctave, float* featureSize, float* featureHessian, unsigned int* featureCounter); void icvCalcOrientation_gpu(const float* featureX, const float* featureY, const float* featureSize, float* featureDir, int nFeatures); void compute_descriptors_gpu(PtrStepSz<float4> descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir, int nFeatures); } }}} namespace cv { namespace gpu { namespace device { namespace surf { //////////////////////////////////////////////////////////////////////// // Global parameters // The maximum number of features (before subpixel interpolation) that memory is reserved for. __constant__ int c_max_candidates; // The maximum number of features that memory is reserved for. __constant__ int c_max_features; // The image size. __constant__ int c_img_rows; __constant__ int c_img_cols; // The number of layers. __constant__ int c_nOctaveLayers; // The hessian threshold. __constant__ float c_hessianThreshold; // The current octave. __constant__ int c_octave; // The current layer size. 
__constant__ int c_layer_rows; __constant__ int c_layer_cols; void loadGlobalConstants(int maxCandidates, int maxFeatures, int img_rows, int img_cols, int nOctaveLayers, float hessianThreshold) { cudaSafeCall( hipMemcpyToSymbol(c_max_candidates, &maxCandidates, sizeof(maxCandidates)) ); cudaSafeCall( hipMemcpyToSymbol(c_max_features, &maxFeatures, sizeof(maxFeatures)) ); cudaSafeCall( hipMemcpyToSymbol(c_img_rows, &img_rows, sizeof(img_rows)) ); cudaSafeCall( hipMemcpyToSymbol(c_img_cols, &img_cols, sizeof(img_cols)) ); cudaSafeCall( hipMemcpyToSymbol(c_nOctaveLayers, &nOctaveLayers, sizeof(nOctaveLayers)) ); cudaSafeCall( hipMemcpyToSymbol(c_hessianThreshold, &hessianThreshold, sizeof(hessianThreshold)) ); } void loadOctaveConstants(int octave, int layer_rows, int layer_cols) { cudaSafeCall( hipMemcpyToSymbol(c_octave, &octave, sizeof(octave)) ); cudaSafeCall( hipMemcpyToSymbol(c_layer_rows, &layer_rows, sizeof(layer_rows)) ); cudaSafeCall( hipMemcpyToSymbol(c_layer_cols, &layer_cols, sizeof(layer_cols)) ); } //////////////////////////////////////////////////////////////////////// // Integral image texture texture<unsigned char, 2, hipReadModeElementType> imgTex(0, hipFilterModePoint, hipAddressModeClamp); texture<unsigned int, 2, hipReadModeElementType> sumTex(0, hipFilterModePoint, hipAddressModeClamp); texture<unsigned int, 2, hipReadModeElementType> maskSumTex(0, hipFilterModePoint, hipAddressModeClamp); void bindImgTex(PtrStepSzb img) { bindTexture(&imgTex, img); } size_t bindSumTex(PtrStepSz<uint> sum) { size_t offset; hipChannelFormatDesc desc_sum = hipCreateChannelDesc<uint>(); cudaSafeCall( hipBindTexture2D(&offset, sumTex, sum.data, desc_sum, sum.cols, sum.rows, sum.step)); return offset / sizeof(uint); } size_t bindMaskSumTex(PtrStepSz<uint> maskSum) { size_t offset; hipChannelFormatDesc desc_sum = hipCreateChannelDesc<uint>(); cudaSafeCall( hipBindTexture2D(&offset, maskSumTex, maskSum.data, desc_sum, maskSum.cols, maskSum.rows, maskSum.step)); return offset / sizeof(uint); } template <int N> __device__ float icvCalcHaarPatternSum(const float src[][5], int oldSize, int newSize, int y, int x) { #if __CUDA_ARCH__ && __CUDA_ARCH__ >= 200 typedef double real_t; #else typedef float real_t; #endif float ratio = (float)newSize / oldSize; real_t d = 0; #pragma unroll for (int k = 0; k < N; ++k) { int dx1 = __float2int_rn(ratio * src[k][0]); int dy1 = __float2int_rn(ratio * src[k][1]); int dx2 = __float2int_rn(ratio * src[k][2]); int dy2 = __float2int_rn(ratio * src[k][3]); real_t t = 0; t += tex2D(sumTex, x + dx1, y + dy1); t -= tex2D(sumTex, x + dx1, y + dy2); t -= tex2D(sumTex, x + dx2, y + dy1); t += tex2D(sumTex, x + dx2, y + dy2); d += t * src[k][4] / ((dx2 - dx1) * (dy2 - dy1)); } return (float)d; } //////////////////////////////////////////////////////////////////////// // Hessian __constant__ float c_DX [3][5] = { {0, 2, 3, 7, 1}, {3, 2, 6, 7, -2}, {6, 2, 9, 7, 1} }; __constant__ float c_DY [3][5] = { {2, 0, 7, 3, 1}, {2, 3, 7, 6, -2}, {2, 6, 7, 9, 1} }; __constant__ float c_DXY[4][5] = { {1, 1, 4, 4, 1}, {5, 1, 8, 4, -1}, {1, 5, 4, 8, -1}, {5, 5, 8, 8, 1} }; __host__ __device__ __forceinline__ int calcSize(int octave, int layer) { /* Wavelet size at first layer of first octave. */ const int HAAR_SIZE0 = 9; /* Wavelet size increment between layers. This should be an even number, such that the wavelet sizes in an octave are either all even or all odd. This ensures that when looking for the neighbours of a sample, the layers above and below are aligned correctly. 
*/ const int HAAR_SIZE_INC = 6; return (HAAR_SIZE0 + HAAR_SIZE_INC * layer) << octave; } __global__ void icvCalcLayerDetAndTrace(PtrStepf det, PtrStepf trace) { // Determine the indices const int gridDim_y = gridDim.y / (c_nOctaveLayers + 2); const int blockIdx_y = blockIdx.y % gridDim_y; const int blockIdx_z = blockIdx.y / gridDim_y; const int j = threadIdx.x + blockIdx.x * blockDim.x; const int i = threadIdx.y + blockIdx_y * blockDim.y; const int layer = blockIdx_z; const int size = calcSize(c_octave, layer); const int samples_i = 1 + ((c_img_rows - size) >> c_octave); const int samples_j = 1 + ((c_img_cols - size) >> c_octave); // Ignore pixels where some of the kernel is outside the image const int margin = (size >> 1) >> c_octave; if (size <= c_img_rows && size <= c_img_cols && i < samples_i && j < samples_j) { const float dx = icvCalcHaarPatternSum<3>(c_DX , 9, size, (i << c_octave), (j << c_octave)); const float dy = icvCalcHaarPatternSum<3>(c_DY , 9, size, (i << c_octave), (j << c_octave)); const float dxy = icvCalcHaarPatternSum<4>(c_DXY, 9, size, (i << c_octave), (j << c_octave)); det.ptr(layer * c_layer_rows + i + margin)[j + margin] = dx * dy - 0.81f * dxy * dxy; trace.ptr(layer * c_layer_rows + i + margin)[j + margin] = dx + dy; } } void icvCalcLayerDetAndTrace_gpu(const PtrStepf& det, const PtrStepf& trace, int img_rows, int img_cols, int octave, int nOctaveLayers) { const int min_size = calcSize(octave, 0); const int max_samples_i = 1 + ((img_rows - min_size) >> octave); const int max_samples_j = 1 + ((img_cols - min_size) >> octave); dim3 threads(16, 16); dim3 grid; grid.x = divUp(max_samples_j, threads.x); grid.y = divUp(max_samples_i, threads.y) * (nOctaveLayers + 2); hipLaunchKernelGGL(( icvCalcLayerDetAndTrace), dim3(grid), dim3(threads), 0, 0, det, trace); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); } //////////////////////////////////////////////////////////////////////// // NONMAX __constant__ float c_DM[5] = {0, 0, 9, 9, 1}; struct WithMask { static __device__ bool check(int sum_i, int sum_j, int size) { float ratio = (float)size / 9.0f; float d = 0; int dx1 = __float2int_rn(ratio * c_DM[0]); int dy1 = __float2int_rn(ratio * c_DM[1]); int dx2 = __float2int_rn(ratio * c_DM[2]); int dy2 = __float2int_rn(ratio * c_DM[3]); float t = 0; t += tex2D(maskSumTex, sum_j + dx1, sum_i + dy1); t -= tex2D(maskSumTex, sum_j + dx1, sum_i + dy2); t -= tex2D(maskSumTex, sum_j + dx2, sum_i + dy1); t += tex2D(maskSumTex, sum_j + dx2, sum_i + dy2); d += t * c_DM[4] / ((dx2 - dx1) * (dy2 - dy1)); return (d >= 0.5f); } }; template <typename Mask> __global__ void icvFindMaximaInLayer(const PtrStepf det, const PtrStepf trace, int4* maxPosBuffer, unsigned int* maxCounter) { #if __CUDA_ARCH__ && __CUDA_ARCH__ >= 110 extern __shared__ float N9[]; // The hidx variables are the indices to the hessian buffer. const int gridDim_y = gridDim.y / c_nOctaveLayers; const int blockIdx_y = blockIdx.y % gridDim_y; const int blockIdx_z = blockIdx.y / gridDim_y; const int layer = blockIdx_z + 1; const int size = calcSize(c_octave, layer); // Ignore pixels without a 3x3x3 neighbourhood in the layer above const int margin = ((calcSize(c_octave, layer + 1) >> 1) >> c_octave) + 1; const int j = threadIdx.x + blockIdx.x * (blockDim.x - 2) + margin - 1; const int i = threadIdx.y + blockIdx_y * (blockDim.y - 2) + margin - 1; // Is this thread within the hessian buffer? 
const int zoff = blockDim.x * blockDim.y; const int localLin = threadIdx.x + threadIdx.y * blockDim.x + zoff; N9[localLin - zoff] = det.ptr(c_layer_rows * (layer - 1) + ::min(::max(i, 0), c_img_rows - 1))[::min(::max(j, 0), c_img_cols - 1)]; N9[localLin ] = det.ptr(c_layer_rows * (layer ) + ::min(::max(i, 0), c_img_rows - 1))[::min(::max(j, 0), c_img_cols - 1)]; N9[localLin + zoff] = det.ptr(c_layer_rows * (layer + 1) + ::min(::max(i, 0), c_img_rows - 1))[::min(::max(j, 0), c_img_cols - 1)]; __syncthreads(); if (i < c_layer_rows - margin && j < c_layer_cols - margin && threadIdx.x > 0 && threadIdx.x < blockDim.x - 1 && threadIdx.y > 0 && threadIdx.y < blockDim.y - 1) { float val0 = N9[localLin]; if (val0 > c_hessianThreshold) { // Coordinates for the start of the wavelet in the sum image. There // is some integer division involved, so don't try to simplify this // (cancel out sampleStep) without checking the result is the same const int sum_i = (i - ((size >> 1) >> c_octave)) << c_octave; const int sum_j = (j - ((size >> 1) >> c_octave)) << c_octave; if (Mask::check(sum_i, sum_j, size)) { // Check to see if we have a max (in its 26 neighbours) const bool condmax = val0 > N9[localLin - 1 - blockDim.x - zoff] && val0 > N9[localLin - blockDim.x - zoff] && val0 > N9[localLin + 1 - blockDim.x - zoff] && val0 > N9[localLin - 1 - zoff] && val0 > N9[localLin - zoff] && val0 > N9[localLin + 1 - zoff] && val0 > N9[localLin - 1 + blockDim.x - zoff] && val0 > N9[localLin + blockDim.x - zoff] && val0 > N9[localLin + 1 + blockDim.x - zoff] && val0 > N9[localLin - 1 - blockDim.x] && val0 > N9[localLin - blockDim.x] && val0 > N9[localLin + 1 - blockDim.x] && val0 > N9[localLin - 1 ] && val0 > N9[localLin + 1 ] && val0 > N9[localLin - 1 + blockDim.x] && val0 > N9[localLin + blockDim.x] && val0 > N9[localLin + 1 + blockDim.x] && val0 > N9[localLin - 1 - blockDim.x + zoff] && val0 > N9[localLin - blockDim.x + zoff] && val0 > N9[localLin + 1 - blockDim.x + zoff] && val0 > N9[localLin - 1 + zoff] && val0 > N9[localLin + zoff] && val0 > N9[localLin + 1 + zoff] && val0 > N9[localLin - 1 + blockDim.x + zoff] && val0 > N9[localLin + blockDim.x + zoff] && val0 > N9[localLin + 1 + blockDim.x + zoff] ; if(condmax) { unsigned int ind = atomicInc(maxCounter,(unsigned int) -1); if (ind < c_max_candidates) { const int laplacian = (int) copysignf(1.0f, trace.ptr(layer * c_layer_rows + i)[j]); maxPosBuffer[ind] = make_int4(j, i, layer, laplacian); } } } } } #endif } void icvFindMaximaInLayer_gpu(const PtrStepf& det, const PtrStepf& trace, int4* maxPosBuffer, unsigned int* maxCounter, int img_rows, int img_cols, int octave, bool use_mask, int nOctaveLayers) { const int layer_rows = img_rows >> octave; const int layer_cols = img_cols >> octave; const int min_margin = ((calcSize(octave, 2) >> 1) >> octave) + 1; dim3 threads(16, 16); dim3 grid; grid.x = divUp(layer_cols - 2 * min_margin, threads.x - 2); grid.y = divUp(layer_rows - 2 * min_margin, threads.y - 2) * nOctaveLayers; const size_t smem_size = threads.x * threads.y * 3 * sizeof(float); if (use_mask) hipLaunchKernelGGL(( icvFindMaximaInLayer<WithMask>), dim3(grid), dim3(threads), smem_size, 0, det, trace, maxPosBuffer, maxCounter); else hipLaunchKernelGGL(( icvFindMaximaInLayer<WithOutMask>), dim3(grid), dim3(threads), smem_size, 0, det, trace, maxPosBuffer, maxCounter); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); } //////////////////////////////////////////////////////////////////////// // INTERPOLATION __global__ void 
icvInterpolateKeypoint(const PtrStepf det, const int4* maxPosBuffer, float* featureX, float* featureY, int* featureLaplacian, int* featureOctave, float* featureSize, float* featureHessian, unsigned int* featureCounter) { #if __CUDA_ARCH__ && __CUDA_ARCH__ >= 110 const int4 maxPos = maxPosBuffer[blockIdx.x]; const int j = maxPos.x - 1 + threadIdx.x; const int i = maxPos.y - 1 + threadIdx.y; const int layer = maxPos.z - 1 + threadIdx.z; __shared__ float N9[3][3][3]; N9[threadIdx.z][threadIdx.y][threadIdx.x] = det.ptr(c_layer_rows * layer + i)[j]; __syncthreads(); if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0) { __shared__ float dD[3]; //dx dD[0] = -0.5f * (N9[1][1][2] - N9[1][1][0]); //dy dD[1] = -0.5f * (N9[1][2][1] - N9[1][0][1]); //ds dD[2] = -0.5f * (N9[2][1][1] - N9[0][1][1]); __shared__ float H[3][3]; //dxx H[0][0] = N9[1][1][0] - 2.0f * N9[1][1][1] + N9[1][1][2]; //dxy H[0][1]= 0.25f * (N9[1][2][2] - N9[1][2][0] - N9[1][0][2] + N9[1][0][0]); //dxs H[0][2]= 0.25f * (N9[2][1][2] - N9[2][1][0] - N9[0][1][2] + N9[0][1][0]); //dyx = dxy H[1][0] = H[0][1]; //dyy H[1][1] = N9[1][0][1] - 2.0f * N9[1][1][1] + N9[1][2][1]; //dys H[1][2]= 0.25f * (N9[2][2][1] - N9[2][0][1] - N9[0][2][1] + N9[0][0][1]); //dsx = dxs H[2][0] = H[0][2]; //dsy = dys H[2][1] = H[1][2]; //dss H[2][2] = N9[0][1][1] - 2.0f * N9[1][1][1] + N9[2][1][1]; __shared__ float x[3]; if (solve3x3(H, dD, x)) { if (::fabs(x[0]) <= 1.f && ::fabs(x[1]) <= 1.f && ::fabs(x[2]) <= 1.f) { // if the step is within the interpolation region, perform it const int size = calcSize(c_octave, maxPos.z); const int sum_i = (maxPos.y - ((size >> 1) >> c_octave)) << c_octave; const int sum_j = (maxPos.x - ((size >> 1) >> c_octave)) << c_octave; const float center_i = sum_i + (float)(size - 1) / 2; const float center_j = sum_j + (float)(size - 1) / 2; const float px = center_j + x[0] * (1 << c_octave); const float py = center_i + x[1] * (1 << c_octave); const int ds = size - calcSize(c_octave, maxPos.z - 1); const float psize = roundf(size + x[2] * ds); /* The sampling intervals and wavelet sized for selecting an orientation and building the keypoint descriptor are defined relative to 's' */ const float s = psize * 1.2f / 9.0f; /* To find the dominant orientation, the gradients in x and y are sampled in a circle of radius 6s using wavelets of size 4s. We ensure the gradient wavelet size is even to ensure the wavelet pattern is balanced and symmetric around its center */ const int grad_wav_size = 2 * __float2int_rn(2.0f * s); // check when grad_wav_size is too big if ((c_img_rows + 1) >= grad_wav_size && (c_img_cols + 1) >= grad_wav_size) { // Get a new feature index. unsigned int ind = atomicInc(featureCounter, (unsigned int)-1); if (ind < c_max_features) { featureX[ind] = px; featureY[ind] = py; featureLaplacian[ind] = maxPos.w; featureOctave[ind] = c_octave; featureSize[ind] = psize; featureHessian[ind] = N9[1][1][1]; } } // grad_wav_size check } // If the subpixel interpolation worked } } // If this is thread 0. 
#endif } void icvInterpolateKeypoint_gpu(const PtrStepf& det, const int4* maxPosBuffer, unsigned int maxCounter, float* featureX, float* featureY, int* featureLaplacian, int* featureOctave, float* featureSize, float* featureHessian, unsigned int* featureCounter) { dim3 threads; threads.x = 3; threads.y = 3; threads.z = 3; dim3 grid; grid.x = maxCounter; hipLaunchKernelGGL(( icvInterpolateKeypoint), dim3(grid), dim3(threads), 0, 0, det, maxPosBuffer, featureX, featureY, featureLaplacian, featureOctave, featureSize, featureHessian, featureCounter); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); } //////////////////////////////////////////////////////////////////////// // Orientation #define ORI_SEARCH_INC 5 #define ORI_WIN 60 #define ORI_SAMPLES 113 __constant__ float c_aptX[ORI_SAMPLES] = {-6, -5, -5, -5, -5, -5, -5, -5, -4, -4, -4, -4, -4, -4, -4, -4, -4, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 6}; __constant__ float c_aptY[ORI_SAMPLES] = {0, -3, -2, -1, 0, 1, 2, 3, -4, -3, -2, -1, 0, 1, 2, 3, 4, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -4, -3, -2, -1, 0, 1, 2, 3, 4, -3, -2, -1, 0, 1, 2, 3, 0}; __constant__ float c_aptW[ORI_SAMPLES] = {0.001455130288377404f, 0.001707611023448408f, 0.002547456417232752f, 0.003238451667129993f, 0.0035081731621176f, 0.003238451667129993f, 0.002547456417232752f, 0.001707611023448408f, 0.002003900473937392f, 0.0035081731621176f, 0.005233579315245152f, 0.00665318313986063f, 0.00720730796456337f, 0.00665318313986063f, 0.005233579315245152f, 0.0035081731621176f, 0.002003900473937392f, 0.001707611023448408f, 0.0035081731621176f, 0.006141661666333675f, 0.009162282571196556f, 0.01164754293859005f, 0.01261763460934162f, 0.01164754293859005f, 0.009162282571196556f, 0.006141661666333675f, 0.0035081731621176f, 0.001707611023448408f, 0.002547456417232752f, 0.005233579315245152f, 0.009162282571196556f, 0.01366852037608624f, 0.01737609319388866f, 0.0188232995569706f, 0.01737609319388866f, 0.01366852037608624f, 0.009162282571196556f, 0.005233579315245152f, 0.002547456417232752f, 0.003238451667129993f, 0.00665318313986063f, 0.01164754293859005f, 0.01737609319388866f, 0.02208934165537357f, 0.02392910048365593f, 0.02208934165537357f, 0.01737609319388866f, 0.01164754293859005f, 0.00665318313986063f, 0.003238451667129993f, 0.001455130288377404f, 0.0035081731621176f, 0.00720730796456337f, 0.01261763460934162f, 0.0188232995569706f, 0.02392910048365593f, 0.02592208795249462f, 0.02392910048365593f, 0.0188232995569706f, 0.01261763460934162f, 0.00720730796456337f, 0.0035081731621176f, 0.001455130288377404f, 0.003238451667129993f, 0.00665318313986063f, 0.01164754293859005f, 0.01737609319388866f, 0.02208934165537357f, 0.02392910048365593f, 0.02208934165537357f, 0.01737609319388866f, 0.01164754293859005f, 0.00665318313986063f, 0.003238451667129993f, 0.002547456417232752f, 0.005233579315245152f, 0.009162282571196556f, 0.01366852037608624f, 0.01737609319388866f, 0.0188232995569706f, 0.01737609319388866f, 0.01366852037608624f, 0.009162282571196556f, 
0.005233579315245152f, 0.002547456417232752f, 0.001707611023448408f, 0.0035081731621176f, 0.006141661666333675f, 0.009162282571196556f, 0.01164754293859005f, 0.01261763460934162f, 0.01164754293859005f, 0.009162282571196556f, 0.006141661666333675f, 0.0035081731621176f, 0.001707611023448408f, 0.002003900473937392f, 0.0035081731621176f, 0.005233579315245152f, 0.00665318313986063f, 0.00720730796456337f, 0.00665318313986063f, 0.005233579315245152f, 0.0035081731621176f, 0.002003900473937392f, 0.001707611023448408f, 0.002547456417232752f, 0.003238451667129993f, 0.0035081731621176f, 0.003238451667129993f, 0.002547456417232752f, 0.001707611023448408f, 0.001455130288377404f}; __constant__ float c_NX[2][5] = {{0, 0, 2, 4, -1}, {2, 0, 4, 4, 1}}; __constant__ float c_NY[2][5] = {{0, 0, 4, 2, 1}, {0, 2, 4, 4, -1}}; __global__ void icvCalcOrientation(const float* featureX, const float* featureY, const float* featureSize, float* featureDir) { __shared__ float s_X[128]; __shared__ float s_Y[128]; __shared__ float s_angle[128]; __shared__ float s_sumx[32 * 4]; __shared__ float s_sumy[32 * 4]; /* The sampling intervals and wavelet sized for selecting an orientation and building the keypoint descriptor are defined relative to 's' */ const float s = featureSize[blockIdx.x] * 1.2f / 9.0f; /* To find the dominant orientation, the gradients in x and y are sampled in a circle of radius 6s using wavelets of size 4s. We ensure the gradient wavelet size is even to ensure the wavelet pattern is balanced and symmetric around its center */ const int grad_wav_size = 2 * __float2int_rn(2.0f * s); // check when grad_wav_size is too big if ((c_img_rows + 1) < grad_wav_size || (c_img_cols + 1) < grad_wav_size) return; // Calc X, Y, angle and store it to shared memory const int tid = threadIdx.y * blockDim.x + threadIdx.x; float X = 0.0f, Y = 0.0f, angle = 0.0f; if (tid < ORI_SAMPLES) { const float margin = (float)(grad_wav_size - 1) / 2.0f; const int x = __float2int_rn(featureX[blockIdx.x] + c_aptX[tid] * s - margin); const int y = __float2int_rn(featureY[blockIdx.x] + c_aptY[tid] * s - margin); if (y >= 0 && y < (c_img_rows + 1) - grad_wav_size && x >= 0 && x < (c_img_cols + 1) - grad_wav_size) { X = c_aptW[tid] * icvCalcHaarPatternSum<2>(c_NX, 4, grad_wav_size, y, x); Y = c_aptW[tid] * icvCalcHaarPatternSum<2>(c_NY, 4, grad_wav_size, y, x); angle = atan2f(Y, X); if (angle < 0) angle += 2.0f * CV_PI_F; angle *= 180.0f / CV_PI_F; } } s_X[tid] = X; s_Y[tid] = Y; s_angle[tid] = angle; __syncthreads(); float bestx = 0, besty = 0, best_mod = 0; #if __CUDA_ARCH__ >= 200 #pragma unroll #endif for (int i = 0; i < 18; ++i) { const int dir = (i * 4 + threadIdx.y) * ORI_SEARCH_INC; float sumx = 0.0f, sumy = 0.0f; int d = ::abs(__float2int_rn(s_angle[threadIdx.x]) - dir); if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2) { sumx = s_X[threadIdx.x]; sumy = s_Y[threadIdx.x]; } d = ::abs(__float2int_rn(s_angle[threadIdx.x + 32]) - dir); if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2) { sumx += s_X[threadIdx.x + 32]; sumy += s_Y[threadIdx.x + 32]; } d = ::abs(__float2int_rn(s_angle[threadIdx.x + 64]) - dir); if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2) { sumx += s_X[threadIdx.x + 64]; sumy += s_Y[threadIdx.x + 64]; } d = ::abs(__float2int_rn(s_angle[threadIdx.x + 96]) - dir); if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2) { sumx += s_X[threadIdx.x + 96]; sumy += s_Y[threadIdx.x + 96]; } plus<float> op; device::reduce<32>(smem_tuple(s_sumx + threadIdx.y * 32, s_sumy + threadIdx.y * 32), thrust::tie(sumx, sumy), threadIdx.x, thrust::make_tuple(op, 
op)); const float temp_mod = sumx * sumx + sumy * sumy; if (temp_mod > best_mod) { best_mod = temp_mod; bestx = sumx; besty = sumy; } __syncthreads(); } if (threadIdx.x == 0) { s_X[threadIdx.y] = bestx; s_Y[threadIdx.y] = besty; s_angle[threadIdx.y] = best_mod; } __syncthreads(); if (threadIdx.x == 0 && threadIdx.y == 0) { int bestIdx = 0; if (s_angle[1] > s_angle[bestIdx]) bestIdx = 1; if (s_angle[2] > s_angle[bestIdx]) bestIdx = 2; if (s_angle[3] > s_angle[bestIdx]) bestIdx = 3; float kp_dir = atan2f(s_Y[bestIdx], s_X[bestIdx]); if (kp_dir < 0) kp_dir += 2.0f * CV_PI_F; kp_dir *= 180.0f / CV_PI_F; kp_dir = 360.0f - kp_dir; if (::fabsf(kp_dir - 360.f) < numeric_limits<float>::epsilon()) kp_dir = 0.f; featureDir[blockIdx.x] = kp_dir; } } #undef ORI_SEARCH_INC #undef ORI_WIN #undef ORI_SAMPLES void icvCalcOrientation_gpu(const float* featureX, const float* featureY, const float* featureSize, float* featureDir, int nFeatures) { dim3 threads; threads.x = 32; threads.y = 4; dim3 grid; grid.x = nFeatures; hipLaunchKernelGGL(( icvCalcOrientation), dim3(grid), dim3(threads), 0, 0, featureX, featureY, featureSize, featureDir); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); } //////////////////////////////////////////////////////////////////////// // Descriptors #define PATCH_SZ 20 __constant__ float c_DW[PATCH_SZ * PATCH_SZ] = { 3.695352233989979e-006f, 8.444558261544444e-006f, 1.760426494001877e-005f, 3.34794785885606e-005f, 5.808438800158911e-005f, 9.193058212986216e-005f, 0.0001327334757661447f, 0.0001748319627949968f, 0.0002100782439811155f, 0.0002302826324012131f, 0.0002302826324012131f, 0.0002100782439811155f, 0.0001748319627949968f, 0.0001327334757661447f, 9.193058212986216e-005f, 5.808438800158911e-005f, 3.34794785885606e-005f, 1.760426494001877e-005f, 8.444558261544444e-006f, 3.695352233989979e-006f, 8.444558261544444e-006f, 1.929736572492402e-005f, 4.022897701361217e-005f, 7.650675252079964e-005f, 0.0001327334903180599f, 0.0002100782585330308f, 0.0003033203829545528f, 0.0003995231236331165f, 0.0004800673632416874f, 0.0005262381164357066f, 0.0005262381164357066f, 0.0004800673632416874f, 0.0003995231236331165f, 0.0003033203829545528f, 0.0002100782585330308f, 0.0001327334903180599f, 7.650675252079964e-005f, 4.022897701361217e-005f, 1.929736572492402e-005f, 8.444558261544444e-006f, 1.760426494001877e-005f, 4.022897701361217e-005f, 8.386484114453197e-005f, 0.0001594926579855382f, 0.0002767078403849155f, 0.0004379475140012801f, 0.0006323281559161842f, 0.0008328808471560478f, 0.001000790391117334f, 0.001097041997127235f, 0.001097041997127235f, 0.001000790391117334f, 0.0008328808471560478f, 0.0006323281559161842f, 0.0004379475140012801f, 0.0002767078403849155f, 0.0001594926579855382f, 8.386484114453197e-005f, 4.022897701361217e-005f, 1.760426494001877e-005f, 3.34794785885606e-005f, 7.650675252079964e-005f, 0.0001594926579855382f, 0.0003033203247468919f, 0.0005262380582280457f, 0.0008328807889483869f, 0.001202550483867526f, 0.001583957928232849f, 0.001903285388834775f, 0.002086334861814976f, 0.002086334861814976f, 0.001903285388834775f, 0.001583957928232849f, 0.001202550483867526f, 0.0008328807889483869f, 0.0005262380582280457f, 0.0003033203247468919f, 0.0001594926579855382f, 7.650675252079964e-005f, 3.34794785885606e-005f, 5.808438800158911e-005f, 0.0001327334903180599f, 0.0002767078403849155f, 0.0005262380582280457f, 0.0009129836107604206f, 0.001444985857233405f, 0.002086335094645619f, 0.002748048631474376f, 0.00330205773934722f, 0.003619635012000799f, 
0.003619635012000799f, 0.00330205773934722f, 0.002748048631474376f, 0.002086335094645619f, 0.001444985857233405f, 0.0009129836107604206f, 0.0005262380582280457f, 0.0002767078403849155f, 0.0001327334903180599f, 5.808438800158911e-005f, 9.193058212986216e-005f, 0.0002100782585330308f, 0.0004379475140012801f, 0.0008328807889483869f, 0.001444985857233405f, 0.002286989474669099f, 0.00330205773934722f, 0.004349356517195702f, 0.00522619066759944f, 0.005728822201490402f, 0.005728822201490402f, 0.00522619066759944f, 0.004349356517195702f, 0.00330205773934722f, 0.002286989474669099f, 0.001444985857233405f, 0.0008328807889483869f, 0.0004379475140012801f, 0.0002100782585330308f, 9.193058212986216e-005f, 0.0001327334757661447f, 0.0003033203829545528f, 0.0006323281559161842f, 0.001202550483867526f, 0.002086335094645619f, 0.00330205773934722f, 0.004767658654600382f, 0.006279794964939356f, 0.007545807864516974f, 0.008271530270576477f, 0.008271530270576477f, 0.007545807864516974f, 0.006279794964939356f, 0.004767658654600382f, 0.00330205773934722f, 0.002086335094645619f, 0.001202550483867526f, 0.0006323281559161842f, 0.0003033203829545528f, 0.0001327334757661447f, 0.0001748319627949968f, 0.0003995231236331165f, 0.0008328808471560478f, 0.001583957928232849f, 0.002748048631474376f, 0.004349356517195702f, 0.006279794964939356f, 0.008271529339253902f, 0.009939077310264111f, 0.01089497376233339f, 0.01089497376233339f, 0.009939077310264111f, 0.008271529339253902f, 0.006279794964939356f, 0.004349356517195702f, 0.002748048631474376f, 0.001583957928232849f, 0.0008328808471560478f, 0.0003995231236331165f, 0.0001748319627949968f, 0.0002100782439811155f, 0.0004800673632416874f, 0.001000790391117334f, 0.001903285388834775f, 0.00330205773934722f, 0.00522619066759944f, 0.007545807864516974f, 0.009939077310264111f, 0.01194280479103327f, 0.01309141051024199f, 0.01309141051024199f, 0.01194280479103327f, 0.009939077310264111f, 0.007545807864516974f, 0.00522619066759944f, 0.00330205773934722f, 0.001903285388834775f, 0.001000790391117334f, 0.0004800673632416874f, 0.0002100782439811155f, 0.0002302826324012131f, 0.0005262381164357066f, 0.001097041997127235f, 0.002086334861814976f, 0.003619635012000799f, 0.005728822201490402f, 0.008271530270576477f, 0.01089497376233339f, 0.01309141051024199f, 0.01435048412531614f, 0.01435048412531614f, 0.01309141051024199f, 0.01089497376233339f, 0.008271530270576477f, 0.005728822201490402f, 0.003619635012000799f, 0.002086334861814976f, 0.001097041997127235f, 0.0005262381164357066f, 0.0002302826324012131f, 0.0002302826324012131f, 0.0005262381164357066f, 0.001097041997127235f, 0.002086334861814976f, 0.003619635012000799f, 0.005728822201490402f, 0.008271530270576477f, 0.01089497376233339f, 0.01309141051024199f, 0.01435048412531614f, 0.01435048412531614f, 0.01309141051024199f, 0.01089497376233339f, 0.008271530270576477f, 0.005728822201490402f, 0.003619635012000799f, 0.002086334861814976f, 0.001097041997127235f, 0.0005262381164357066f, 0.0002302826324012131f, 0.0002100782439811155f, 0.0004800673632416874f, 0.001000790391117334f, 0.001903285388834775f, 0.00330205773934722f, 0.00522619066759944f, 0.007545807864516974f, 0.009939077310264111f, 0.01194280479103327f, 0.01309141051024199f, 0.01309141051024199f, 0.01194280479103327f, 0.009939077310264111f, 0.007545807864516974f, 0.00522619066759944f, 0.00330205773934722f, 0.001903285388834775f, 0.001000790391117334f, 0.0004800673632416874f, 0.0002100782439811155f, 0.0001748319627949968f, 0.0003995231236331165f, 0.0008328808471560478f, 0.001583957928232849f, 
0.002748048631474376f, 0.004349356517195702f, 0.006279794964939356f, 0.008271529339253902f, 0.009939077310264111f, 0.01089497376233339f, 0.01089497376233339f, 0.009939077310264111f, 0.008271529339253902f, 0.006279794964939356f, 0.004349356517195702f, 0.002748048631474376f, 0.001583957928232849f, 0.0008328808471560478f, 0.0003995231236331165f, 0.0001748319627949968f, 0.0001327334757661447f, 0.0003033203829545528f, 0.0006323281559161842f, 0.001202550483867526f, 0.002086335094645619f, 0.00330205773934722f, 0.004767658654600382f, 0.006279794964939356f, 0.007545807864516974f, 0.008271530270576477f, 0.008271530270576477f, 0.007545807864516974f, 0.006279794964939356f, 0.004767658654600382f, 0.00330205773934722f, 0.002086335094645619f, 0.001202550483867526f, 0.0006323281559161842f, 0.0003033203829545528f, 0.0001327334757661447f, 9.193058212986216e-005f, 0.0002100782585330308f, 0.0004379475140012801f, 0.0008328807889483869f, 0.001444985857233405f, 0.002286989474669099f, 0.00330205773934722f, 0.004349356517195702f, 0.00522619066759944f, 0.005728822201490402f, 0.005728822201490402f, 0.00522619066759944f, 0.004349356517195702f, 0.00330205773934722f, 0.002286989474669099f, 0.001444985857233405f, 0.0008328807889483869f, 0.0004379475140012801f, 0.0002100782585330308f, 9.193058212986216e-005f, 5.808438800158911e-005f, 0.0001327334903180599f, 0.0002767078403849155f, 0.0005262380582280457f, 0.0009129836107604206f, 0.001444985857233405f, 0.002086335094645619f, 0.002748048631474376f, 0.00330205773934722f, 0.003619635012000799f, 0.003619635012000799f, 0.00330205773934722f, 0.002748048631474376f, 0.002086335094645619f, 0.001444985857233405f, 0.0009129836107604206f, 0.0005262380582280457f, 0.0002767078403849155f, 0.0001327334903180599f, 5.808438800158911e-005f, 3.34794785885606e-005f, 7.650675252079964e-005f, 0.0001594926579855382f, 0.0003033203247468919f, 0.0005262380582280457f, 0.0008328807889483869f, 0.001202550483867526f, 0.001583957928232849f, 0.001903285388834775f, 0.002086334861814976f, 0.002086334861814976f, 0.001903285388834775f, 0.001583957928232849f, 0.001202550483867526f, 0.0008328807889483869f, 0.0005262380582280457f, 0.0003033203247468919f, 0.0001594926579855382f, 7.650675252079964e-005f, 3.34794785885606e-005f, 1.760426494001877e-005f, 4.022897701361217e-005f, 8.386484114453197e-005f, 0.0001594926579855382f, 0.0002767078403849155f, 0.0004379475140012801f, 0.0006323281559161842f, 0.0008328808471560478f, 0.001000790391117334f, 0.001097041997127235f, 0.001097041997127235f, 0.001000790391117334f, 0.0008328808471560478f, 0.0006323281559161842f, 0.0004379475140012801f, 0.0002767078403849155f, 0.0001594926579855382f, 8.386484114453197e-005f, 4.022897701361217e-005f, 1.760426494001877e-005f, 8.444558261544444e-006f, 1.929736572492402e-005f, 4.022897701361217e-005f, 7.650675252079964e-005f, 0.0001327334903180599f, 0.0002100782585330308f, 0.0003033203829545528f, 0.0003995231236331165f, 0.0004800673632416874f, 0.0005262381164357066f, 0.0005262381164357066f, 0.0004800673632416874f, 0.0003995231236331165f, 0.0003033203829545528f, 0.0002100782585330308f, 0.0001327334903180599f, 7.650675252079964e-005f, 4.022897701361217e-005f, 1.929736572492402e-005f, 8.444558261544444e-006f, 3.695352233989979e-006f, 8.444558261544444e-006f, 1.760426494001877e-005f, 3.34794785885606e-005f, 5.808438800158911e-005f, 9.193058212986216e-005f, 0.0001327334757661447f, 0.0001748319627949968f, 0.0002100782439811155f, 0.0002302826324012131f, 0.0002302826324012131f, 0.0002100782439811155f, 0.0001748319627949968f, 0.0001327334757661447f, 
9.193058212986216e-005f, 5.808438800158911e-005f, 3.34794785885606e-005f, 1.760426494001877e-005f, 8.444558261544444e-006f, 3.695352233989979e-006f }; struct WinReader { typedef uchar elem_type; __device__ __forceinline__ uchar operator ()(int i, int j) const { float pixel_x = centerX + (win_offset + j) * cos_dir + (win_offset + i) * sin_dir; float pixel_y = centerY - (win_offset + j) * sin_dir + (win_offset + i) * cos_dir; return tex2D(imgTex, pixel_x, pixel_y); } float centerX; float centerY; float win_offset; float cos_dir; float sin_dir; int width; int height; }; __device__ void calc_dx_dy(const float* featureX, const float* featureY, const float* featureSize, const float* featureDir, float& dx, float& dy); __device__ void calc_dx_dy(const float* featureX, const float* featureY, const float* featureSize, const float* featureDir, float& dx, float& dy) { __shared__ float s_PATCH[PATCH_SZ + 1][PATCH_SZ + 1]; dx = dy = 0.0f; WinReader win; win.centerX = featureX[blockIdx.x]; win.centerY = featureY[blockIdx.x]; // The sampling intervals and wavelet sized for selecting an orientation // and building the keypoint descriptor are defined relative to 's' const float s = featureSize[blockIdx.x] * 1.2f / 9.0f; // Extract a window of pixels around the keypoint of size 20s const int win_size = (int)((PATCH_SZ + 1) * s); win.width = win.height = win_size; // Nearest neighbour version (faster) win.win_offset = -(win_size - 1.0f) / 2.0f; float descriptor_dir = 360.0f - featureDir[blockIdx.x]; if (::fabsf(descriptor_dir - 360.f) < numeric_limits<float>::epsilon()) descriptor_dir = 0.f; descriptor_dir *= CV_PI_F / 180.0f; sincosf(descriptor_dir, &win.sin_dir, &win.cos_dir); const int tid = threadIdx.y * blockDim.x + threadIdx.x; const int xLoadInd = tid % (PATCH_SZ + 1); const int yLoadInd = tid / (PATCH_SZ + 1); if (yLoadInd < (PATCH_SZ + 1)) { if (s > 1) { AreaFilter<WinReader> filter(win, s, s); s_PATCH[yLoadInd][xLoadInd] = filter(yLoadInd, xLoadInd); } else { LinearFilter<WinReader> filter(win); s_PATCH[yLoadInd][xLoadInd] = filter(yLoadInd * s, xLoadInd * s); } } __syncthreads(); const int xPatchInd = threadIdx.x % 5; const int yPatchInd = threadIdx.x / 5; if (yPatchInd < 5) { const int xBlockInd = threadIdx.y % 4; const int yBlockInd = threadIdx.y / 4; const int xInd = xBlockInd * 5 + xPatchInd; const int yInd = yBlockInd * 5 + yPatchInd; const float dw = c_DW[yInd * PATCH_SZ + xInd]; dx = (s_PATCH[yInd ][xInd + 1] - s_PATCH[yInd][xInd] + s_PATCH[yInd + 1][xInd + 1] - s_PATCH[yInd + 1][xInd ]) * dw; dy = (s_PATCH[yInd + 1][xInd ] - s_PATCH[yInd][xInd] + s_PATCH[yInd + 1][xInd + 1] - s_PATCH[yInd ][xInd + 1]) * dw; } } __global__ void compute_descriptors_64(PtrStep<float4> descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir) { __shared__ float smem[32 * 16]; float* sRow = smem + threadIdx.y * 32; float dx, dy; calc_dx_dy(featureX, featureY, featureSize, featureDir, dx, dy); float dxabs = ::fabsf(dx); float dyabs = ::fabsf(dy); plus<float> op; reduce<32>(sRow, dx, threadIdx.x, op); reduce<32>(sRow, dy, threadIdx.x, op); reduce<32>(sRow, dxabs, threadIdx.x, op); reduce<32>(sRow, dyabs, threadIdx.x, op); float4* descriptors_block = descriptors.ptr(blockIdx.x) + threadIdx.y; // write dx, dy, |dx|, |dy| if (threadIdx.x == 0) *descriptors_block = make_float4(dx, dy, dxabs, dyabs); } __global__ void compute_descriptors_128(PtrStep<float4> descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir) { 
__shared__ float smem[32 * 16]; float* sRow = smem + threadIdx.y * 32; float dx, dy; calc_dx_dy(featureX, featureY, featureSize, featureDir, dx, dy); float4* descriptors_block = descriptors.ptr(blockIdx.x) + threadIdx.y * 2; plus<float> op; float d1 = 0.0f; float d2 = 0.0f; float abs1 = 0.0f; float abs2 = 0.0f; if (dy >= 0) { d1 = dx; abs1 = ::fabsf(dx); } else { d2 = dx; abs2 = ::fabsf(dx); } reduce<32>(sRow, d1, threadIdx.x, op); reduce<32>(sRow, d2, threadIdx.x, op); reduce<32>(sRow, abs1, threadIdx.x, op); reduce<32>(sRow, abs2, threadIdx.x, op); // write dx (dy >= 0), |dx| (dy >= 0), dx (dy < 0), |dx| (dy < 0) if (threadIdx.x == 0) descriptors_block[0] = make_float4(d1, abs1, d2, abs2); if (dx >= 0) { d1 = dy; abs1 = ::fabsf(dy); d2 = 0.0f; abs2 = 0.0f; } else { d1 = 0.0f; abs1 = 0.0f; d2 = dy; abs2 = ::fabsf(dy); } reduce<32>(sRow, d1, threadIdx.x, op); reduce<32>(sRow, d2, threadIdx.x, op); reduce<32>(sRow, abs1, threadIdx.x, op); reduce<32>(sRow, abs2, threadIdx.x, op); // write dy (dx >= 0), |dy| (dx >= 0), dy (dx < 0), |dy| (dx < 0) if (threadIdx.x == 0) descriptors_block[1] = make_float4(d1, abs1, d2, abs2); } template <int BLOCK_DIM_X> __global__ void normalize_descriptors(PtrStepf descriptors) { __shared__ float smem[BLOCK_DIM_X]; __shared__ float s_len; // no need for thread ID float* descriptor_base = descriptors.ptr(blockIdx.x); // read in the unnormalized descriptor values (squared) const float val = descriptor_base[threadIdx.x]; float len = val * val; reduce<BLOCK_DIM_X>(smem, len, threadIdx.x, plus<float>()); if (threadIdx.x == 0) s_len = ::sqrtf(len); __syncthreads(); // normalize and store in output descriptor_base[threadIdx.x] = val / s_len; } void compute_descriptors_gpu(PtrStepSz<float4> descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir, int nFeatures) { // compute unnormalized descriptors, then normalize them - odd indexing since grid must be 2D if (descriptors.cols == 64) { hipLaunchKernelGGL(( compute_descriptors_64), dim3(nFeatures), dim3(dim3(32, 16)), 0, 0, descriptors, featureX, featureY, featureSize, featureDir); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); hipLaunchKernelGGL(( normalize_descriptors<64>), dim3(nFeatures), dim3(64), 0, 0, (PtrStepSzf) descriptors); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); } else { hipLaunchKernelGGL(( compute_descriptors_128), dim3(nFeatures), dim3(dim3(32, 16)), 0, 0, descriptors, featureX, featureY, featureSize, featureDir); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); hipLaunchKernelGGL(( normalize_descriptors<128>), dim3(nFeatures), dim3(128), 0, 0, (PtrStepSzf) descriptors); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); } } } // namespace surf }}} // namespace cv { namespace gpu { namespace device #endif /* CUDA_DISABLER */
9a9320b30f3fa61ce4ff5b107a30a3cf39b04571.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // // Copyright (c) 2010, Paul Furgale, Chi Hay Tong // // The original code was written by Paul Furgale and Chi Hay Tong // and later optimized and prepared for integration into OpenCV by Itseez. 
// //M*/ #if !defined CUDA_DISABLER #include "opencv2/gpu/device/common.hpp" #include "opencv2/gpu/device/limits.hpp" #include "opencv2/gpu/device/saturate_cast.hpp" #include "opencv2/gpu/device/reduce.hpp" #include "opencv2/gpu/device/utility.hpp" #include "opencv2/gpu/device/functional.hpp" #include "opencv2/gpu/device/filters.hpp" namespace cv { namespace gpu { namespace device { namespace surf { void loadGlobalConstants(int maxCandidates, int maxFeatures, int img_rows, int img_cols, int nOctaveLayers, float hessianThreshold); void loadOctaveConstants(int octave, int layer_rows, int layer_cols); void bindImgTex(PtrStepSzb img); size_t bindSumTex(PtrStepSz<unsigned int> sum); size_t bindMaskSumTex(PtrStepSz<unsigned int> maskSum); void icvCalcLayerDetAndTrace_gpu(const PtrStepf& det, const PtrStepf& trace, int img_rows, int img_cols, int octave, int nOctaveLayer); void icvFindMaximaInLayer_gpu(const PtrStepf& det, const PtrStepf& trace, int4* maxPosBuffer, unsigned int* maxCounter, int img_rows, int img_cols, int octave, bool use_mask, int nLayers); void icvInterpolateKeypoint_gpu(const PtrStepf& det, const int4* maxPosBuffer, unsigned int maxCounter, float* featureX, float* featureY, int* featureLaplacian, int* featureOctave, float* featureSize, float* featureHessian, unsigned int* featureCounter); void icvCalcOrientation_gpu(const float* featureX, const float* featureY, const float* featureSize, float* featureDir, int nFeatures); void compute_descriptors_gpu(PtrStepSz<float4> descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir, int nFeatures); } }}} namespace cv { namespace gpu { namespace device { namespace surf { //////////////////////////////////////////////////////////////////////// // Global parameters // The maximum number of features (before subpixel interpolation) that memory is reserved for. __constant__ int c_max_candidates; // The maximum number of features that memory is reserved for. __constant__ int c_max_features; // The image size. __constant__ int c_img_rows; __constant__ int c_img_cols; // The number of layers. __constant__ int c_nOctaveLayers; // The hessian threshold. __constant__ float c_hessianThreshold; // The current octave. __constant__ int c_octave; // The current layer size. 
__constant__ int c_layer_rows; __constant__ int c_layer_cols; void loadGlobalConstants(int maxCandidates, int maxFeatures, int img_rows, int img_cols, int nOctaveLayers, float hessianThreshold) { cudaSafeCall( cudaMemcpyToSymbol(c_max_candidates, &maxCandidates, sizeof(maxCandidates)) ); cudaSafeCall( cudaMemcpyToSymbol(c_max_features, &maxFeatures, sizeof(maxFeatures)) ); cudaSafeCall( cudaMemcpyToSymbol(c_img_rows, &img_rows, sizeof(img_rows)) ); cudaSafeCall( cudaMemcpyToSymbol(c_img_cols, &img_cols, sizeof(img_cols)) ); cudaSafeCall( cudaMemcpyToSymbol(c_nOctaveLayers, &nOctaveLayers, sizeof(nOctaveLayers)) ); cudaSafeCall( cudaMemcpyToSymbol(c_hessianThreshold, &hessianThreshold, sizeof(hessianThreshold)) ); } void loadOctaveConstants(int octave, int layer_rows, int layer_cols) { cudaSafeCall( cudaMemcpyToSymbol(c_octave, &octave, sizeof(octave)) ); cudaSafeCall( cudaMemcpyToSymbol(c_layer_rows, &layer_rows, sizeof(layer_rows)) ); cudaSafeCall( cudaMemcpyToSymbol(c_layer_cols, &layer_cols, sizeof(layer_cols)) ); } //////////////////////////////////////////////////////////////////////// // Integral image texture texture<unsigned char, 2, cudaReadModeElementType> imgTex(0, cudaFilterModePoint, cudaAddressModeClamp); texture<unsigned int, 2, cudaReadModeElementType> sumTex(0, cudaFilterModePoint, cudaAddressModeClamp); texture<unsigned int, 2, cudaReadModeElementType> maskSumTex(0, cudaFilterModePoint, cudaAddressModeClamp); void bindImgTex(PtrStepSzb img) { bindTexture(&imgTex, img); } size_t bindSumTex(PtrStepSz<uint> sum) { size_t offset; cudaChannelFormatDesc desc_sum = cudaCreateChannelDesc<uint>(); cudaSafeCall( cudaBindTexture2D(&offset, sumTex, sum.data, desc_sum, sum.cols, sum.rows, sum.step)); return offset / sizeof(uint); } size_t bindMaskSumTex(PtrStepSz<uint> maskSum) { size_t offset; cudaChannelFormatDesc desc_sum = cudaCreateChannelDesc<uint>(); cudaSafeCall( cudaBindTexture2D(&offset, maskSumTex, maskSum.data, desc_sum, maskSum.cols, maskSum.rows, maskSum.step)); return offset / sizeof(uint); } template <int N> __device__ float icvCalcHaarPatternSum(const float src[][5], int oldSize, int newSize, int y, int x) { #if __CUDA_ARCH__ && __CUDA_ARCH__ >= 200 typedef double real_t; #else typedef float real_t; #endif float ratio = (float)newSize / oldSize; real_t d = 0; #pragma unroll for (int k = 0; k < N; ++k) { int dx1 = __float2int_rn(ratio * src[k][0]); int dy1 = __float2int_rn(ratio * src[k][1]); int dx2 = __float2int_rn(ratio * src[k][2]); int dy2 = __float2int_rn(ratio * src[k][3]); real_t t = 0; t += tex2D(sumTex, x + dx1, y + dy1); t -= tex2D(sumTex, x + dx1, y + dy2); t -= tex2D(sumTex, x + dx2, y + dy1); t += tex2D(sumTex, x + dx2, y + dy2); d += t * src[k][4] / ((dx2 - dx1) * (dy2 - dy1)); } return (float)d; } //////////////////////////////////////////////////////////////////////// // Hessian __constant__ float c_DX [3][5] = { {0, 2, 3, 7, 1}, {3, 2, 6, 7, -2}, {6, 2, 9, 7, 1} }; __constant__ float c_DY [3][5] = { {2, 0, 7, 3, 1}, {2, 3, 7, 6, -2}, {2, 6, 7, 9, 1} }; __constant__ float c_DXY[4][5] = { {1, 1, 4, 4, 1}, {5, 1, 8, 4, -1}, {1, 5, 4, 8, -1}, {5, 5, 8, 8, 1} }; __host__ __device__ __forceinline__ int calcSize(int octave, int layer) { /* Wavelet size at first layer of first octave. */ const int HAAR_SIZE0 = 9; /* Wavelet size increment between layers. This should be an even number, such that the wavelet sizes in an octave are either all even or all odd. 
This ensures that when looking for the neighbours of a sample, the layers above and below are aligned correctly. */ const int HAAR_SIZE_INC = 6; return (HAAR_SIZE0 + HAAR_SIZE_INC * layer) << octave; } __global__ void icvCalcLayerDetAndTrace(PtrStepf det, PtrStepf trace) { // Determine the indices const int gridDim_y = gridDim.y / (c_nOctaveLayers + 2); const int blockIdx_y = blockIdx.y % gridDim_y; const int blockIdx_z = blockIdx.y / gridDim_y; const int j = threadIdx.x + blockIdx.x * blockDim.x; const int i = threadIdx.y + blockIdx_y * blockDim.y; const int layer = blockIdx_z; const int size = calcSize(c_octave, layer); const int samples_i = 1 + ((c_img_rows - size) >> c_octave); const int samples_j = 1 + ((c_img_cols - size) >> c_octave); // Ignore pixels where some of the kernel is outside the image const int margin = (size >> 1) >> c_octave; if (size <= c_img_rows && size <= c_img_cols && i < samples_i && j < samples_j) { const float dx = icvCalcHaarPatternSum<3>(c_DX , 9, size, (i << c_octave), (j << c_octave)); const float dy = icvCalcHaarPatternSum<3>(c_DY , 9, size, (i << c_octave), (j << c_octave)); const float dxy = icvCalcHaarPatternSum<4>(c_DXY, 9, size, (i << c_octave), (j << c_octave)); det.ptr(layer * c_layer_rows + i + margin)[j + margin] = dx * dy - 0.81f * dxy * dxy; trace.ptr(layer * c_layer_rows + i + margin)[j + margin] = dx + dy; } } void icvCalcLayerDetAndTrace_gpu(const PtrStepf& det, const PtrStepf& trace, int img_rows, int img_cols, int octave, int nOctaveLayers) { const int min_size = calcSize(octave, 0); const int max_samples_i = 1 + ((img_rows - min_size) >> octave); const int max_samples_j = 1 + ((img_cols - min_size) >> octave); dim3 threads(16, 16); dim3 grid; grid.x = divUp(max_samples_j, threads.x); grid.y = divUp(max_samples_i, threads.y) * (nOctaveLayers + 2); icvCalcLayerDetAndTrace<<<grid, threads>>>(det, trace); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); } //////////////////////////////////////////////////////////////////////// // NONMAX __constant__ float c_DM[5] = {0, 0, 9, 9, 1}; struct WithMask { static __device__ bool check(int sum_i, int sum_j, int size) { float ratio = (float)size / 9.0f; float d = 0; int dx1 = __float2int_rn(ratio * c_DM[0]); int dy1 = __float2int_rn(ratio * c_DM[1]); int dx2 = __float2int_rn(ratio * c_DM[2]); int dy2 = __float2int_rn(ratio * c_DM[3]); float t = 0; t += tex2D(maskSumTex, sum_j + dx1, sum_i + dy1); t -= tex2D(maskSumTex, sum_j + dx1, sum_i + dy2); t -= tex2D(maskSumTex, sum_j + dx2, sum_i + dy1); t += tex2D(maskSumTex, sum_j + dx2, sum_i + dy2); d += t * c_DM[4] / ((dx2 - dx1) * (dy2 - dy1)); return (d >= 0.5f); } }; template <typename Mask> __global__ void icvFindMaximaInLayer(const PtrStepf det, const PtrStepf trace, int4* maxPosBuffer, unsigned int* maxCounter) { #if __CUDA_ARCH__ && __CUDA_ARCH__ >= 110 extern __shared__ float N9[]; // The hidx variables are the indices to the hessian buffer. const int gridDim_y = gridDim.y / c_nOctaveLayers; const int blockIdx_y = blockIdx.y % gridDim_y; const int blockIdx_z = blockIdx.y / gridDim_y; const int layer = blockIdx_z + 1; const int size = calcSize(c_octave, layer); // Ignore pixels without a 3x3x3 neighbourhood in the layer above const int margin = ((calcSize(c_octave, layer + 1) >> 1) >> c_octave) + 1; const int j = threadIdx.x + blockIdx.x * (blockDim.x - 2) + margin - 1; const int i = threadIdx.y + blockIdx_y * (blockDim.y - 2) + margin - 1; // Is this thread within the hessian buffer? 
const int zoff = blockDim.x * blockDim.y; const int localLin = threadIdx.x + threadIdx.y * blockDim.x + zoff; N9[localLin - zoff] = det.ptr(c_layer_rows * (layer - 1) + ::min(::max(i, 0), c_img_rows - 1))[::min(::max(j, 0), c_img_cols - 1)]; N9[localLin ] = det.ptr(c_layer_rows * (layer ) + ::min(::max(i, 0), c_img_rows - 1))[::min(::max(j, 0), c_img_cols - 1)]; N9[localLin + zoff] = det.ptr(c_layer_rows * (layer + 1) + ::min(::max(i, 0), c_img_rows - 1))[::min(::max(j, 0), c_img_cols - 1)]; __syncthreads(); if (i < c_layer_rows - margin && j < c_layer_cols - margin && threadIdx.x > 0 && threadIdx.x < blockDim.x - 1 && threadIdx.y > 0 && threadIdx.y < blockDim.y - 1) { float val0 = N9[localLin]; if (val0 > c_hessianThreshold) { // Coordinates for the start of the wavelet in the sum image. There // is some integer division involved, so don't try to simplify this // (cancel out sampleStep) without checking the result is the same const int sum_i = (i - ((size >> 1) >> c_octave)) << c_octave; const int sum_j = (j - ((size >> 1) >> c_octave)) << c_octave; if (Mask::check(sum_i, sum_j, size)) { // Check to see if we have a max (in its 26 neighbours) const bool condmax = val0 > N9[localLin - 1 - blockDim.x - zoff] && val0 > N9[localLin - blockDim.x - zoff] && val0 > N9[localLin + 1 - blockDim.x - zoff] && val0 > N9[localLin - 1 - zoff] && val0 > N9[localLin - zoff] && val0 > N9[localLin + 1 - zoff] && val0 > N9[localLin - 1 + blockDim.x - zoff] && val0 > N9[localLin + blockDim.x - zoff] && val0 > N9[localLin + 1 + blockDim.x - zoff] && val0 > N9[localLin - 1 - blockDim.x] && val0 > N9[localLin - blockDim.x] && val0 > N9[localLin + 1 - blockDim.x] && val0 > N9[localLin - 1 ] && val0 > N9[localLin + 1 ] && val0 > N9[localLin - 1 + blockDim.x] && val0 > N9[localLin + blockDim.x] && val0 > N9[localLin + 1 + blockDim.x] && val0 > N9[localLin - 1 - blockDim.x + zoff] && val0 > N9[localLin - blockDim.x + zoff] && val0 > N9[localLin + 1 - blockDim.x + zoff] && val0 > N9[localLin - 1 + zoff] && val0 > N9[localLin + zoff] && val0 > N9[localLin + 1 + zoff] && val0 > N9[localLin - 1 + blockDim.x + zoff] && val0 > N9[localLin + blockDim.x + zoff] && val0 > N9[localLin + 1 + blockDim.x + zoff] ; if(condmax) { unsigned int ind = atomicInc(maxCounter,(unsigned int) -1); if (ind < c_max_candidates) { const int laplacian = (int) copysignf(1.0f, trace.ptr(layer * c_layer_rows + i)[j]); maxPosBuffer[ind] = make_int4(j, i, layer, laplacian); } } } } } #endif } void icvFindMaximaInLayer_gpu(const PtrStepf& det, const PtrStepf& trace, int4* maxPosBuffer, unsigned int* maxCounter, int img_rows, int img_cols, int octave, bool use_mask, int nOctaveLayers) { const int layer_rows = img_rows >> octave; const int layer_cols = img_cols >> octave; const int min_margin = ((calcSize(octave, 2) >> 1) >> octave) + 1; dim3 threads(16, 16); dim3 grid; grid.x = divUp(layer_cols - 2 * min_margin, threads.x - 2); grid.y = divUp(layer_rows - 2 * min_margin, threads.y - 2) * nOctaveLayers; const size_t smem_size = threads.x * threads.y * 3 * sizeof(float); if (use_mask) icvFindMaximaInLayer<WithMask><<<grid, threads, smem_size>>>(det, trace, maxPosBuffer, maxCounter); else icvFindMaximaInLayer<WithOutMask><<<grid, threads, smem_size>>>(det, trace, maxPosBuffer, maxCounter); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); } //////////////////////////////////////////////////////////////////////// // INTERPOLATION __global__ void icvInterpolateKeypoint(const PtrStepf det, const int4* maxPosBuffer, float* 
featureX, float* featureY, int* featureLaplacian, int* featureOctave, float* featureSize, float* featureHessian, unsigned int* featureCounter) { #if __CUDA_ARCH__ && __CUDA_ARCH__ >= 110 const int4 maxPos = maxPosBuffer[blockIdx.x]; const int j = maxPos.x - 1 + threadIdx.x; const int i = maxPos.y - 1 + threadIdx.y; const int layer = maxPos.z - 1 + threadIdx.z; __shared__ float N9[3][3][3]; N9[threadIdx.z][threadIdx.y][threadIdx.x] = det.ptr(c_layer_rows * layer + i)[j]; __syncthreads(); if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0) { __shared__ float dD[3]; //dx dD[0] = -0.5f * (N9[1][1][2] - N9[1][1][0]); //dy dD[1] = -0.5f * (N9[1][2][1] - N9[1][0][1]); //ds dD[2] = -0.5f * (N9[2][1][1] - N9[0][1][1]); __shared__ float H[3][3]; //dxx H[0][0] = N9[1][1][0] - 2.0f * N9[1][1][1] + N9[1][1][2]; //dxy H[0][1]= 0.25f * (N9[1][2][2] - N9[1][2][0] - N9[1][0][2] + N9[1][0][0]); //dxs H[0][2]= 0.25f * (N9[2][1][2] - N9[2][1][0] - N9[0][1][2] + N9[0][1][0]); //dyx = dxy H[1][0] = H[0][1]; //dyy H[1][1] = N9[1][0][1] - 2.0f * N9[1][1][1] + N9[1][2][1]; //dys H[1][2]= 0.25f * (N9[2][2][1] - N9[2][0][1] - N9[0][2][1] + N9[0][0][1]); //dsx = dxs H[2][0] = H[0][2]; //dsy = dys H[2][1] = H[1][2]; //dss H[2][2] = N9[0][1][1] - 2.0f * N9[1][1][1] + N9[2][1][1]; __shared__ float x[3]; if (solve3x3(H, dD, x)) { if (::fabs(x[0]) <= 1.f && ::fabs(x[1]) <= 1.f && ::fabs(x[2]) <= 1.f) { // if the step is within the interpolation region, perform it const int size = calcSize(c_octave, maxPos.z); const int sum_i = (maxPos.y - ((size >> 1) >> c_octave)) << c_octave; const int sum_j = (maxPos.x - ((size >> 1) >> c_octave)) << c_octave; const float center_i = sum_i + (float)(size - 1) / 2; const float center_j = sum_j + (float)(size - 1) / 2; const float px = center_j + x[0] * (1 << c_octave); const float py = center_i + x[1] * (1 << c_octave); const int ds = size - calcSize(c_octave, maxPos.z - 1); const float psize = roundf(size + x[2] * ds); /* The sampling intervals and wavelet sized for selecting an orientation and building the keypoint descriptor are defined relative to 's' */ const float s = psize * 1.2f / 9.0f; /* To find the dominant orientation, the gradients in x and y are sampled in a circle of radius 6s using wavelets of size 4s. We ensure the gradient wavelet size is even to ensure the wavelet pattern is balanced and symmetric around its center */ const int grad_wav_size = 2 * __float2int_rn(2.0f * s); // check when grad_wav_size is too big if ((c_img_rows + 1) >= grad_wav_size && (c_img_cols + 1) >= grad_wav_size) { // Get a new feature index. unsigned int ind = atomicInc(featureCounter, (unsigned int)-1); if (ind < c_max_features) { featureX[ind] = px; featureY[ind] = py; featureLaplacian[ind] = maxPos.w; featureOctave[ind] = c_octave; featureSize[ind] = psize; featureHessian[ind] = N9[1][1][1]; } } // grad_wav_size check } // If the subpixel interpolation worked } } // If this is thread 0. 
#endif } void icvInterpolateKeypoint_gpu(const PtrStepf& det, const int4* maxPosBuffer, unsigned int maxCounter, float* featureX, float* featureY, int* featureLaplacian, int* featureOctave, float* featureSize, float* featureHessian, unsigned int* featureCounter) { dim3 threads; threads.x = 3; threads.y = 3; threads.z = 3; dim3 grid; grid.x = maxCounter; icvInterpolateKeypoint<<<grid, threads>>>(det, maxPosBuffer, featureX, featureY, featureLaplacian, featureOctave, featureSize, featureHessian, featureCounter); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); } //////////////////////////////////////////////////////////////////////// // Orientation #define ORI_SEARCH_INC 5 #define ORI_WIN 60 #define ORI_SAMPLES 113 __constant__ float c_aptX[ORI_SAMPLES] = {-6, -5, -5, -5, -5, -5, -5, -5, -4, -4, -4, -4, -4, -4, -4, -4, -4, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 6}; __constant__ float c_aptY[ORI_SAMPLES] = {0, -3, -2, -1, 0, 1, 2, 3, -4, -3, -2, -1, 0, 1, 2, 3, 4, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -4, -3, -2, -1, 0, 1, 2, 3, 4, -3, -2, -1, 0, 1, 2, 3, 0}; __constant__ float c_aptW[ORI_SAMPLES] = {0.001455130288377404f, 0.001707611023448408f, 0.002547456417232752f, 0.003238451667129993f, 0.0035081731621176f, 0.003238451667129993f, 0.002547456417232752f, 0.001707611023448408f, 0.002003900473937392f, 0.0035081731621176f, 0.005233579315245152f, 0.00665318313986063f, 0.00720730796456337f, 0.00665318313986063f, 0.005233579315245152f, 0.0035081731621176f, 0.002003900473937392f, 0.001707611023448408f, 0.0035081731621176f, 0.006141661666333675f, 0.009162282571196556f, 0.01164754293859005f, 0.01261763460934162f, 0.01164754293859005f, 0.009162282571196556f, 0.006141661666333675f, 0.0035081731621176f, 0.001707611023448408f, 0.002547456417232752f, 0.005233579315245152f, 0.009162282571196556f, 0.01366852037608624f, 0.01737609319388866f, 0.0188232995569706f, 0.01737609319388866f, 0.01366852037608624f, 0.009162282571196556f, 0.005233579315245152f, 0.002547456417232752f, 0.003238451667129993f, 0.00665318313986063f, 0.01164754293859005f, 0.01737609319388866f, 0.02208934165537357f, 0.02392910048365593f, 0.02208934165537357f, 0.01737609319388866f, 0.01164754293859005f, 0.00665318313986063f, 0.003238451667129993f, 0.001455130288377404f, 0.0035081731621176f, 0.00720730796456337f, 0.01261763460934162f, 0.0188232995569706f, 0.02392910048365593f, 0.02592208795249462f, 0.02392910048365593f, 0.0188232995569706f, 0.01261763460934162f, 0.00720730796456337f, 0.0035081731621176f, 0.001455130288377404f, 0.003238451667129993f, 0.00665318313986063f, 0.01164754293859005f, 0.01737609319388866f, 0.02208934165537357f, 0.02392910048365593f, 0.02208934165537357f, 0.01737609319388866f, 0.01164754293859005f, 0.00665318313986063f, 0.003238451667129993f, 0.002547456417232752f, 0.005233579315245152f, 0.009162282571196556f, 0.01366852037608624f, 0.01737609319388866f, 0.0188232995569706f, 0.01737609319388866f, 0.01366852037608624f, 0.009162282571196556f, 0.005233579315245152f, 0.002547456417232752f, 
0.001707611023448408f, 0.0035081731621176f, 0.006141661666333675f, 0.009162282571196556f, 0.01164754293859005f, 0.01261763460934162f, 0.01164754293859005f, 0.009162282571196556f, 0.006141661666333675f, 0.0035081731621176f, 0.001707611023448408f, 0.002003900473937392f, 0.0035081731621176f, 0.005233579315245152f, 0.00665318313986063f, 0.00720730796456337f, 0.00665318313986063f, 0.005233579315245152f, 0.0035081731621176f, 0.002003900473937392f, 0.001707611023448408f, 0.002547456417232752f, 0.003238451667129993f, 0.0035081731621176f, 0.003238451667129993f, 0.002547456417232752f, 0.001707611023448408f, 0.001455130288377404f}; __constant__ float c_NX[2][5] = {{0, 0, 2, 4, -1}, {2, 0, 4, 4, 1}}; __constant__ float c_NY[2][5] = {{0, 0, 4, 2, 1}, {0, 2, 4, 4, -1}}; __global__ void icvCalcOrientation(const float* featureX, const float* featureY, const float* featureSize, float* featureDir) { __shared__ float s_X[128]; __shared__ float s_Y[128]; __shared__ float s_angle[128]; __shared__ float s_sumx[32 * 4]; __shared__ float s_sumy[32 * 4]; /* The sampling intervals and wavelet sized for selecting an orientation and building the keypoint descriptor are defined relative to 's' */ const float s = featureSize[blockIdx.x] * 1.2f / 9.0f; /* To find the dominant orientation, the gradients in x and y are sampled in a circle of radius 6s using wavelets of size 4s. We ensure the gradient wavelet size is even to ensure the wavelet pattern is balanced and symmetric around its center */ const int grad_wav_size = 2 * __float2int_rn(2.0f * s); // check when grad_wav_size is too big if ((c_img_rows + 1) < grad_wav_size || (c_img_cols + 1) < grad_wav_size) return; // Calc X, Y, angle and store it to shared memory const int tid = threadIdx.y * blockDim.x + threadIdx.x; float X = 0.0f, Y = 0.0f, angle = 0.0f; if (tid < ORI_SAMPLES) { const float margin = (float)(grad_wav_size - 1) / 2.0f; const int x = __float2int_rn(featureX[blockIdx.x] + c_aptX[tid] * s - margin); const int y = __float2int_rn(featureY[blockIdx.x] + c_aptY[tid] * s - margin); if (y >= 0 && y < (c_img_rows + 1) - grad_wav_size && x >= 0 && x < (c_img_cols + 1) - grad_wav_size) { X = c_aptW[tid] * icvCalcHaarPatternSum<2>(c_NX, 4, grad_wav_size, y, x); Y = c_aptW[tid] * icvCalcHaarPatternSum<2>(c_NY, 4, grad_wav_size, y, x); angle = atan2f(Y, X); if (angle < 0) angle += 2.0f * CV_PI_F; angle *= 180.0f / CV_PI_F; } } s_X[tid] = X; s_Y[tid] = Y; s_angle[tid] = angle; __syncthreads(); float bestx = 0, besty = 0, best_mod = 0; #if __CUDA_ARCH__ >= 200 #pragma unroll #endif for (int i = 0; i < 18; ++i) { const int dir = (i * 4 + threadIdx.y) * ORI_SEARCH_INC; float sumx = 0.0f, sumy = 0.0f; int d = ::abs(__float2int_rn(s_angle[threadIdx.x]) - dir); if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2) { sumx = s_X[threadIdx.x]; sumy = s_Y[threadIdx.x]; } d = ::abs(__float2int_rn(s_angle[threadIdx.x + 32]) - dir); if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2) { sumx += s_X[threadIdx.x + 32]; sumy += s_Y[threadIdx.x + 32]; } d = ::abs(__float2int_rn(s_angle[threadIdx.x + 64]) - dir); if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2) { sumx += s_X[threadIdx.x + 64]; sumy += s_Y[threadIdx.x + 64]; } d = ::abs(__float2int_rn(s_angle[threadIdx.x + 96]) - dir); if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2) { sumx += s_X[threadIdx.x + 96]; sumy += s_Y[threadIdx.x + 96]; } plus<float> op; device::reduce<32>(smem_tuple(s_sumx + threadIdx.y * 32, s_sumy + threadIdx.y * 32), thrust::tie(sumx, sumy), threadIdx.x, thrust::make_tuple(op, op)); const float temp_mod = sumx * sumx + 
sumy * sumy; if (temp_mod > best_mod) { best_mod = temp_mod; bestx = sumx; besty = sumy; } __syncthreads(); } if (threadIdx.x == 0) { s_X[threadIdx.y] = bestx; s_Y[threadIdx.y] = besty; s_angle[threadIdx.y] = best_mod; } __syncthreads(); if (threadIdx.x == 0 && threadIdx.y == 0) { int bestIdx = 0; if (s_angle[1] > s_angle[bestIdx]) bestIdx = 1; if (s_angle[2] > s_angle[bestIdx]) bestIdx = 2; if (s_angle[3] > s_angle[bestIdx]) bestIdx = 3; float kp_dir = atan2f(s_Y[bestIdx], s_X[bestIdx]); if (kp_dir < 0) kp_dir += 2.0f * CV_PI_F; kp_dir *= 180.0f / CV_PI_F; kp_dir = 360.0f - kp_dir; if (::fabsf(kp_dir - 360.f) < numeric_limits<float>::epsilon()) kp_dir = 0.f; featureDir[blockIdx.x] = kp_dir; } } #undef ORI_SEARCH_INC #undef ORI_WIN #undef ORI_SAMPLES void icvCalcOrientation_gpu(const float* featureX, const float* featureY, const float* featureSize, float* featureDir, int nFeatures) { dim3 threads; threads.x = 32; threads.y = 4; dim3 grid; grid.x = nFeatures; icvCalcOrientation<<<grid, threads>>>(featureX, featureY, featureSize, featureDir); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); } //////////////////////////////////////////////////////////////////////// // Descriptors #define PATCH_SZ 20 __constant__ float c_DW[PATCH_SZ * PATCH_SZ] = { 3.695352233989979e-006f, 8.444558261544444e-006f, 1.760426494001877e-005f, 3.34794785885606e-005f, 5.808438800158911e-005f, 9.193058212986216e-005f, 0.0001327334757661447f, 0.0001748319627949968f, 0.0002100782439811155f, 0.0002302826324012131f, 0.0002302826324012131f, 0.0002100782439811155f, 0.0001748319627949968f, 0.0001327334757661447f, 9.193058212986216e-005f, 5.808438800158911e-005f, 3.34794785885606e-005f, 1.760426494001877e-005f, 8.444558261544444e-006f, 3.695352233989979e-006f, 8.444558261544444e-006f, 1.929736572492402e-005f, 4.022897701361217e-005f, 7.650675252079964e-005f, 0.0001327334903180599f, 0.0002100782585330308f, 0.0003033203829545528f, 0.0003995231236331165f, 0.0004800673632416874f, 0.0005262381164357066f, 0.0005262381164357066f, 0.0004800673632416874f, 0.0003995231236331165f, 0.0003033203829545528f, 0.0002100782585330308f, 0.0001327334903180599f, 7.650675252079964e-005f, 4.022897701361217e-005f, 1.929736572492402e-005f, 8.444558261544444e-006f, 1.760426494001877e-005f, 4.022897701361217e-005f, 8.386484114453197e-005f, 0.0001594926579855382f, 0.0002767078403849155f, 0.0004379475140012801f, 0.0006323281559161842f, 0.0008328808471560478f, 0.001000790391117334f, 0.001097041997127235f, 0.001097041997127235f, 0.001000790391117334f, 0.0008328808471560478f, 0.0006323281559161842f, 0.0004379475140012801f, 0.0002767078403849155f, 0.0001594926579855382f, 8.386484114453197e-005f, 4.022897701361217e-005f, 1.760426494001877e-005f, 3.34794785885606e-005f, 7.650675252079964e-005f, 0.0001594926579855382f, 0.0003033203247468919f, 0.0005262380582280457f, 0.0008328807889483869f, 0.001202550483867526f, 0.001583957928232849f, 0.001903285388834775f, 0.002086334861814976f, 0.002086334861814976f, 0.001903285388834775f, 0.001583957928232849f, 0.001202550483867526f, 0.0008328807889483869f, 0.0005262380582280457f, 0.0003033203247468919f, 0.0001594926579855382f, 7.650675252079964e-005f, 3.34794785885606e-005f, 5.808438800158911e-005f, 0.0001327334903180599f, 0.0002767078403849155f, 0.0005262380582280457f, 0.0009129836107604206f, 0.001444985857233405f, 0.002086335094645619f, 0.002748048631474376f, 0.00330205773934722f, 0.003619635012000799f, 0.003619635012000799f, 0.00330205773934722f, 0.002748048631474376f, 0.002086335094645619f, 
0.001444985857233405f, 0.0009129836107604206f, 0.0005262380582280457f, 0.0002767078403849155f, 0.0001327334903180599f, 5.808438800158911e-005f, 9.193058212986216e-005f, 0.0002100782585330308f, 0.0004379475140012801f, 0.0008328807889483869f, 0.001444985857233405f, 0.002286989474669099f, 0.00330205773934722f, 0.004349356517195702f, 0.00522619066759944f, 0.005728822201490402f, 0.005728822201490402f, 0.00522619066759944f, 0.004349356517195702f, 0.00330205773934722f, 0.002286989474669099f, 0.001444985857233405f, 0.0008328807889483869f, 0.0004379475140012801f, 0.0002100782585330308f, 9.193058212986216e-005f, 0.0001327334757661447f, 0.0003033203829545528f, 0.0006323281559161842f, 0.001202550483867526f, 0.002086335094645619f, 0.00330205773934722f, 0.004767658654600382f, 0.006279794964939356f, 0.007545807864516974f, 0.008271530270576477f, 0.008271530270576477f, 0.007545807864516974f, 0.006279794964939356f, 0.004767658654600382f, 0.00330205773934722f, 0.002086335094645619f, 0.001202550483867526f, 0.0006323281559161842f, 0.0003033203829545528f, 0.0001327334757661447f, 0.0001748319627949968f, 0.0003995231236331165f, 0.0008328808471560478f, 0.001583957928232849f, 0.002748048631474376f, 0.004349356517195702f, 0.006279794964939356f, 0.008271529339253902f, 0.009939077310264111f, 0.01089497376233339f, 0.01089497376233339f, 0.009939077310264111f, 0.008271529339253902f, 0.006279794964939356f, 0.004349356517195702f, 0.002748048631474376f, 0.001583957928232849f, 0.0008328808471560478f, 0.0003995231236331165f, 0.0001748319627949968f, 0.0002100782439811155f, 0.0004800673632416874f, 0.001000790391117334f, 0.001903285388834775f, 0.00330205773934722f, 0.00522619066759944f, 0.007545807864516974f, 0.009939077310264111f, 0.01194280479103327f, 0.01309141051024199f, 0.01309141051024199f, 0.01194280479103327f, 0.009939077310264111f, 0.007545807864516974f, 0.00522619066759944f, 0.00330205773934722f, 0.001903285388834775f, 0.001000790391117334f, 0.0004800673632416874f, 0.0002100782439811155f, 0.0002302826324012131f, 0.0005262381164357066f, 0.001097041997127235f, 0.002086334861814976f, 0.003619635012000799f, 0.005728822201490402f, 0.008271530270576477f, 0.01089497376233339f, 0.01309141051024199f, 0.01435048412531614f, 0.01435048412531614f, 0.01309141051024199f, 0.01089497376233339f, 0.008271530270576477f, 0.005728822201490402f, 0.003619635012000799f, 0.002086334861814976f, 0.001097041997127235f, 0.0005262381164357066f, 0.0002302826324012131f, 0.0002302826324012131f, 0.0005262381164357066f, 0.001097041997127235f, 0.002086334861814976f, 0.003619635012000799f, 0.005728822201490402f, 0.008271530270576477f, 0.01089497376233339f, 0.01309141051024199f, 0.01435048412531614f, 0.01435048412531614f, 0.01309141051024199f, 0.01089497376233339f, 0.008271530270576477f, 0.005728822201490402f, 0.003619635012000799f, 0.002086334861814976f, 0.001097041997127235f, 0.0005262381164357066f, 0.0002302826324012131f, 0.0002100782439811155f, 0.0004800673632416874f, 0.001000790391117334f, 0.001903285388834775f, 0.00330205773934722f, 0.00522619066759944f, 0.007545807864516974f, 0.009939077310264111f, 0.01194280479103327f, 0.01309141051024199f, 0.01309141051024199f, 0.01194280479103327f, 0.009939077310264111f, 0.007545807864516974f, 0.00522619066759944f, 0.00330205773934722f, 0.001903285388834775f, 0.001000790391117334f, 0.0004800673632416874f, 0.0002100782439811155f, 0.0001748319627949968f, 0.0003995231236331165f, 0.0008328808471560478f, 0.001583957928232849f, 0.002748048631474376f, 0.004349356517195702f, 0.006279794964939356f, 0.008271529339253902f, 
0.009939077310264111f, 0.01089497376233339f, 0.01089497376233339f, 0.009939077310264111f, 0.008271529339253902f, 0.006279794964939356f, 0.004349356517195702f, 0.002748048631474376f, 0.001583957928232849f, 0.0008328808471560478f, 0.0003995231236331165f, 0.0001748319627949968f, 0.0001327334757661447f, 0.0003033203829545528f, 0.0006323281559161842f, 0.001202550483867526f, 0.002086335094645619f, 0.00330205773934722f, 0.004767658654600382f, 0.006279794964939356f, 0.007545807864516974f, 0.008271530270576477f, 0.008271530270576477f, 0.007545807864516974f, 0.006279794964939356f, 0.004767658654600382f, 0.00330205773934722f, 0.002086335094645619f, 0.001202550483867526f, 0.0006323281559161842f, 0.0003033203829545528f, 0.0001327334757661447f, 9.193058212986216e-005f, 0.0002100782585330308f, 0.0004379475140012801f, 0.0008328807889483869f, 0.001444985857233405f, 0.002286989474669099f, 0.00330205773934722f, 0.004349356517195702f, 0.00522619066759944f, 0.005728822201490402f, 0.005728822201490402f, 0.00522619066759944f, 0.004349356517195702f, 0.00330205773934722f, 0.002286989474669099f, 0.001444985857233405f, 0.0008328807889483869f, 0.0004379475140012801f, 0.0002100782585330308f, 9.193058212986216e-005f, 5.808438800158911e-005f, 0.0001327334903180599f, 0.0002767078403849155f, 0.0005262380582280457f, 0.0009129836107604206f, 0.001444985857233405f, 0.002086335094645619f, 0.002748048631474376f, 0.00330205773934722f, 0.003619635012000799f, 0.003619635012000799f, 0.00330205773934722f, 0.002748048631474376f, 0.002086335094645619f, 0.001444985857233405f, 0.0009129836107604206f, 0.0005262380582280457f, 0.0002767078403849155f, 0.0001327334903180599f, 5.808438800158911e-005f, 3.34794785885606e-005f, 7.650675252079964e-005f, 0.0001594926579855382f, 0.0003033203247468919f, 0.0005262380582280457f, 0.0008328807889483869f, 0.001202550483867526f, 0.001583957928232849f, 0.001903285388834775f, 0.002086334861814976f, 0.002086334861814976f, 0.001903285388834775f, 0.001583957928232849f, 0.001202550483867526f, 0.0008328807889483869f, 0.0005262380582280457f, 0.0003033203247468919f, 0.0001594926579855382f, 7.650675252079964e-005f, 3.34794785885606e-005f, 1.760426494001877e-005f, 4.022897701361217e-005f, 8.386484114453197e-005f, 0.0001594926579855382f, 0.0002767078403849155f, 0.0004379475140012801f, 0.0006323281559161842f, 0.0008328808471560478f, 0.001000790391117334f, 0.001097041997127235f, 0.001097041997127235f, 0.001000790391117334f, 0.0008328808471560478f, 0.0006323281559161842f, 0.0004379475140012801f, 0.0002767078403849155f, 0.0001594926579855382f, 8.386484114453197e-005f, 4.022897701361217e-005f, 1.760426494001877e-005f, 8.444558261544444e-006f, 1.929736572492402e-005f, 4.022897701361217e-005f, 7.650675252079964e-005f, 0.0001327334903180599f, 0.0002100782585330308f, 0.0003033203829545528f, 0.0003995231236331165f, 0.0004800673632416874f, 0.0005262381164357066f, 0.0005262381164357066f, 0.0004800673632416874f, 0.0003995231236331165f, 0.0003033203829545528f, 0.0002100782585330308f, 0.0001327334903180599f, 7.650675252079964e-005f, 4.022897701361217e-005f, 1.929736572492402e-005f, 8.444558261544444e-006f, 3.695352233989979e-006f, 8.444558261544444e-006f, 1.760426494001877e-005f, 3.34794785885606e-005f, 5.808438800158911e-005f, 9.193058212986216e-005f, 0.0001327334757661447f, 0.0001748319627949968f, 0.0002100782439811155f, 0.0002302826324012131f, 0.0002302826324012131f, 0.0002100782439811155f, 0.0001748319627949968f, 0.0001327334757661447f, 9.193058212986216e-005f, 5.808438800158911e-005f, 3.34794785885606e-005f, 
1.760426494001877e-005f, 8.444558261544444e-006f, 3.695352233989979e-006f }; struct WinReader { typedef uchar elem_type; __device__ __forceinline__ uchar operator ()(int i, int j) const { float pixel_x = centerX + (win_offset + j) * cos_dir + (win_offset + i) * sin_dir; float pixel_y = centerY - (win_offset + j) * sin_dir + (win_offset + i) * cos_dir; return tex2D(imgTex, pixel_x, pixel_y); } float centerX; float centerY; float win_offset; float cos_dir; float sin_dir; int width; int height; }; __device__ void calc_dx_dy(const float* featureX, const float* featureY, const float* featureSize, const float* featureDir, float& dx, float& dy); __device__ void calc_dx_dy(const float* featureX, const float* featureY, const float* featureSize, const float* featureDir, float& dx, float& dy) { __shared__ float s_PATCH[PATCH_SZ + 1][PATCH_SZ + 1]; dx = dy = 0.0f; WinReader win; win.centerX = featureX[blockIdx.x]; win.centerY = featureY[blockIdx.x]; // The sampling intervals and wavelet sized for selecting an orientation // and building the keypoint descriptor are defined relative to 's' const float s = featureSize[blockIdx.x] * 1.2f / 9.0f; // Extract a window of pixels around the keypoint of size 20s const int win_size = (int)((PATCH_SZ + 1) * s); win.width = win.height = win_size; // Nearest neighbour version (faster) win.win_offset = -(win_size - 1.0f) / 2.0f; float descriptor_dir = 360.0f - featureDir[blockIdx.x]; if (::fabsf(descriptor_dir - 360.f) < numeric_limits<float>::epsilon()) descriptor_dir = 0.f; descriptor_dir *= CV_PI_F / 180.0f; sincosf(descriptor_dir, &win.sin_dir, &win.cos_dir); const int tid = threadIdx.y * blockDim.x + threadIdx.x; const int xLoadInd = tid % (PATCH_SZ + 1); const int yLoadInd = tid / (PATCH_SZ + 1); if (yLoadInd < (PATCH_SZ + 1)) { if (s > 1) { AreaFilter<WinReader> filter(win, s, s); s_PATCH[yLoadInd][xLoadInd] = filter(yLoadInd, xLoadInd); } else { LinearFilter<WinReader> filter(win); s_PATCH[yLoadInd][xLoadInd] = filter(yLoadInd * s, xLoadInd * s); } } __syncthreads(); const int xPatchInd = threadIdx.x % 5; const int yPatchInd = threadIdx.x / 5; if (yPatchInd < 5) { const int xBlockInd = threadIdx.y % 4; const int yBlockInd = threadIdx.y / 4; const int xInd = xBlockInd * 5 + xPatchInd; const int yInd = yBlockInd * 5 + yPatchInd; const float dw = c_DW[yInd * PATCH_SZ + xInd]; dx = (s_PATCH[yInd ][xInd + 1] - s_PATCH[yInd][xInd] + s_PATCH[yInd + 1][xInd + 1] - s_PATCH[yInd + 1][xInd ]) * dw; dy = (s_PATCH[yInd + 1][xInd ] - s_PATCH[yInd][xInd] + s_PATCH[yInd + 1][xInd + 1] - s_PATCH[yInd ][xInd + 1]) * dw; } } __global__ void compute_descriptors_64(PtrStep<float4> descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir) { __shared__ float smem[32 * 16]; float* sRow = smem + threadIdx.y * 32; float dx, dy; calc_dx_dy(featureX, featureY, featureSize, featureDir, dx, dy); float dxabs = ::fabsf(dx); float dyabs = ::fabsf(dy); plus<float> op; reduce<32>(sRow, dx, threadIdx.x, op); reduce<32>(sRow, dy, threadIdx.x, op); reduce<32>(sRow, dxabs, threadIdx.x, op); reduce<32>(sRow, dyabs, threadIdx.x, op); float4* descriptors_block = descriptors.ptr(blockIdx.x) + threadIdx.y; // write dx, dy, |dx|, |dy| if (threadIdx.x == 0) *descriptors_block = make_float4(dx, dy, dxabs, dyabs); } __global__ void compute_descriptors_128(PtrStep<float4> descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir) { __shared__ float smem[32 * 16]; float* sRow = smem + threadIdx.y * 32; 
float dx, dy; calc_dx_dy(featureX, featureY, featureSize, featureDir, dx, dy); float4* descriptors_block = descriptors.ptr(blockIdx.x) + threadIdx.y * 2; plus<float> op; float d1 = 0.0f; float d2 = 0.0f; float abs1 = 0.0f; float abs2 = 0.0f; if (dy >= 0) { d1 = dx; abs1 = ::fabsf(dx); } else { d2 = dx; abs2 = ::fabsf(dx); } reduce<32>(sRow, d1, threadIdx.x, op); reduce<32>(sRow, d2, threadIdx.x, op); reduce<32>(sRow, abs1, threadIdx.x, op); reduce<32>(sRow, abs2, threadIdx.x, op); // write dx (dy >= 0), |dx| (dy >= 0), dx (dy < 0), |dx| (dy < 0) if (threadIdx.x == 0) descriptors_block[0] = make_float4(d1, abs1, d2, abs2); if (dx >= 0) { d1 = dy; abs1 = ::fabsf(dy); d2 = 0.0f; abs2 = 0.0f; } else { d1 = 0.0f; abs1 = 0.0f; d2 = dy; abs2 = ::fabsf(dy); } reduce<32>(sRow, d1, threadIdx.x, op); reduce<32>(sRow, d2, threadIdx.x, op); reduce<32>(sRow, abs1, threadIdx.x, op); reduce<32>(sRow, abs2, threadIdx.x, op); // write dy (dx >= 0), |dy| (dx >= 0), dy (dx < 0), |dy| (dx < 0) if (threadIdx.x == 0) descriptors_block[1] = make_float4(d1, abs1, d2, abs2); } template <int BLOCK_DIM_X> __global__ void normalize_descriptors(PtrStepf descriptors) { __shared__ float smem[BLOCK_DIM_X]; __shared__ float s_len; // no need for thread ID float* descriptor_base = descriptors.ptr(blockIdx.x); // read in the unnormalized descriptor values (squared) const float val = descriptor_base[threadIdx.x]; float len = val * val; reduce<BLOCK_DIM_X>(smem, len, threadIdx.x, plus<float>()); if (threadIdx.x == 0) s_len = ::sqrtf(len); __syncthreads(); // normalize and store in output descriptor_base[threadIdx.x] = val / s_len; } void compute_descriptors_gpu(PtrStepSz<float4> descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir, int nFeatures) { // compute unnormalized descriptors, then normalize them - odd indexing since grid must be 2D if (descriptors.cols == 64) { compute_descriptors_64<<<nFeatures, dim3(32, 16)>>>(descriptors, featureX, featureY, featureSize, featureDir); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); normalize_descriptors<64><<<nFeatures, 64>>>((PtrStepSzf) descriptors); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); } else { compute_descriptors_128<<<nFeatures, dim3(32, 16)>>>(descriptors, featureX, featureY, featureSize, featureDir); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); normalize_descriptors<128><<<nFeatures, 128>>>((PtrStepSzf) descriptors); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); } } } // namespace surf }}} // namespace cv { namespace gpu { namespace device #endif /* CUDA_DISABLER */
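// --- Editor's note: hedged summary of the call order implied by the host wrappers above ---
// (a sketch, not the actual OpenCV SURF_GPU driver code; it is not part of the original file)
//
//   for each octave:
//       icvCalcLayerDetAndTrace_gpu(det, trace, ...);            // Hessian determinant + trace per layer
//       icvFindMaximaInLayer_gpu(det, trace, maxPosBuffer, ...); // 3x3x3 non-max suppression -> candidates
//       icvInterpolateKeypoint_gpu(det, maxPosBuffer, ...);      // sub-pixel / sub-scale refinement -> features
//   then, over the collected features:
//       icvCalcOrientation_gpu(featureX, featureY, featureSize, featureDir, nFeatures);
//       compute_descriptors_gpu(descriptors, ..., nFeatures);    // 64- or 128-element descriptors, then L2 normalization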
d273b1bfa6e13d1046b7b60bf1d6473fafc1520c.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "composeSingleSystem.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const size_t offset = XSIZE*YSIZE; const float *H = NULL; hipMalloc(&H, XSIZE*YSIZE); const size_t lowresWidth = XSIZE*YSIZE; const size_t lowresHeight = XSIZE*YSIZE; const size_t highresWidth = XSIZE*YSIZE; const size_t highresHeight = XSIZE*YSIZE; const float psfWidth = 1; const int pixelRadius = 1; float *systemMatrixVals = NULL; hipMalloc(&systemMatrixVals, XSIZE*YSIZE); int *systemMatrixCols = NULL; hipMalloc(&systemMatrixCols, XSIZE*YSIZE); int *systemMatrixRows = NULL; hipMalloc(&systemMatrixRows, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( composeSingleSystem), dim3(gridBlock),dim3(threadBlock), 0, 0, offset,H,lowresWidth,lowresHeight,highresWidth,highresHeight,psfWidth,pixelRadius,systemMatrixVals,systemMatrixCols,systemMatrixRows); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( composeSingleSystem), dim3(gridBlock),dim3(threadBlock), 0, 0, offset,H,lowresWidth,lowresHeight,highresWidth,highresHeight,psfWidth,pixelRadius,systemMatrixVals,systemMatrixCols,systemMatrixRows); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( composeSingleSystem), dim3(gridBlock),dim3(threadBlock), 0, 0, offset,H,lowresWidth,lowresHeight,highresWidth,highresHeight,psfWidth,pixelRadius,systemMatrixVals,systemMatrixCols,systemMatrixRows); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
d273b1bfa6e13d1046b7b60bf1d6473fafc1520c.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "composeSingleSystem.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const size_t offset = XSIZE*YSIZE; const float *H = NULL; cudaMalloc(&H, XSIZE*YSIZE); const size_t lowresWidth = XSIZE*YSIZE; const size_t lowresHeight = XSIZE*YSIZE; const size_t highresWidth = XSIZE*YSIZE; const size_t highresHeight = XSIZE*YSIZE; const float psfWidth = 1; const int pixelRadius = 1; float *systemMatrixVals = NULL; cudaMalloc(&systemMatrixVals, XSIZE*YSIZE); int *systemMatrixCols = NULL; cudaMalloc(&systemMatrixCols, XSIZE*YSIZE); int *systemMatrixRows = NULL; cudaMalloc(&systemMatrixRows, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); composeSingleSystem<<<gridBlock,threadBlock>>>(offset,H,lowresWidth,lowresHeight,highresWidth,highresHeight,psfWidth,pixelRadius,systemMatrixVals,systemMatrixCols,systemMatrixRows); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { composeSingleSystem<<<gridBlock,threadBlock>>>(offset,H,lowresWidth,lowresHeight,highresWidth,highresHeight,psfWidth,pixelRadius,systemMatrixVals,systemMatrixCols,systemMatrixRows); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { composeSingleSystem<<<gridBlock,threadBlock>>>(offset,H,lowresWidth,lowresHeight,highresWidth,highresHeight,psfWidth,pixelRadius,systemMatrixVals,systemMatrixCols,systemMatrixRows); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
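// --- Editor's note (not part of the original source) ---
// Timing pattern used in main() above: one launch followed by cudaDeviceSynchronize() to
// absorb first-launch overhead, 10 further warm-up launches, then 1000 launches timed with
// std::chrono::steady_clock; usecs.count() / 1000 therefore gives the average microseconds
// per launch. Note there is no synchronize between the timed loop and steady_clock::now(),
// so the figure can reflect launch/queueing cost rather than full kernel execution time.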
f4f1ac272833ec93da8c22ede6714b73843e84a5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void computeSphereVertexDistancesKernel(float4 *V, float *dist, unsigned int *NEIGHBOR, unsigned int *NBOFFSETS, unsigned int *nNeighbors, unsigned int nVertices, float circumference) { int n,N; int offset,soffset; // since we are using multiple threads per blocks as well as multiple blocks int vidxb = 4*(blockIdx.x * blockDim.x) + threadIdx.x; int basevert = 4*(blockIdx.x * blockDim.x); int vidx,tab; float4 nv,tv; float dot,n1,n2,norm; // create a cache for 4 elements per block (4*BLOCK_SIZE elements) __shared__ float4 SI[4*BLOCK_SIZE_CVD]; int bidx = threadIdx.x; // this means we have 128 neighboring vertices cached for (vidx=vidxb; vidx<vidxb+4*BLOCK_SIZE_CVD; vidx+=BLOCK_SIZE_CVD) { if (vidx < nVertices) { SI[bidx] = V[vidx]; bidx+=BLOCK_SIZE_CVD; } } __syncthreads(); bidx = threadIdx.x; // preload the current BLOCK_SIZE vertices for (vidx=vidxb; vidx<vidxb+4*BLOCK_SIZE_CVD; vidx+=BLOCK_SIZE_CVD) { if (vidx < nVertices) { offset = NBOFFSETS[ vidx ]; N = nNeighbors[ vidx ]; tv = SI[bidx]; bidx += BLOCK_SIZE_CVD; for (n = 0; n < N; n++) { soffset = NEIGHBOR[offset+n]; /* There seems to be little to NO benefit of this local caching, either because we have no hits, or reading from the shared memory is just as slow as reading from global memory */ tab = soffset - basevert; if (tab > 0 && tab < 4*BLOCK_SIZE_CVD) { nv = SI[tab]; } else { nv = V[soffset]; } // avoid FMADS //dot = tv.x*nv.x + tv.y*nv.y + tv.z*nv.z; dot = __fmul_rn(tv.x,nv.x); dot = __fadd_rn(dot,__fmul_rn(tv.y,nv.y)); dot = __fadd_rn(dot,__fmul_rn(tv.z,nv.z)); //n1 = tv.x*tv.x + tv.y*tv.y + tv.z*tv.z; n1 = __fmul_rn(tv.x,tv.x); n1 = __fadd_rn(n1,__fmul_rn(tv.y,tv.y)); n1 = __fadd_rn(n1,__fmul_rn(tv.z,tv.z)); //n2 = nv.x*nv.x + nv.y*nv.y + nv.z*nv.z; n2 = __fmul_rn(nv.x,nv.x); n2 = __fadd_rn(n2,__fmul_rn(nv.y,nv.y)); n2 = __fadd_rn(n2,__fmul_rn(nv.z,nv.z)); norm = __fmul_rn(__fsqrt_rn(n1),__fsqrt_rn(n2)); //norm = __fsqrt_rn(n1) * __fsqrt_rn(n2); // this seems to be a quell of numerical error here if (norm < 1.0e-7f) { dist[offset+n] = 0.0f; } else if (fabsf(dot) > norm) { dist[offset+n] = 0.0f; } else { dist[offset+n] = __fmul_rn(circumference,fabsf(acosf(dot/norm))); } } } } }
f4f1ac272833ec93da8c22ede6714b73843e84a5.cu
#include "includes.h" __global__ void computeSphereVertexDistancesKernel(float4 *V, float *dist, unsigned int *NEIGHBOR, unsigned int *NBOFFSETS, unsigned int *nNeighbors, unsigned int nVertices, float circumference) { int n,N; int offset,soffset; // since we are using multiple threads per blocks as well as multiple blocks int vidxb = 4*(blockIdx.x * blockDim.x) + threadIdx.x; int basevert = 4*(blockIdx.x * blockDim.x); int vidx,tab; float4 nv,tv; float dot,n1,n2,norm; // create a cache for 4 elements per block (4*BLOCK_SIZE elements) __shared__ float4 SI[4*BLOCK_SIZE_CVD]; int bidx = threadIdx.x; // this means we have 128 neighboring vertices cached for (vidx=vidxb; vidx<vidxb+4*BLOCK_SIZE_CVD; vidx+=BLOCK_SIZE_CVD) { if (vidx < nVertices) { SI[bidx] = V[vidx]; bidx+=BLOCK_SIZE_CVD; } } __syncthreads(); bidx = threadIdx.x; // preload the current BLOCK_SIZE vertices for (vidx=vidxb; vidx<vidxb+4*BLOCK_SIZE_CVD; vidx+=BLOCK_SIZE_CVD) { if (vidx < nVertices) { offset = NBOFFSETS[ vidx ]; N = nNeighbors[ vidx ]; tv = SI[bidx]; bidx += BLOCK_SIZE_CVD; for (n = 0; n < N; n++) { soffset = NEIGHBOR[offset+n]; /* There seems to be little to NO benefit of this local caching, either because we have no hits, or reading from the shared memory is just as slow as reading from global memory */ tab = soffset - basevert; if (tab > 0 && tab < 4*BLOCK_SIZE_CVD) { nv = SI[tab]; } else { nv = V[soffset]; } // avoid FMADS //dot = tv.x*nv.x + tv.y*nv.y + tv.z*nv.z; dot = __fmul_rn(tv.x,nv.x); dot = __fadd_rn(dot,__fmul_rn(tv.y,nv.y)); dot = __fadd_rn(dot,__fmul_rn(tv.z,nv.z)); //n1 = tv.x*tv.x + tv.y*tv.y + tv.z*tv.z; n1 = __fmul_rn(tv.x,tv.x); n1 = __fadd_rn(n1,__fmul_rn(tv.y,tv.y)); n1 = __fadd_rn(n1,__fmul_rn(tv.z,tv.z)); //n2 = nv.x*nv.x + nv.y*nv.y + nv.z*nv.z; n2 = __fmul_rn(nv.x,nv.x); n2 = __fadd_rn(n2,__fmul_rn(nv.y,nv.y)); n2 = __fadd_rn(n2,__fmul_rn(nv.z,nv.z)); norm = __fmul_rn(__fsqrt_rn(n1),__fsqrt_rn(n2)); //norm = __fsqrt_rn(n1) * __fsqrt_rn(n2); // this seems to be a quell of numerical error here if (norm < 1.0e-7f) { dist[offset+n] = 0.0f; } else if (fabsf(dot) > norm) { dist[offset+n] = 0.0f; } else { dist[offset+n] = __fmul_rn(circumference,fabsf(acosf(dot/norm))); } } } } }
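// --- Editor's note (not part of the original source) ---
// Distance formula used by the kernel above: for a vertex tv and a neighbour nv it computes
// the angle between the two position vectors,
//     theta = acos( dot(tv, nv) / (|tv| * |nv|) ),
// and stores dist = circumference * |theta|, i.e. an arc-length-style spherical distance
// scaled by the caller-supplied `circumference` factor. The `norm < 1.0e-7f` and
// `fabsf(dot) > norm` guards write 0 instead, avoiding NaNs from acosf.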
548212e0c4481d9500652505f18efcf19deb0ce0.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

__global__ void k(int *ret)
{
    for(int i=0; i<100000; i++) {
        *ret = i;
    }
}

int main(int argc, char** argv)
{
    int *d_argc;
    hipMalloc(&d_argc, sizeof(int));
    hipMemcpy(d_argc, &argc, sizeof(int), hipMemcpyHostToDevice);
    hipLaunchKernelGGL(( k), dim3(1),dim3(1), 0, 0, d_argc);
    for(int i=0; i<100; i++) {
        argc += i;
    }
}
548212e0c4481d9500652505f18efcf19deb0ce0.cu
__global__ void k(int *ret)
{
    for(int i=0; i<100000; i++) {
        *ret = i;
    }
}

int main(int argc, char** argv)
{
    int *d_argc;
    cudaMalloc(&d_argc, sizeof(int));
    cudaMemcpy(d_argc, &argc, sizeof(int), cudaMemcpyHostToDevice);
    k<<<1,1>>>(d_argc);
    for(int i=0; i<100; i++) {
        argc += i;
    }
}
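// --- Editor's hedged sketch (not part of the original source) ---
// main() above launches k but never copies *d_argc back or frees it; completing the round
// trip would look roughly like the commented lines below, placed after the kernel launch
// (the variable names reuse the locals already declared in main):
//
//     int result = 0;
//     cudaDeviceSynchronize();                                          // wait for k to finish
//     cudaMemcpy(&result, d_argc, sizeof(int), cudaMemcpyDeviceToHost); // result == 99999, the last value stored
//     cudaFree(d_argc);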
24c34e7497585aeb927ef766c6d374e423cdc2eb.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

// CUDA kernel. Each thread takes care of one element of c.
// Note: despite the name vecAdd, the body stores the element-wise product c[id] = a[id] * b[id].
__global__ void vecAdd(double *a, double *b, double *c, int n)
{
    // Get our global thread ID
    int id = blockIdx.x*blockDim.x+threadIdx.x;

    // Make sure we do not go out of bounds
    if (id < n)
        c[id] = a[id] * b[id];
}
24c34e7497585aeb927ef766c6d374e423cdc2eb.cu
#include "includes.h"

// CUDA kernel. Each thread takes care of one element of c.
// Note: despite the name vecAdd, the body stores the element-wise product c[id] = a[id] * b[id].
__global__ void vecAdd(double *a, double *b, double *c, int n)
{
    // Get our global thread ID
    int id = blockIdx.x*blockDim.x+threadIdx.x;

    // Make sure we do not go out of bounds
    if (id < n)
        c[id] = a[id] * b[id];
}
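// --- Editor's hedged usage sketch (not part of the original source) ---
// Minimal host-side launch for the kernel above. The helper name, the block size and the
// assumption that d_a, d_b, d_c are device pointers of length n are illustrative choices,
// not taken from the original code.
static void launchVecAdd(double *d_a, double *d_b, double *d_c, int n)
{
    const int blockSize = 256;                              // threads per block (assumption)
    const int gridSize  = (n + blockSize - 1) / blockSize;  // ceil(n / blockSize) blocks
    vecAdd<<<gridSize, blockSize>>>(d_a, d_b, d_c, n);      // one thread per output element
    cudaDeviceSynchronize();                                // wait for the kernel to finish
}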
a6faeb0365f2db687d62ea4c65966e29ef40ed83.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void accumulateColsInplaceKernel(float *input, int channels, int h, int w)
{
    // in-place.
    // input is already a `channels * (h+1) x (w+1)` array

    // global column index (of all `channels * w` columns in this image)
    int colIdx = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x;

    if (colIdx < channels * w) {
        input += (colIdx / w) * (h+1) * (w+1); // jump to current channel
        colIdx %= w; // switch to local column index,
        ++colIdx;    // it's 1-indexed because first output column is always zero

        input[colIdx] = 0; // first element of every column is always zero
        double sum = 0;

        for (int i = 1; i <= h; ++i) {
            float *currentElement = &input[i * (w+1) + colIdx];
            sum += static_cast<double>(*currentElement);
            *currentElement = static_cast<float>(sum);
        }
    }
}
a6faeb0365f2db687d62ea4c65966e29ef40ed83.cu
#include "includes.h"

__global__ void accumulateColsInplaceKernel(float *input, int channels, int h, int w)
{
    // in-place.
    // input is already a `channels * (h+1) x (w+1)` array

    // global column index (of all `channels * w` columns in this image)
    int colIdx = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x;

    if (colIdx < channels * w) {
        input += (colIdx / w) * (h+1) * (w+1); // jump to current channel
        colIdx %= w; // switch to local column index,
        ++colIdx;    // it's 1-indexed because first output column is always zero

        input[colIdx] = 0; // first element of every column is always zero
        double sum = 0;

        for (int i = 1; i <= h; ++i) {
            float *currentElement = &input[i * (w+1) + colIdx];
            sum += static_cast<double>(*currentElement);
            *currentElement = static_cast<float>(sum);
        }
    }
}
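// --- Editor's hedged usage sketch (not part of the original source) ---
// The kernel above indexes columns as BLOCK_SIZE*BLOCK_SIZE*blockIdx.x + threadIdx.x, so a
// matching launch uses BLOCK_SIZE*BLOCK_SIZE threads per block and enough blocks to cover
// all channels*w columns. BLOCK_SIZE is expected to come from includes.h (as in the kernel);
// d_input must point to the channels * (h+1) x (w+1) device buffer described above. The
// helper itself is the editor's illustration, not part of the original file.
static void launchAccumulateColsInplace(float *d_input, int channels, int h, int w)
{
    const int threads = BLOCK_SIZE * BLOCK_SIZE;
    const int blocks  = (channels * w + threads - 1) / threads;   // ceil division over columns
    accumulateColsInplaceKernel<<<blocks, threads>>>(d_input, channels, h, w);
    cudaDeviceSynchronize();                                      // wait for the prefix sums to finish
}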
7352fb8804996b1bb8600a14031b64a03b2eb49f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Originally written by Hong Li and Allison Kolpas. * Updated and modified by Michael Busch. */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes, project #include <cutil.h> // includes, kernels #include "SAfish.h" #include <SAfish_kernel.cu> //////////////////////////////////////////////////////////////////////////////// // declaration, forward //float HongRatio=0.0; void runTest( int argc, char** argv); extern "C" //void cpufish(float newRatio); void cpufish(); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { runTest( argc, argv); CUT_EXIT(argc, argv); } //////////////////////////////////////////////////////////////////////////////// //! Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// void runTest( int argc, char** argv) { float HongRatio[24]; /* float tmp = 0.03125; int index=0; while(index<= 12){ HongRatio[index] = tmp; tmp *=2; printf("hongRatio[%d]=%f\n", index, HongRatio[index]); index++; } HongRatio[13]=5.0; printf("hongRatio[13]=%f\n", HongRatio[13]); HongRatio[14]=6.0; printf("hongRatio[14]=%f\n", HongRatio[14]); HongRatio[15]=7; printf("hongRatio[15]=%f\n", HongRatio[15]); HongRatio[16]=0.75; printf("hongRatio[16]=%f\n", HongRatio[16]); HongRatio[17]=1.25; printf("hongRatio[17]=%f\n", HongRatio[17]); HongRatio[18]=1.5; printf("hongRatio[18]=%f\n", HongRatio[18]); HongRatio[19]=1.75; printf("hongRatio[19]=%f\n", HongRatio[19]); HongRatio[20]=2.5; printf("hongRatio[20]=%f\n", HongRatio[20]); HongRatio[21]=3; printf("hongRatio[21]=%f\n", HongRatio[21]); HongRatio[22]=3.5; printf("hongRatio[22]=%f\n", HongRatio[22]); */ printf("Fishnum = %d, BlockNum = %d\n", FISHNUM, BLOCK_NUM); int ratioIndex=0; HongRatio[0]=4; HongRatio[1]=16; HongRatio[2]=64; for(ratioIndex=0; ratioIndex< 1; ratioIndex++) // for testing, only do first ratioIndex { char SAGPUPosx[100]=""; sprintf(SAGPUPosx, "./data/SAGPUPosxN%dR%f.dat", FISHNUM, HongRatio[ratioIndex]); char SAGPUPosy[100]=""; sprintf(SAGPUPosy, "./data/SAGPUPosyN%dR%f.dat", FISHNUM, HongRatio[ratioIndex]); char SAGPUVelx[100]=""; sprintf(SAGPUVelx, "./data/SAGPUVelxN%dR%f.dat", FISHNUM, HongRatio[ratioIndex]); char SAGPUVely[100]=""; sprintf(SAGPUVely, "./data/SAGPUVelyN%dR%f.dat", FISHNUM, HongRatio[ratioIndex]); FILE *pxFile, *pyFile, *vxFile, *vyFile; pxFile = fopen (SAGPUPosx,"a"); if (pxFile==NULL) printf("file open failure PxFile only\n"); pyFile = fopen (SAGPUPosy,"a"); vxFile = fopen (SAGPUVelx,"a"); vyFile = fopen (SAGPUVely,"a"); if ((pxFile==NULL)||(pyFile==NULL)||(vxFile==NULL)||(vyFile==NULL)) printf("file open failure PxFile\n"); FILE *fp1,*fp2,*fp3,*fp4; char GPUPosx[100]=""; //sprintf(GPUPosx, "./Initial/GPUPosx.dat%f", HongRatio[ratioIndex]); sprintf(GPUPosx, "../long-sim-gpu/data/GPUPosxN%dR%f.dat", FISHNUM, HongRatio[ratioIndex]); char GPUPosy[100]=""; //sprintf(GPUPosy, "./Initial/GPUPosy.dat%f", HongRatio[ratioIndex]); sprintf(GPUPosy, "../long-sim-gpu/data/GPUPosyN%dR%f.dat", FISHNUM, HongRatio[ratioIndex]); char GPUVelx[100]=""; //sprintf(GPUVelx, "./Initial/GPUVelx.dat%f", HongRatio[ratioIndex]); sprintf(GPUVelx, "../long-sim-gpu/data/GPUVelxN%dR%f.dat", FISHNUM, HongRatio[ratioIndex]); char GPUVely[100]=""; //sprintf(GPUVely, "./Initial/GPUVely.dat%f", 
HongRatio[ratioIndex]); sprintf(GPUVely, "../long-sim-gpu/data/GPUVelyN%dR%f.dat", FISHNUM, HongRatio[ratioIndex]); //printf("GPUVely %s\n", GPUVely); fp1=fopen(GPUPosx,"a"); /* changed 'r' to 'a'*/ fp2=fopen(GPUPosy,"a"); fp3=fopen(GPUVelx,"a"); fp4=fopen(GPUVely,"a"); if((fp1 == NULL) || (fp2 == NULL) || (fp3 == NULL) || (fp4 == NULL)) { printf("file open failure\n"); exit(1); }//else //printf("File open Successfully"); long pos1, pos2, pos3, pos4; for(int set=0; set <NumofRuns; set ++){ printf("SetID, %d\n", set); pos1 = ftell (fp1); pos2 = ftell (fp2); pos3 = ftell (fp3); pos4 = ftell (fp4); float Vrot[2]; for(int PFishID=0; PFishID < 1; PFishID++){ //do all FISHNUM fish in parallel at once for each school float* hostPx = (float*) malloc(ARRAYSIZE*sizeof(float)); float* hostPy = (float*) malloc(ARRAYSIZE*sizeof(float)); float* hostVx = (float*) malloc(ARRAYSIZE*sizeof(float)); float* hostVy = (float*) malloc(ARRAYSIZE*sizeof(float)); float* hostDx = (float*) malloc(ARRAYSIZE*sizeof(float)); float* hostDy = (float*) malloc(ARRAYSIZE*sizeof(float)); float* hostAverageVx = (float*) malloc(BLOCK_NUM*MAXSTEPSPERITER*sizeof(float)); float* hostAverageVy = (float*) malloc(BLOCK_NUM*MAXSTEPSPERITER*sizeof(float)); for (int i = 0; i < BLOCK_NUM; i++) { fseek(fp1,pos1,SEEK_SET); fseek(fp2,pos2,SEEK_SET); fseek(fp3,pos3,SEEK_SET); fseek(fp4,pos4,SEEK_SET); for (int j = 0; j < FISHNUM; j++) { fscanf(fp1,"%f",&hostPx[i*FISHNUM+j]); fscanf(fp2,"%f",&hostPy[i*FISHNUM+j]); fscanf(fp3,"%f",&hostVx[i*FISHNUM+j]); fscanf(fp4,"%f",&hostVy[i*FISHNUM+j]); hostDx[i*FISHNUM+j] = 0.0; hostDy[i*FISHNUM+j] = 0.0; } } ////for the same school, perturb fish 0, block 0, fish 1 block 1, ...////// for (int i = 0; i < BLOCK_NUM; i++) { Vrot[0]=cos(PI/2.0)*hostVx[i]-sin(PI/2.0)*hostVy[i]; Vrot[1]=sin(PI/2.0)*hostVx[i]+cos(PI/2.0)*hostVy[i]; hostVx[i*FISHNUM+i]=Vrot[0]; hostVy[i*FISHNUM+i]=Vrot[1]; } float preseed[BLOCKSIZE*BLOCK_NUM]; for(int i = 0; i <BLOCKSIZE*BLOCK_NUM; i++) preseed[i] =RAND_MAX%(BLOCKSIZE*BLOCK_NUM)*i; float * dpreseed; CUDA_SAFE_CALL(hipMalloc((void**)&dpreseed, sizeof(float)*BLOCKSIZE*BLOCK_NUM)); CUDA_SAFE_CALL(hipMemcpy(dpreseed, preseed, sizeof(float)*BLOCKSIZE*BLOCK_NUM , hipMemcpyHostToDevice)); float* devicePx; CUDA_SAFE_CALL(hipMalloc((void**)&devicePx, sizeof(float)*ARRAYSIZE)); CUDA_SAFE_CALL(hipMemcpy(devicePx, hostPx, sizeof(float)*ARRAYSIZE, hipMemcpyHostToDevice)); float* devicePy; CUDA_SAFE_CALL(hipMalloc((void**)&devicePy, sizeof(float)*ARRAYSIZE)); CUDA_SAFE_CALL(hipMemcpy(devicePy, hostPy, sizeof(float)*ARRAYSIZE, hipMemcpyHostToDevice)); float* deviceVx; CUDA_SAFE_CALL(hipMalloc((void**)&deviceVx, sizeof(float)*ARRAYSIZE)); CUDA_SAFE_CALL(hipMemcpy(deviceVx, hostVx, sizeof(float)*ARRAYSIZE, hipMemcpyHostToDevice)); float* deviceVy; CUDA_SAFE_CALL(hipMalloc((void**)&deviceVy, sizeof(float)*ARRAYSIZE)); CUDA_SAFE_CALL(hipMemcpy(deviceVy, hostVy, sizeof(float)*ARRAYSIZE, hipMemcpyHostToDevice)); float* deviceDx; CUDA_SAFE_CALL(hipMalloc((void**)&deviceDx, sizeof(float)*ARRAYSIZE)); CUDA_SAFE_CALL(hipMemcpy(deviceDx, hostDx, sizeof(float)*ARRAYSIZE, hipMemcpyHostToDevice)); float* deviceDy; CUDA_SAFE_CALL(hipMalloc((void**)&deviceDy, sizeof(float)*ARRAYSIZE)); CUDA_SAFE_CALL(hipMemcpy(deviceDy, hostDy, sizeof(float)*ARRAYSIZE, hipMemcpyHostToDevice)); memset(hostAverageVx, 0, BLOCK_NUM*MAXSTEPSPERITER); memset(hostAverageVy, 0, BLOCK_NUM*MAXSTEPSPERITER); float* deviceAverageVx; float* deviceAverageVy; CUDA_SAFE_CALL(hipMalloc((void**)&deviceAverageVx, 
sizeof(float)*BLOCK_NUM*MAXSTEPSPERITER)); CUDA_SAFE_CALL(hipMalloc((void**)&deviceAverageVy, sizeof(float)*BLOCK_NUM*MAXSTEPSPERITER)); CUDA_SAFE_CALL(hipMemcpy(deviceAverageVx, hostAverageVx, sizeof(float)*BLOCK_NUM*MAXSTEPSPERITER, hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(deviceAverageVy, hostAverageVy, sizeof(float)*BLOCK_NUM*MAXSTEPSPERITER, hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemset( deviceAverageVx, 0, BLOCK_NUM*MAXSTEPSPERITER)); CUDA_SAFE_CALL(hipMemset( deviceAverageVy, 0, BLOCK_NUM*MAXSTEPSPERITER)); dim3 threads(BLOCKSIZE); dim3 grid(BLOCK_NUM); hipLaunchKernelGGL(( fishKernel), dim3(grid), dim3(threads), 0, 0, devicePx, devicePy, deviceVx, deviceVy, deviceDx, deviceDy, dpreseed, HongRatio[ratioIndex], deviceAverageVx, deviceAverageVy); // check for any errors CUT_CHECK_ERROR("Kernel execution failed"); CUDA_SAFE_CALL(hipMemcpy(hostPx, devicePx, sizeof(float) * ARRAYSIZE, hipMemcpyDeviceToHost)); CUDA_SAFE_CALL(hipMemcpy(hostPy, devicePy, sizeof(float) * ARRAYSIZE, hipMemcpyDeviceToHost)); CUDA_SAFE_CALL(hipMemcpy(hostVx, deviceVx, sizeof(float) * ARRAYSIZE, hipMemcpyDeviceToHost)); CUDA_SAFE_CALL(hipMemcpy(hostVy, deviceVy, sizeof(float) * ARRAYSIZE, hipMemcpyDeviceToHost)); CUDA_SAFE_CALL(hipMemcpy(hostAverageVx, deviceAverageVx, sizeof(float) * BLOCK_NUM*MAXSTEPSPERITER, hipMemcpyDeviceToHost)); CUDA_SAFE_CALL(hipMemcpy(hostAverageVy, deviceAverageVy, sizeof(float) * BLOCK_NUM*MAXSTEPSPERITER, hipMemcpyDeviceToHost)); //double AverageVx=0.0, AverageVy=0.0; // //for (int i = 0; i < BLOCK_NUM; i++) { // hostPx[i] = 0.0; // hostPy[i] = 0.0; // for (int j = 0; j < FISHNUM; j++) { // hostPx[i] += hostVx[i*FISHNUM+j]; // hostPy[i] += hostVy[i*FISHNUM+j]; // } // fprintf(vxFile, "%f ", hostPx[i]/FISHNUM); // fprintf(vyFile, "%f ", hostPy[i]/FISHNUM); // } //fprintf (vxFile, "Fish %d setid %d \n", PFishID, set); //fprintf (vyFile, "Fish %d setid %d \n", PFishID, set); for (int i = 0; i < BLOCK_NUM; i++) { //fprintf (vxFile, "Block %d\n", i); //fprintf (vyFile, "Block %d\n", i); for(int j=0; j< MAXSTEPSPERITER; j++) { fprintf(vxFile, "%f ", hostAverageVx[i*MAXSTEPSPERITER+j]); fprintf(vyFile, "%f ", hostAverageVy[i*MAXSTEPSPERITER+j]); //printf("Host Block %d, step %d average :%f ", i, j,hostAverageVx[i*MAXSTEPSPERITER+j]); } fprintf (vxFile, "\n"); fprintf (vyFile, "\n"); } fprintf (vxFile, "\n"); fprintf (vyFile, "\n"); //printf("Finish Print\n"); CUDA_SAFE_CALL(hipFree(dpreseed)); CUDA_SAFE_CALL(hipFree(devicePx)); CUDA_SAFE_CALL(hipFree(devicePy)); CUDA_SAFE_CALL(hipFree(deviceVx)); CUDA_SAFE_CALL(hipFree(deviceVy)); CUDA_SAFE_CALL(hipFree(deviceDx)); CUDA_SAFE_CALL(hipFree(deviceDy)); free(hostPx); free(hostPy); free(hostVx); free(hostVy); free(hostDx); free(hostDy); } } fprintf (pxFile, "\n"); fprintf (pyFile, "\n"); fprintf (vxFile, "\n"); fprintf (vyFile, "\n"); fclose(pxFile); fclose(pyFile); fclose(pxFile); fclose(vyFile); printf("Ratio is %f\n", HongRatio[ratioIndex]); fclose(fp1); fclose(fp2); fclose(fp3); fclose(fp4); } }
7352fb8804996b1bb8600a14031b64a03b2eb49f.cu
/* * Originally written by Hong Li and Allison Kolpas. * Updated and modified by Michael Busch. */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes, project #include <cutil.h> // includes, kernels #include "SAfish.h" #include <SAfish_kernel.cu> //////////////////////////////////////////////////////////////////////////////// // declaration, forward //float HongRatio=0.0; void runTest( int argc, char** argv); extern "C" //void cpufish(float newRatio); void cpufish(); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { runTest( argc, argv); CUT_EXIT(argc, argv); } //////////////////////////////////////////////////////////////////////////////// //! Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// void runTest( int argc, char** argv) { float HongRatio[24]; /* float tmp = 0.03125; int index=0; while(index<= 12){ HongRatio[index] = tmp; tmp *=2; printf("hongRatio[%d]=%f\n", index, HongRatio[index]); index++; } HongRatio[13]=5.0; printf("hongRatio[13]=%f\n", HongRatio[13]); HongRatio[14]=6.0; printf("hongRatio[14]=%f\n", HongRatio[14]); HongRatio[15]=7; printf("hongRatio[15]=%f\n", HongRatio[15]); HongRatio[16]=0.75; printf("hongRatio[16]=%f\n", HongRatio[16]); HongRatio[17]=1.25; printf("hongRatio[17]=%f\n", HongRatio[17]); HongRatio[18]=1.5; printf("hongRatio[18]=%f\n", HongRatio[18]); HongRatio[19]=1.75; printf("hongRatio[19]=%f\n", HongRatio[19]); HongRatio[20]=2.5; printf("hongRatio[20]=%f\n", HongRatio[20]); HongRatio[21]=3; printf("hongRatio[21]=%f\n", HongRatio[21]); HongRatio[22]=3.5; printf("hongRatio[22]=%f\n", HongRatio[22]); */ printf("Fishnum = %d, BlockNum = %d\n", FISHNUM, BLOCK_NUM); int ratioIndex=0; HongRatio[0]=4; HongRatio[1]=16; HongRatio[2]=64; for(ratioIndex=0; ratioIndex< 1; ratioIndex++) // for testing, only do first ratioIndex { char SAGPUPosx[100]=""; sprintf(SAGPUPosx, "./data/SAGPUPosxN%dR%f.dat", FISHNUM, HongRatio[ratioIndex]); char SAGPUPosy[100]=""; sprintf(SAGPUPosy, "./data/SAGPUPosyN%dR%f.dat", FISHNUM, HongRatio[ratioIndex]); char SAGPUVelx[100]=""; sprintf(SAGPUVelx, "./data/SAGPUVelxN%dR%f.dat", FISHNUM, HongRatio[ratioIndex]); char SAGPUVely[100]=""; sprintf(SAGPUVely, "./data/SAGPUVelyN%dR%f.dat", FISHNUM, HongRatio[ratioIndex]); FILE *pxFile, *pyFile, *vxFile, *vyFile; pxFile = fopen (SAGPUPosx,"a"); if (pxFile==NULL) printf("file open failure PxFile only\n"); pyFile = fopen (SAGPUPosy,"a"); vxFile = fopen (SAGPUVelx,"a"); vyFile = fopen (SAGPUVely,"a"); if ((pxFile==NULL)||(pyFile==NULL)||(vxFile==NULL)||(vyFile==NULL)) printf("file open failure PxFile\n"); FILE *fp1,*fp2,*fp3,*fp4; char GPUPosx[100]=""; //sprintf(GPUPosx, "./Initial/GPUPosx.dat%f", HongRatio[ratioIndex]); sprintf(GPUPosx, "../long-sim-gpu/data/GPUPosxN%dR%f.dat", FISHNUM, HongRatio[ratioIndex]); char GPUPosy[100]=""; //sprintf(GPUPosy, "./Initial/GPUPosy.dat%f", HongRatio[ratioIndex]); sprintf(GPUPosy, "../long-sim-gpu/data/GPUPosyN%dR%f.dat", FISHNUM, HongRatio[ratioIndex]); char GPUVelx[100]=""; //sprintf(GPUVelx, "./Initial/GPUVelx.dat%f", HongRatio[ratioIndex]); sprintf(GPUVelx, "../long-sim-gpu/data/GPUVelxN%dR%f.dat", FISHNUM, HongRatio[ratioIndex]); char GPUVely[100]=""; //sprintf(GPUVely, "./Initial/GPUVely.dat%f", HongRatio[ratioIndex]); sprintf(GPUVely, "../long-sim-gpu/data/GPUVelyN%dR%f.dat", 
FISHNUM, HongRatio[ratioIndex]); //printf("GPUVely %s\n", GPUVely); fp1=fopen(GPUPosx,"a"); /* changed 'r' to 'a'*/ fp2=fopen(GPUPosy,"a"); fp3=fopen(GPUVelx,"a"); fp4=fopen(GPUVely,"a"); if((fp1 == NULL) || (fp2 == NULL) || (fp3 == NULL) || (fp4 == NULL)) { printf("file open failure\n"); exit(1); }//else //printf("File open Successfully"); long pos1, pos2, pos3, pos4; for(int set=0; set <NumofRuns; set ++){ printf("SetID, %d\n", set); pos1 = ftell (fp1); pos2 = ftell (fp2); pos3 = ftell (fp3); pos4 = ftell (fp4); float Vrot[2]; for(int PFishID=0; PFishID < 1; PFishID++){ //do all FISHNUM fish in parallel at once for each school float* hostPx = (float*) malloc(ARRAYSIZE*sizeof(float)); float* hostPy = (float*) malloc(ARRAYSIZE*sizeof(float)); float* hostVx = (float*) malloc(ARRAYSIZE*sizeof(float)); float* hostVy = (float*) malloc(ARRAYSIZE*sizeof(float)); float* hostDx = (float*) malloc(ARRAYSIZE*sizeof(float)); float* hostDy = (float*) malloc(ARRAYSIZE*sizeof(float)); float* hostAverageVx = (float*) malloc(BLOCK_NUM*MAXSTEPSPERITER*sizeof(float)); float* hostAverageVy = (float*) malloc(BLOCK_NUM*MAXSTEPSPERITER*sizeof(float)); for (int i = 0; i < BLOCK_NUM; i++) { fseek(fp1,pos1,SEEK_SET); fseek(fp2,pos2,SEEK_SET); fseek(fp3,pos3,SEEK_SET); fseek(fp4,pos4,SEEK_SET); for (int j = 0; j < FISHNUM; j++) { fscanf(fp1,"%f",&hostPx[i*FISHNUM+j]); fscanf(fp2,"%f",&hostPy[i*FISHNUM+j]); fscanf(fp3,"%f",&hostVx[i*FISHNUM+j]); fscanf(fp4,"%f",&hostVy[i*FISHNUM+j]); hostDx[i*FISHNUM+j] = 0.0; hostDy[i*FISHNUM+j] = 0.0; } } ////for the same school, perturb fish 0, block 0, fish 1 block 1, ...////// for (int i = 0; i < BLOCK_NUM; i++) { Vrot[0]=cos(PI/2.0)*hostVx[i]-sin(PI/2.0)*hostVy[i]; Vrot[1]=sin(PI/2.0)*hostVx[i]+cos(PI/2.0)*hostVy[i]; hostVx[i*FISHNUM+i]=Vrot[0]; hostVy[i*FISHNUM+i]=Vrot[1]; } float preseed[BLOCKSIZE*BLOCK_NUM]; for(int i = 0; i <BLOCKSIZE*BLOCK_NUM; i++) preseed[i] =RAND_MAX%(BLOCKSIZE*BLOCK_NUM)*i; float * dpreseed; CUDA_SAFE_CALL(cudaMalloc((void**)&dpreseed, sizeof(float)*BLOCKSIZE*BLOCK_NUM)); CUDA_SAFE_CALL(cudaMemcpy(dpreseed, preseed, sizeof(float)*BLOCKSIZE*BLOCK_NUM , cudaMemcpyHostToDevice)); float* devicePx; CUDA_SAFE_CALL(cudaMalloc((void**)&devicePx, sizeof(float)*ARRAYSIZE)); CUDA_SAFE_CALL(cudaMemcpy(devicePx, hostPx, sizeof(float)*ARRAYSIZE, cudaMemcpyHostToDevice)); float* devicePy; CUDA_SAFE_CALL(cudaMalloc((void**)&devicePy, sizeof(float)*ARRAYSIZE)); CUDA_SAFE_CALL(cudaMemcpy(devicePy, hostPy, sizeof(float)*ARRAYSIZE, cudaMemcpyHostToDevice)); float* deviceVx; CUDA_SAFE_CALL(cudaMalloc((void**)&deviceVx, sizeof(float)*ARRAYSIZE)); CUDA_SAFE_CALL(cudaMemcpy(deviceVx, hostVx, sizeof(float)*ARRAYSIZE, cudaMemcpyHostToDevice)); float* deviceVy; CUDA_SAFE_CALL(cudaMalloc((void**)&deviceVy, sizeof(float)*ARRAYSIZE)); CUDA_SAFE_CALL(cudaMemcpy(deviceVy, hostVy, sizeof(float)*ARRAYSIZE, cudaMemcpyHostToDevice)); float* deviceDx; CUDA_SAFE_CALL(cudaMalloc((void**)&deviceDx, sizeof(float)*ARRAYSIZE)); CUDA_SAFE_CALL(cudaMemcpy(deviceDx, hostDx, sizeof(float)*ARRAYSIZE, cudaMemcpyHostToDevice)); float* deviceDy; CUDA_SAFE_CALL(cudaMalloc((void**)&deviceDy, sizeof(float)*ARRAYSIZE)); CUDA_SAFE_CALL(cudaMemcpy(deviceDy, hostDy, sizeof(float)*ARRAYSIZE, cudaMemcpyHostToDevice)); memset(hostAverageVx, 0, BLOCK_NUM*MAXSTEPSPERITER); memset(hostAverageVy, 0, BLOCK_NUM*MAXSTEPSPERITER); float* deviceAverageVx; float* deviceAverageVy; CUDA_SAFE_CALL(cudaMalloc((void**)&deviceAverageVx, sizeof(float)*BLOCK_NUM*MAXSTEPSPERITER)); 
CUDA_SAFE_CALL(cudaMalloc((void**)&deviceAverageVy, sizeof(float)*BLOCK_NUM*MAXSTEPSPERITER));
CUDA_SAFE_CALL(cudaMemcpy(deviceAverageVx, hostAverageVx, sizeof(float)*BLOCK_NUM*MAXSTEPSPERITER, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(deviceAverageVy, hostAverageVy, sizeof(float)*BLOCK_NUM*MAXSTEPSPERITER, cudaMemcpyHostToDevice));
/* cudaMemset expects a byte count */
CUDA_SAFE_CALL(cudaMemset( deviceAverageVx, 0, sizeof(float)*BLOCK_NUM*MAXSTEPSPERITER));
CUDA_SAFE_CALL(cudaMemset( deviceAverageVy, 0, sizeof(float)*BLOCK_NUM*MAXSTEPSPERITER));

dim3 threads(BLOCKSIZE);
dim3 grid(BLOCK_NUM);

fishKernel<<<grid, threads>>>(devicePx, devicePy, deviceVx, deviceVy, deviceDx, deviceDy, dpreseed, HongRatio[ratioIndex], deviceAverageVx, deviceAverageVy);
// check for any errors
CUT_CHECK_ERROR("Kernel execution failed");

// copy results back to the host
CUDA_SAFE_CALL(cudaMemcpy(hostPx, devicePx, sizeof(float) * ARRAYSIZE, cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaMemcpy(hostPy, devicePy, sizeof(float) * ARRAYSIZE, cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaMemcpy(hostVx, deviceVx, sizeof(float) * ARRAYSIZE, cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaMemcpy(hostVy, deviceVy, sizeof(float) * ARRAYSIZE, cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaMemcpy(hostAverageVx, deviceAverageVx, sizeof(float) * BLOCK_NUM*MAXSTEPSPERITER, cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaMemcpy(hostAverageVy, deviceAverageVy, sizeof(float) * BLOCK_NUM*MAXSTEPSPERITER, cudaMemcpyDeviceToHost));

//double AverageVx=0.0, AverageVy=0.0;
//
//for (int i = 0; i < BLOCK_NUM; i++) {
//    hostPx[i] = 0.0;
//    hostPy[i] = 0.0;
//    for (int j = 0; j < FISHNUM; j++) {
//        hostPx[i] += hostVx[i*FISHNUM+j];
//        hostPy[i] += hostVy[i*FISHNUM+j];
//    }
//    fprintf(vxFile, "%f ", hostPx[i]/FISHNUM);
//    fprintf(vyFile, "%f ", hostPy[i]/FISHNUM);
//}
//fprintf (vxFile, "Fish %d setid %d \n", PFishID, set);
//fprintf (vyFile, "Fish %d setid %d \n", PFishID, set);

// write the per-block average velocities for this run
for (int i = 0; i < BLOCK_NUM; i++) {
    //fprintf (vxFile, "Block %d\n", i);
    //fprintf (vyFile, "Block %d\n", i);
    for(int j=0; j< MAXSTEPSPERITER; j++) {
        fprintf(vxFile, "%f ", hostAverageVx[i*MAXSTEPSPERITER+j]);
        fprintf(vyFile, "%f ", hostAverageVy[i*MAXSTEPSPERITER+j]);
        //printf("Host Block %d, step %d average :%f ", i, j,hostAverageVx[i*MAXSTEPSPERITER+j]);
    }
    fprintf (vxFile, "\n");
    fprintf (vyFile, "\n");
}
fprintf (vxFile, "\n");
fprintf (vyFile, "\n");
//printf("Finish Print\n");

// release device and host buffers for this run
CUDA_SAFE_CALL(cudaFree(dpreseed));
CUDA_SAFE_CALL(cudaFree(devicePx));
CUDA_SAFE_CALL(cudaFree(devicePy));
CUDA_SAFE_CALL(cudaFree(deviceVx));
CUDA_SAFE_CALL(cudaFree(deviceVy));
CUDA_SAFE_CALL(cudaFree(deviceDx));
CUDA_SAFE_CALL(cudaFree(deviceDy));
CUDA_SAFE_CALL(cudaFree(deviceAverageVx));
CUDA_SAFE_CALL(cudaFree(deviceAverageVy));
free(hostPx);
free(hostPy);
free(hostVx);
free(hostVy);
free(hostDx);
free(hostDy);
free(hostAverageVx);
free(hostAverageVy);
}
}

fprintf (pxFile, "\n");
fprintf (pyFile, "\n");
fprintf (vxFile, "\n");
fprintf (vyFile, "\n");

fclose(pxFile);
fclose(pyFile);
fclose(vxFile);
fclose(vyFile);

printf("Ratio is %f\n", HongRatio[ratioIndex]);

fclose(fp1);
fclose(fp2);
fclose(fp3);
fclose(fp4);
}
}
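The SAfish driver above relies on the old CUDA SDK cutil macros (CUDA_SAFE_CALL, CUT_CHECK_ERROR, CUT_EXIT), which are no longer shipped with current toolkits. As a rough sketch only, equivalent checks can be built from the runtime API; the names CUDA_CHECK and KERNEL_CHECK below are illustrative and not part of the original project.

// Minimal stand-ins for cutil's CUDA_SAFE_CALL / CUT_CHECK_ERROR, using only the
// CUDA runtime API. These are assumptions about how one might replace cutil, not
// code from the original SAfish project.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define CUDA_CHECK(call)                                                     \
    do {                                                                     \
        cudaError_t err__ = (call);                                          \
        if (err__ != cudaSuccess) {                                          \
            fprintf(stderr, "CUDA error %s at %s:%d\n",                      \
                    cudaGetErrorString(err__), __FILE__, __LINE__);          \
            exit(EXIT_FAILURE);                                              \
        }                                                                    \
    } while (0)

#define KERNEL_CHECK(msg)                                                    \
    do {                                                                     \
        cudaError_t err__ = cudaGetLastError();                              \
        if (err__ != cudaSuccess) {                                          \
            fprintf(stderr, "%s: %s\n", (msg), cudaGetErrorString(err__));   \
            exit(EXIT_FAILURE);                                              \
        }                                                                    \
        CUDA_CHECK(cudaDeviceSynchronize());                                 \
    } while (0)

With these in place, CUDA_CHECK(cudaMalloc(...)) and KERNEL_CHECK("fishKernel") after the launch would behave much like the cutil calls used above.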
38f342353cd18493d1501585b938d389d5d5563d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.1.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date August 2016 @generated from sparse-iter/blas/magma_zmcsrcompressor_gpu.cu, normal z -> s, Tue Aug 30 09:38:46 2016 @author Hartwig Anzt */ #include "magmasparse_internal.h" #define BLOCK_SIZE1 256 #define BLOCK_SIZE2 1 // copy nonzeros into new structure __global__ void magma_smcsrgpu_kernel1( int num_rows, float *A_val, magma_index_t *A_rowptr, magma_index_t *A_colind, float *B_val, magma_index_t *B_rowptr, magma_index_t *B_colind ) { int row = blockIdx.x*blockDim.x+threadIdx.x; int j; if(row<num_rows){ float zero = MAGMA_S_ZERO; int start = A_rowptr[ row ]; int new_location = start; int end = A_rowptr[ row+1 ]; for( j=start; j<end; j++ ){ if( A_val[j] != zero ){ // B_val[new_location] = A_val[j]; // B_colind[new_location] = A_colind[j]; new_location++; } } // this is not a correctr rowpointer! this is nn_z in this row! B_rowptr[ row ] = new_location-start; } } // generate a valid rowpointer __global__ void magma_smcsrgpu_kernel2( int num_rows, magma_index_t *B_rowptr, magma_index_t *A_rowptr ) { int idx = blockIdx.x*blockDim.x+threadIdx.x; int j, nnz = 0; if( idx == 0 ){ A_rowptr[ 0 ] = nnz; for( j=0; j<num_rows; j++ ){ nnz+=B_rowptr[ j ]; A_rowptr[ j+1 ] = nnz; } } } // copy new structure into original matrix __global__ void magma_smcsrgpu_kernel3( int num_rows, float *B_val, magma_index_t *B_rowptr, magma_index_t *B_colind, magma_index_t *B2_rowptr, float *A_val, magma_index_t *A_rowptr, magma_index_t *A_colind ) { int row = blockIdx.x*blockDim.x+threadIdx.x; int j, new_location; if(row<num_rows){ new_location = A_rowptr[ row ]; int start = B2_rowptr[ row ]; int end = B2_rowptr[ row+1 ]; float zero = MAGMA_S_ZERO; for( j=start; j<end; j++ ){ if( A_val[j] != zero ){ B_val[new_location] = A_val[j]; B_colind[new_location] = A_colind[j]; new_location++; } // A_val[ j ] = B_val[ j ]; // A_colind[ j ] = B_colind[ j ]; } } } /** Purpose ------- Removes zeros in a CSR matrix. This is a GPU implementation of the CSR compressor. Arguments --------- @param[in,out] A magma_s_matrix* input/output matrix @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_saux ********************************************************************/ extern "C" magma_int_t magma_smcsrcompressor_gpu( magma_s_matrix *A, magma_queue_t queue ) { magma_int_t info = 0; magma_s_matrix B={Magma_CSR}, B2={Magma_CSR}; magma_s_matrix dA={Magma_CSR}, CSRA={Magma_CSR}; magma_index_t *cputmp = NULL; if ( A->memory_location == Magma_DEV && A->storage_type == Magma_CSR ) { CHECK( magma_index_malloc( &B.drow, A->num_rows + 1 )); CHECK( magma_index_malloc( &B2.drow, A->num_rows + 1 )); magma_index_copyvector( (A->num_rows+1), A->drow, 1, B2.drow, 1, queue ); dim3 grid1( magma_ceildiv( A->num_rows, BLOCK_SIZE1 ) ); // copying the nonzeros into B and write in B.drow how many there are hipLaunchKernelGGL(( magma_smcsrgpu_kernel1), dim3(grid1), dim3(BLOCK_SIZE1), 0, queue->cuda_stream() , A->num_rows, A->dval, A->drow, A->dcol, B.dval, B.drow, B.dcol ); // correct the row pointer dim3 grid2( 1, 1, 1); hipLaunchKernelGGL(( magma_smcsrgpu_kernel2), dim3(grid2), dim3(BLOCK_SIZE2), 0, queue->cuda_stream() , A->num_rows, B.drow, A->drow ); // access the true number of nonzeros CHECK( magma_index_malloc_cpu( &cputmp, 1 )); magma_index_getvector( 1, A->row+(A->num_rows), 1, cputmp, 1, queue ); A->nnz = (magma_int_t) cputmp[0]; // reallocate with right size CHECK( magma_smalloc( &B.dval, A->nnz )); CHECK( magma_index_malloc( &B.dcol, A->nnz )); // copy correct values back hipLaunchKernelGGL(( magma_smcsrgpu_kernel3), dim3(grid1), dim3(BLOCK_SIZE1), 0, queue->cuda_stream() , A->num_rows, B.dval, B.drow, B.dcol, B2.drow, A->dval, A->drow, A->dcol ); magma_free( A->dcol ); magma_free( A->dval ); A->dcol = B.dcol; A->dval = B.dval; } else { magma_storage_t A_storage = A->storage_type; magma_location_t A_location = A->memory_location; CHECK( magma_smconvert( *A, &CSRA, A->storage_type, Magma_CSR, queue )); CHECK( magma_smtransfer( *A, &dA, A->memory_location, Magma_DEV, queue )); CHECK( magma_smcsrcompressor_gpu( &dA, queue )); magma_smfree( &dA, queue ); magma_smfree( A, queue ); CHECK( magma_smtransfer( dA, &CSRA, Magma_DEV, A_location, queue )); CHECK( magma_smconvert( CSRA, A, Magma_CSR, A_storage, queue )); magma_smfree( &dA, queue ); magma_smfree( &CSRA, queue ); } cleanup: magma_smfree( &dA, queue ); magma_smfree( &CSRA, queue ); magma_free( B2.drow ); magma_free( B.drow ); return info; }
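For readability, here is a minimal, self-contained sketch of the same three-step compaction pattern the kernels above implement: count the surviving entries per row, turn the counts into a new row pointer, then scatter the kept values. It uses plain CUDA types rather than MAGMA's, and all names are illustrative only.

// Sketch of CSR zero-removal without MAGMA types; per-row counting and scattering
// only. The row pointer itself is an exclusive prefix sum of `counts` (the MAGMA
// code above does that serially in a single-thread kernel).
__global__ void count_nonzeros(int n_rows, const float* val, const int* rowptr,
                               int* counts)
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row < n_rows) {
        int c = 0;
        for (int j = rowptr[row]; j < rowptr[row + 1]; ++j)
            if (val[j] != 0.0f) ++c;
        counts[row] = c;            // number of kept entries in this row
    }
}

__global__ void scatter_nonzeros(int n_rows, const float* val, const int* rowptr,
                                 const int* colind, const int* new_rowptr,
                                 float* new_val, int* new_colind)
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row < n_rows) {
        int dst = new_rowptr[row];  // start of this row in the compacted arrays
        for (int j = rowptr[row]; j < rowptr[row + 1]; ++j) {
            if (val[j] != 0.0f) {
                new_val[dst]    = val[j];
                new_colind[dst] = colind[j];
                ++dst;
            }
        }
    }
}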
38f342353cd18493d1501585b938d389d5d5563d.cu
/* -- MAGMA (version 2.1.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date August 2016 @generated from sparse-iter/blas/magma_zmcsrcompressor_gpu.cu, normal z -> s, Tue Aug 30 09:38:46 2016 @author Hartwig Anzt */ #include "magmasparse_internal.h" #define BLOCK_SIZE1 256 #define BLOCK_SIZE2 1 // copy nonzeros into new structure __global__ void magma_smcsrgpu_kernel1( int num_rows, float *A_val, magma_index_t *A_rowptr, magma_index_t *A_colind, float *B_val, magma_index_t *B_rowptr, magma_index_t *B_colind ) { int row = blockIdx.x*blockDim.x+threadIdx.x; int j; if(row<num_rows){ float zero = MAGMA_S_ZERO; int start = A_rowptr[ row ]; int new_location = start; int end = A_rowptr[ row+1 ]; for( j=start; j<end; j++ ){ if( A_val[j] != zero ){ // B_val[new_location] = A_val[j]; // B_colind[new_location] = A_colind[j]; new_location++; } } // this is not a correctr rowpointer! this is nn_z in this row! B_rowptr[ row ] = new_location-start; } } // generate a valid rowpointer __global__ void magma_smcsrgpu_kernel2( int num_rows, magma_index_t *B_rowptr, magma_index_t *A_rowptr ) { int idx = blockIdx.x*blockDim.x+threadIdx.x; int j, nnz = 0; if( idx == 0 ){ A_rowptr[ 0 ] = nnz; for( j=0; j<num_rows; j++ ){ nnz+=B_rowptr[ j ]; A_rowptr[ j+1 ] = nnz; } } } // copy new structure into original matrix __global__ void magma_smcsrgpu_kernel3( int num_rows, float *B_val, magma_index_t *B_rowptr, magma_index_t *B_colind, magma_index_t *B2_rowptr, float *A_val, magma_index_t *A_rowptr, magma_index_t *A_colind ) { int row = blockIdx.x*blockDim.x+threadIdx.x; int j, new_location; if(row<num_rows){ new_location = A_rowptr[ row ]; int start = B2_rowptr[ row ]; int end = B2_rowptr[ row+1 ]; float zero = MAGMA_S_ZERO; for( j=start; j<end; j++ ){ if( A_val[j] != zero ){ B_val[new_location] = A_val[j]; B_colind[new_location] = A_colind[j]; new_location++; } // A_val[ j ] = B_val[ j ]; // A_colind[ j ] = B_colind[ j ]; } } } /** Purpose ------- Removes zeros in a CSR matrix. This is a GPU implementation of the CSR compressor. Arguments --------- @param[in,out] A magma_s_matrix* input/output matrix @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_saux ********************************************************************/ extern "C" magma_int_t magma_smcsrcompressor_gpu( magma_s_matrix *A, magma_queue_t queue ) { magma_int_t info = 0; magma_s_matrix B={Magma_CSR}, B2={Magma_CSR}; magma_s_matrix dA={Magma_CSR}, CSRA={Magma_CSR}; magma_index_t *cputmp = NULL; if ( A->memory_location == Magma_DEV && A->storage_type == Magma_CSR ) { CHECK( magma_index_malloc( &B.drow, A->num_rows + 1 )); CHECK( magma_index_malloc( &B2.drow, A->num_rows + 1 )); magma_index_copyvector( (A->num_rows+1), A->drow, 1, B2.drow, 1, queue ); dim3 grid1( magma_ceildiv( A->num_rows, BLOCK_SIZE1 ) ); // copying the nonzeros into B and write in B.drow how many there are magma_smcsrgpu_kernel1<<< grid1, BLOCK_SIZE1, 0, queue->cuda_stream() >>> ( A->num_rows, A->dval, A->drow, A->dcol, B.dval, B.drow, B.dcol ); // correct the row pointer dim3 grid2( 1, 1, 1); magma_smcsrgpu_kernel2<<< grid2, BLOCK_SIZE2, 0, queue->cuda_stream() >>> ( A->num_rows, B.drow, A->drow ); // access the true number of nonzeros CHECK( magma_index_malloc_cpu( &cputmp, 1 )); magma_index_getvector( 1, A->row+(A->num_rows), 1, cputmp, 1, queue ); A->nnz = (magma_int_t) cputmp[0]; // reallocate with right size CHECK( magma_smalloc( &B.dval, A->nnz )); CHECK( magma_index_malloc( &B.dcol, A->nnz )); // copy correct values back magma_smcsrgpu_kernel3<<< grid1, BLOCK_SIZE1, 0, queue->cuda_stream() >>> ( A->num_rows, B.dval, B.drow, B.dcol, B2.drow, A->dval, A->drow, A->dcol ); magma_free( A->dcol ); magma_free( A->dval ); A->dcol = B.dcol; A->dval = B.dval; } else { magma_storage_t A_storage = A->storage_type; magma_location_t A_location = A->memory_location; CHECK( magma_smconvert( *A, &CSRA, A->storage_type, Magma_CSR, queue )); CHECK( magma_smtransfer( *A, &dA, A->memory_location, Magma_DEV, queue )); CHECK( magma_smcsrcompressor_gpu( &dA, queue )); magma_smfree( &dA, queue ); magma_smfree( A, queue ); CHECK( magma_smtransfer( dA, &CSRA, Magma_DEV, A_location, queue )); CHECK( magma_smconvert( CSRA, A, Magma_CSR, A_storage, queue )); magma_smfree( &dA, queue ); magma_smfree( &CSRA, queue ); } cleanup: magma_smfree( &dA, queue ); magma_smfree( &CSRA, queue ); magma_free( B2.drow ); magma_free( B.drow ); return info; }
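The row-pointer rebuild in magma_smcsrgpu_kernel2 runs in a single thread, which is simple but serial; the operation is just an exclusive prefix sum over the per-row counts. Below is a sketch of that one step done with Thrust instead, assuming d_counts holds the per-row nonzero counts and d_rowptr has num_rows+1 entries; the function name and plain int indices are illustrative, not MAGMA API.

// Build a CSR row pointer from per-row counts with an exclusive scan (sketch).
#include <thrust/device_ptr.h>
#include <thrust/scan.h>
#include <cuda_runtime.h>

void build_rowptr(const int* d_counts, int* d_rowptr, int num_rows)
{
    thrust::device_ptr<const int> counts(d_counts);
    thrust::device_ptr<int>       rowptr(d_rowptr);
    // exclusive scan over the counts gives the starting offset of each row
    thrust::exclusive_scan(counts, counts + num_rows, rowptr);
    // total nnz = last offset + last count; store it as the final row pointer entry
    int last_off = 0, last_cnt = 0;
    cudaMemcpy(&last_off, d_rowptr + num_rows - 1, sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(&last_cnt, d_counts + num_rows - 1, sizeof(int), cudaMemcpyDeviceToHost);
    int nnz = last_off + last_cnt;
    cudaMemcpy(d_rowptr + num_rows, &nnz, sizeof(int), cudaMemcpyHostToDevice);
}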
8ff5bbd450434a3f4e598f14193a2e623cf74498.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "kernel.h" #define TX 32 #define TY 32 #define DIM 2100 struct hipComplex { float r; float i; __device__ hipComplex( float a, float b ) : r(a), i(b) {} __device__ float magnitude2( void ) { return r * r + i * i; } __device__ hipComplex operator*(const hipComplex& a) { return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i); } __device__ hipComplex operator-(const hipComplex& a) { return hipComplex(r-a.r, i-a.i); } __device__ hipComplex operator+(const hipComplex& a) { return hipComplex(r+a.r, i+a.i); } __device__ hipComplex operator/(const hipComplex& a) { return hipComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i)); } }; __device__ hipComplex conj(hipComplex m) { hipComplex out(m.r,-m.i); return out; } __device__ hipComplex nor(hipComplex m) { hipComplex out(m.r*m.r+m.i*m.i,0.0); return out; } __device__ float norg(hipComplex m) { return sqrtf(m.r*m.r+m.i*m.i); } __device__ hipComplex qpoch(hipComplex a, hipComplex q) { hipComplex out(1.0,0.0); hipComplex unity(1.0,0.0); int i = 0; hipComplex Q = q; if(q.magnitude2()>1.0) { return hipComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<80;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ hipComplex qp(hipComplex a, hipComplex q, int n) { hipComplex out(1.0,0.0); hipComplex unity(1.0,0.0); int i = 0; hipComplex Q = q; if(q.magnitude2()>1.0) { return hipComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<n;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ hipComplex ramphi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,mq)/qpoch(q,mq); } __device__ hipComplex rampsi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,q)*qpoch(q*q,q*q); } __device__ hipComplex ramchi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,q*q); } __device__ hipComplex ramf(hipComplex a, hipComplex b) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex ma = mone*a; hipComplex mb = mone*b; return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b); } // complex exponential __device__ hipComplex expc(hipComplex m) { hipComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i)); return out; } __device__ hipComplex powc(hipComplex ag, hipComplex bg) { hipComplex out(0.0,0.0); hipComplex mesp(0.0,0.0); hipComplex frim(0.0,0.0); double radiu, thet; /* get the proper polar form of the complex number */ radiu = sqrtf(ag.r*ag.r + ag.i*ag.i); thet = atan2f(ag.i,ag.r); /* mesp gives R^(c+di) */ mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu)); mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu)); /* frim gives e^(i theta (c+di)) */ /* now since we already have the machinery for performing complex exponentiation (just exp), we can just call that here */ frim.r = -1.0 * bg.i * thet; frim.i = bg.r * thet; frim = expc(frim); out = mesp*frim; return out; } // cosine (nothing algorithmically clean) __device__ hipComplex cosc(hipComplex m) { hipComplex ai(0.0,1.0); hipComplex ot(0.5,0.0); hipComplex mone(-1.0,0.0); hipComplex out = ot*(expc(m*ai) + expc(mone*m*ai)); return out; } __device__ hipComplex sins(hipComplex m) { hipComplex ai(0.0,1.0); hipComplex ot(0.0,0.5); hipComplex mone(-1.0,0.0); hipComplex out = ot*(expc(m*ai) - expc(mone*m*ai)); return out; 
} __device__ hipComplex tans(hipComplex m) { return sins(m)/cosc(m); } __device__ hipComplex moeb(hipComplex t, hipComplex a, hipComplex z) { hipComplex out(0.0,0.0); hipComplex ai(0.0,1.0); hipComplex unity(1.0,0.0); out = expc(ai*t) * (z-a)/(unity-conj(a)*z); return out; } __device__ hipComplex bnewt(hipComplex z) { hipComplex three(3.0,0.0); hipComplex unity(1.0,0.0); hipComplex out(0.0,0.0); hipComplex Z =z; hipComplex L(0.0,0.0); hipComplex R(0.62348980185873359,0.7818314824680298); hipComplex v(0.62348980185873359,0.7818314824680298); int i; for(i=0;i<100;i++) { L = sins(expc(Z)-cosc(Z))-Z; out = out + v*L; v = R * v; Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity); } return out; } __device__ hipComplex they3(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + powc(q,enn*enn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ hipComplex wahi(hipComplex z) { int u; hipComplex un(1.0,0.0); hipComplex ne(1.0,0.0); hipComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne); ne = ne + un; } out = out + un; return out; } __device__ hipComplex dwahi(hipComplex z) { int u; hipComplex un(1.0,0.0); hipComplex ne(1.0,0.0); hipComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne-un); ne = ne + un; } return out; } __device__ hipComplex they3p(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ hipComplex h3ey3p(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex aut(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); hipComplex vel(0.0,0.0); hipComplex rav(0.0,0.0); for(u=-40;u<40;u++) { vel = expc(dui*enn*z); rav = powc(q,enn*enn); aut = aut + (enn*enn)*rav/q*vel; out = out + rav*vel; enn = enn + onn; } return out/aut; } __device__ hipComplex thess(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex the1(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); hipComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*sins(z); } __device__ hipComplex the2(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); hipComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*cosc(z); } __device__ hipComplex the3(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex the9(hipComplex z, hipComplex q, hipComplex r) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); 
for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * the3(tw*z,r) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex the4(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } /* routine to generate q-integers */ __device__ hipComplex qin(hipComplex a, hipComplex q) { hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); out = (unity - powc(q, a))/(unity-q); return out; } /* generating function for n^2 */ __device__ hipComplex geffa(hipComplex z, hipComplex q) { hipComplex out(0.0,0.0); hipComplex unity(1.0,0.0); hipComplex wu(0.0,0.0); hipComplex Z=unity; int v; for(v=0;v<20;v++) { out = out + qin(wu*wu,q)* Z; wu = wu + unity; Z = z * Z; } return out; } __device__ hipComplex thratd(hipComplex z, hipComplex q) { int n; hipComplex fau(4.0,0.0); hipComplex too(2.0,0.0); hipComplex unity(1.0,0.0); hipComplex ennn(1.0,0.0); hipComplex ni(-1.0,0.0); hipComplex noo(-1.0,0.0); hipComplex out(0.0,0.0); hipComplex loo = q; hipComplex qoo =q*q; for(n=0;n<80;n++) { out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z); qoo = qoo * q*q; loo = loo * q; ennn = ennn +unity; noo = ni * noo; } return out*fau; } __device__ hipComplex thess4(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex thesk(hipComplex z, hipComplex q, hipComplex r) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); hipComplex roo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; roo = roo * r * r ; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r)); } return out; } __device__ hipComplex thass(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex rogers( hipComplex q) { hipComplex onf(0.2,0.0); hipComplex Q5 = q*q*q*q*q; hipComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5)); return out; } __device__ hipComplex flat(hipComplex m) { float ua = sqrtf(m.r*m.r + m.i*m.i); hipComplex out(m.r/ua,m.i/ua); return out; } __device__ hipComplex eff(hipComplex z, hipComplex lambda) { return z*z*z*z+ lambda/(z*z*z*z); } __device__ hipComplex thete(float R, hipComplex tau, hipComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ hipComplex A(0.0,0.0); /* miscellaneous setup */ hipComplex pai(3.14159265353898,0.0); hipComplex ai(0.0,1.0); hipComplex oo(1.0,0.0); hipComplex oot(2.0,0.0); hipComplex nini(9.0,0.0); hipComplex eigh(-18.0,0.0); /* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ hipComplex frann(1.0,0.0); frann = pai * ai * tau ; hipComplex shenn(1.0,0.0); shenn = oot * ai * z; hipComplex plenn(1.0,0.0); hipComplex enn(1.0,0.0); hipComplex ann(1.0,0.0); hipComplex 
bnn(1.0,0.0); hipComplex scrunn(1.0,0.0); float ca, cb,cc; int a, b; for(a=-10;a<10;a++) { ann.r = a; for(b=-10;b<10;b++) { bnn.r = b; if(((a+b)%2)==0) { scrunn.r = a*a + b*b; A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn)); } else { ca = 5.0 + a*a + b*b; cb = 2*(a * cos(R)- b * sin(R)); cc = 4*(b * cos(R)+a*sin(R)); scrunn.r = ca + cb + cc; A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn)); } } } return A; } __device__ hipComplex thetta(hipComplex tau, hipComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ hipComplex A(0.0,0.0); /* miscellaneous setup */ hipComplex pai(3.14159265353898,0.0); hipComplex ai(0.0,1.0); hipComplex oo(1.0,0.0); hipComplex oot(2.0,0.0); hipComplex nini(9.0,0.0); hipComplex eigh(-18.0,0.0); /* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ hipComplex frann(1.0,0.0); frann = pai * ai * tau ; hipComplex shenn(1.0,0.0); shenn = oot * ai * z; hipComplex plenn(1.0,0.0); hipComplex enn(1.0,0.0); int n; for(n=-10;n<10;n++) { enn.r = n; plenn = enn * enn; /* this get the hipComplex out of the event loop */ A = A + expc(frann* plenn) * expc(shenn* enn); } return A; } __device__ hipComplex mitlef(hipComplex z,hipComplex c) { hipComplex out(0.0,0.0); hipComplex Z(1.0,0.0); hipComplex frove(0.0,0.0); int v; for(v=0;v<20;v++) { frove.r = tgammaf(c.r*v+c.i); out = out + Z/frove; Z = Z * z; } return out; } __device__ hipComplex helva(hipComplex z) { hipComplex out(j0f(z.r),j1f(z.i)); return out; } /* derivative of helva, from Mathematica */ __device__ hipComplex helvp(hipComplex z) { hipComplex out(jnf(2,z.r),jnf(1,z.i)); return out; } __device__ hipComplex lanna(hipComplex z) { hipComplex out(j1f(z.r/j0f(z.i)),j1f(z.i/j1f(z.r))); return out; } __device__ hipComplex harva(hipComplex z) { hipComplex out(jnf(floor(z.i),z.r),jnf(ceil(z.r),z.i)); return out; } __device__ hipComplex herve(hipComplex z) { hipComplex out(jnf(floor(z.r-z.i),z.i),jnf(ceil(z.r+z.i),z.r)); return out; } __device__ hipComplex alver(hipComplex z) { hipComplex out(1.0/j0f(z.r),1.0/j1f(z.i)); return out; } __device__ hipComplex alvir(hipComplex z) { hipComplex out(j0f(z.r),1.0/j1f(z.i)); return out; } __device__ hipComplex hexva(int m, hipComplex z) { hipComplex out(jnf(m,z.r),jnf(m,z.i)); return out; } __device__ hipComplex hilva(hipComplex z) { hipComplex out(j1f(z.r),j0f(z.i)); return out; } __device__ hipComplex halvi(hipComplex z) { hipComplex out(j1f(z.r),-j0f(z.i)); return out; } __device__ hipComplex ahilv(hipComplex z) { hipComplex out(1.0/j1f(z.r),1.0/j0f(z.i)); return out; } __device__ hipComplex halva(hipComplex z) { hipComplex out(j0f(z.r),j0f(z.i)); return out; } __device__ hipComplex aciwa(hipComplex z) { hipComplex out(j0f(j1f(z.r)),j1f(j0f(z.i))); return out; } __device__ hipComplex hinva(hipComplex z) { hipComplex out(j1f(z.r),j1f(z.i)); return out; } __device__ hipComplex henga(hipComplex z) { hipComplex out(acoshf(z.r),asinhf(z.i)); return out; } __device__ hipComplex holva(hipComplex z) { hipComplex out(y0f(z.r),y1f(z.i)); return out; } __device__ hipComplex aliva(hipComplex z) { hipComplex out(j1f(z.r),cyl_bessel_i1f(z.i)); return out; } __device__ hipComplex ariva(hipComplex z) { hipComplex out(sinf(z.i),cbrtf(z.r)); return out; } __device__ hipComplex arago(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); 
hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * harva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex irigo(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * helva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex thy(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * conj(qoo/q * tw*hinva(z)) +hilva( qoo*qoo/(q*q))); } return out; } __device__ hipComplex urigo(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * powc(hilva(q*z),helva(q*z)) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex origo(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * powc(alvir(q*z),alvir(q*z)) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex origa(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * powc(aciwa(q*z),aciwa(q*z)) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex arth(hipComplex z, hipComplex q, hipComplex ao, hipComplex uo, hipComplex fic) { int v; hipComplex out=z; hipComplex lue(1.0,0.0); for(v=0;v<10;v++) { out = out - lanna(ao/(uo-lue))/hinva(uo/(ao+lue)); lue = lue * q; } return out; } __device__ unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 
0 : n); } __global__ void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) { const int c = blockIdx.x*blockDim.x + threadIdx.x; const int r= blockIdx.y*blockDim.y + threadIdx.y; const int i = c + r*w; // 1D indexing float pi = 3.1415926535898; hipComplex ip(pi,0.0); const float scale =4.1; float fx = -scale * (float)(DIM/2 - c)/(DIM/2); float fy = scale * (float)(DIM/2 - r)/(DIM/2); hipComplex effx(fx,0.0); hipComplex effy(fy,0.0); float LA = -scale * (float)(DIM/2 - pos.x)/(DIM/2); float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2); hipComplex mouse(LA,LB); hipComplex moux(LA,0.0); hipComplex mouy(0.0,LB); hipComplex q(fx,fy); /* hipComplex tik(sin(ticks/40.0f),0.0);*/ /* hipComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0)); hipComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024)); hipComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/ hipComplex fixon(.029348,.828934); hipComplex faxon(.029348,-.828934); hipComplex unity(1.0,0.0); hipComplex ai(0.0,1.0); hipComplex tin(1/128.0,0.0); hipComplex aon = expc(ai*moux); hipComplex uon= expc(mouy); hipComplex flurn(0.0,0.0); hipComplex accume(0.0,0.0); hipComplex eccume(0.0,0.0); hipComplex rhun(1.02871376821872462237195122725097462534904479,0.0); hipComplex cue = q; hipComplex lam(0.73736887807831963, -0.67549029426152396); hipComplex due(3.0,0.0); hipComplex tir(2.0,0.0); hipComplex selga(3.5,0.0); hipComplex vro(-1.0,0.0); hipComplex tle(1.0,0.0); hipComplex sle(4.0,0.0); hipComplex cherra(0.62348980185873359, 0.7818314824680298); hipComplex lerra = cherra*cherra; hipComplex ferra = lerra * cherra; hipComplex terra = ferra * cherra; hipComplex zerra = terra * cherra; hipComplex nerra = zerra * cherra; hipComplex vlarv(1/3.0,0.0); hipComplex sugna(0.70710678118654757, 0.70710678118654746); hipComplex regna(0.99966573338968745, 0.025853848581176047); hipComplex spa(sqrtf(2.0),0.0); hipComplex spb(sqrtf(3.0),0.0); hipComplex spc(sqrtf(4.0),0.0); hipComplex spd(sqrtf(5.0),0.0); hipComplex mrun(1/2.0,0.0); hipComplex gloon (4.0,0.0); hipComplex plenod(-.01,0.0); hipComplex nue = unity; hipComplex vue = cue*ai; hipComplex lue = unity; hipComplex rhuva(3.0,0.0); hipComplex rarva(3.0,0.0); hipComplex bor(-10.0,0.0); hipComplex nat(0.0,-10.0); hipComplex rhus(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex D(0.739085133215160641655312087674,0.0); hipComplex pnx(0.0,0.0); hipComplex pny(0.0,0.0); /* if ((c >= w) || (r >= h)) return; // Check if within image bounds const int i = c + r*w; // 1D indexing const int dist = sqrtf((c - pos.x)*(c - pos.x) + (r - pos.y)*(r - pos.y)); const unsigned char intensity = clip(255 - dist);*/ // theta function varying on constant // cue =thess(cue,fixon*mouse); int v=1; int axa=-10; /*while((v<100)&&norg(cue)<2.0) { cue = cue*(cue-mouy)*(cue-moux) -cue * q; v++; }*/ // almost Klein's j-invariant //cue = (powc(powc(arago(flurn,q*aon),rarva)+ powc(the2(flurn,q),rarva) + powc(the4(flurn,q),rarva),rhuva))/powc(the4(flurn,q)*the3(flurn,q)*the2(flurn,q),rarva); //arth(hipComplex z, hipComplex q, hipComplex ao, hipComplex uo, hipComplex fic) // For Questular, Let's see what happens with one of Ramanujan's mock theta functions? 
//cue = thy(q,flat(accume)*fixon); for(v=0;v<100;v++) { if(norg(cue)>1) { cue = uon*unity/cue; } cue = aon*the3(q,fixon*cue); } /*cue =cue - powc(conj(cue),conj(cue-aon*conj(cue)))-powc(conj(cue),conj(cue-uon*conj(cue)));*/ double tha; tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi)); d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2)); d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2)); d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2)); d_out[i].w = 255; } void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) { const dim3 blockSize(TX, TY); const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY); hipLaunchKernelGGL(( distanceKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_out, w, h, pos); } /*for(v=1;v<5;v++) { cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); } cue = accume;*/ /*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q)); rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q)); cue = rhus+cue; cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/ /*for(v=0;v<60;v++){ cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon)); accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue)); } cue = accume;*/ /* One for (x+d)/cos(d) -cos(x)/d Tungilipa D = cos(D) cos(sqrt(x*D))/D -1 = 0.0 The other for cos(x)-x Eripgrunna */
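One thing worth noting about distanceKernel above: the launcher rounds the grid up to whole TX x TY blocks, but the bounds check on (c, r) is commented out, so edge threads index d_out out of range whenever w or h is not a multiple of 32 (DIM = 2100 is not). A guarded version of the indexing prologue might look like the sketch below; the per-pixel math is elided, and the kernel name is illustrative.

// Guarded indexing prologue (sketch only); the body would be the same per-pixel
// computation as distanceKernel above.
__global__ void distanceKernelGuarded(uchar4* d_out, int w, int h, int2 pos)
{
    const int c = blockIdx.x * blockDim.x + threadIdx.x;
    const int r = blockIdx.y * blockDim.y + threadIdx.y;
    if (c >= w || r >= h) return;      // skip threads outside the image
    const int i = c + r * w;           // 1D pixel index
    // ... same per-pixel computation as distanceKernel ...
    d_out[i].w = 255;                  // opaque alpha, as in the original kernel
}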
8ff5bbd450434a3f4e598f14193a2e623cf74498.cu
#include "kernel.h" #define TX 32 #define TY 32 #define DIM 2100 struct cuComplex { float r; float i; __device__ cuComplex( float a, float b ) : r(a), i(b) {} __device__ float magnitude2( void ) { return r * r + i * i; } __device__ cuComplex operator*(const cuComplex& a) { return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i); } __device__ cuComplex operator-(const cuComplex& a) { return cuComplex(r-a.r, i-a.i); } __device__ cuComplex operator+(const cuComplex& a) { return cuComplex(r+a.r, i+a.i); } __device__ cuComplex operator/(const cuComplex& a) { return cuComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i)); } }; __device__ cuComplex conj(cuComplex m) { cuComplex out(m.r,-m.i); return out; } __device__ cuComplex nor(cuComplex m) { cuComplex out(m.r*m.r+m.i*m.i,0.0); return out; } __device__ float norg(cuComplex m) { return sqrtf(m.r*m.r+m.i*m.i); } __device__ cuComplex qpoch(cuComplex a, cuComplex q) { cuComplex out(1.0,0.0); cuComplex unity(1.0,0.0); int i = 0; cuComplex Q = q; if(q.magnitude2()>1.0) { return cuComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<80;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ cuComplex qp(cuComplex a, cuComplex q, int n) { cuComplex out(1.0,0.0); cuComplex unity(1.0,0.0); int i = 0; cuComplex Q = q; if(q.magnitude2()>1.0) { return cuComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<n;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ cuComplex ramphi(cuComplex q) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex mq = mone*q; return qpoch(mq,mq)/qpoch(q,mq); } __device__ cuComplex rampsi(cuComplex q) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex mq = mone*q; return qpoch(mq,q)*qpoch(q*q,q*q); } __device__ cuComplex ramchi(cuComplex q) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex mq = mone*q; return qpoch(mq,q*q); } __device__ cuComplex ramf(cuComplex a, cuComplex b) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex ma = mone*a; cuComplex mb = mone*b; return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b); } // complex exponential __device__ cuComplex expc(cuComplex m) { cuComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i)); return out; } __device__ cuComplex powc(cuComplex ag, cuComplex bg) { cuComplex out(0.0,0.0); cuComplex mesp(0.0,0.0); cuComplex frim(0.0,0.0); double radiu, thet; /* get the proper polar form of the complex number */ radiu = sqrtf(ag.r*ag.r + ag.i*ag.i); thet = atan2f(ag.i,ag.r); /* mesp gives R^(c+di) */ mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu)); mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu)); /* frim gives e^(i theta (c+di)) */ /* now since we already have the machinery for performing complex exponentiation (just exp), we can just call that here */ frim.r = -1.0 * bg.i * thet; frim.i = bg.r * thet; frim = expc(frim); out = mesp*frim; return out; } // cosine (nothing algorithmically clean) __device__ cuComplex cosc(cuComplex m) { cuComplex ai(0.0,1.0); cuComplex ot(0.5,0.0); cuComplex mone(-1.0,0.0); cuComplex out = ot*(expc(m*ai) + expc(mone*m*ai)); return out; } __device__ cuComplex sins(cuComplex m) { cuComplex ai(0.0,1.0); cuComplex ot(0.0,0.5); cuComplex mone(-1.0,0.0); cuComplex out = ot*(expc(m*ai) - expc(mone*m*ai)); return out; } __device__ cuComplex tans(cuComplex m) { return sins(m)/cosc(m); } __device__ cuComplex moeb(cuComplex t, cuComplex a, cuComplex z) { cuComplex out(0.0,0.0); 
cuComplex ai(0.0,1.0); cuComplex unity(1.0,0.0); out = expc(ai*t) * (z-a)/(unity-conj(a)*z); return out; } __device__ cuComplex bnewt(cuComplex z) { cuComplex three(3.0,0.0); cuComplex unity(1.0,0.0); cuComplex out(0.0,0.0); cuComplex Z =z; cuComplex L(0.0,0.0); cuComplex R(0.62348980185873359,0.7818314824680298); cuComplex v(0.62348980185873359,0.7818314824680298); int i; for(i=0;i<100;i++) { L = sins(expc(Z)-cosc(Z))-Z; out = out + v*L; v = R * v; Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity); } return out; } __device__ cuComplex they3(cuComplex z, cuComplex q) { int u; cuComplex out(0.0,0.0); cuComplex enn(-20.0,0.0); cuComplex onn(1.0,0.0); cuComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + powc(q,enn*enn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ cuComplex wahi(cuComplex z) { int u; cuComplex un(1.0,0.0); cuComplex ne(1.0,0.0); cuComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne); ne = ne + un; } out = out + un; return out; } __device__ cuComplex dwahi(cuComplex z) { int u; cuComplex un(1.0,0.0); cuComplex ne(1.0,0.0); cuComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne-un); ne = ne + un; } return out; } __device__ cuComplex they3p(cuComplex z, cuComplex q) { int u; cuComplex out(0.0,0.0); cuComplex enn(-20.0,0.0); cuComplex onn(1.0,0.0); cuComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ cuComplex h3ey3p(cuComplex z, cuComplex q) { int u; cuComplex out(0.0,0.0); cuComplex aut(0.0,0.0); cuComplex enn(-20.0,0.0); cuComplex onn(1.0,0.0); cuComplex dui(0.0,1.0); cuComplex vel(0.0,0.0); cuComplex rav(0.0,0.0); for(u=-40;u<40;u++) { vel = expc(dui*enn*z); rav = powc(q,enn*enn); aut = aut + (enn*enn)*rav/q*vel; out = out + rav*vel; enn = enn + onn; } return out/aut; } __device__ cuComplex thess(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex the1(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); cuComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*sins(z); } __device__ cuComplex the2(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); cuComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*cosc(z); } __device__ cuComplex the3(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex the9(cuComplex z, cuComplex q, cuComplex r) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * the3(tw*z,r) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex the4(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex 
tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } /* routine to generate q-integers */ __device__ cuComplex qin(cuComplex a, cuComplex q) { cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); out = (unity - powc(q, a))/(unity-q); return out; } /* generating function for n^2 */ __device__ cuComplex geffa(cuComplex z, cuComplex q) { cuComplex out(0.0,0.0); cuComplex unity(1.0,0.0); cuComplex wu(0.0,0.0); cuComplex Z=unity; int v; for(v=0;v<20;v++) { out = out + qin(wu*wu,q)* Z; wu = wu + unity; Z = z * Z; } return out; } __device__ cuComplex thratd(cuComplex z, cuComplex q) { int n; cuComplex fau(4.0,0.0); cuComplex too(2.0,0.0); cuComplex unity(1.0,0.0); cuComplex ennn(1.0,0.0); cuComplex ni(-1.0,0.0); cuComplex noo(-1.0,0.0); cuComplex out(0.0,0.0); cuComplex loo = q; cuComplex qoo =q*q; for(n=0;n<80;n++) { out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z); qoo = qoo * q*q; loo = loo * q; ennn = ennn +unity; noo = ni * noo; } return out*fau; } __device__ cuComplex thess4(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex thesk(cuComplex z, cuComplex q, cuComplex r) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); cuComplex roo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; roo = roo * r * r ; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r)); } return out; } __device__ cuComplex thass(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex rogers( cuComplex q) { cuComplex onf(0.2,0.0); cuComplex Q5 = q*q*q*q*q; cuComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5)); return out; } __device__ cuComplex flat(cuComplex m) { float ua = sqrtf(m.r*m.r + m.i*m.i); cuComplex out(m.r/ua,m.i/ua); return out; } __device__ cuComplex eff(cuComplex z, cuComplex lambda) { return z*z*z*z+ lambda/(z*z*z*z); } __device__ cuComplex thete(float R, cuComplex tau, cuComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ cuComplex A(0.0,0.0); /* miscellaneous setup */ cuComplex pai(3.14159265353898,0.0); cuComplex ai(0.0,1.0); cuComplex oo(1.0,0.0); cuComplex oot(2.0,0.0); cuComplex nini(9.0,0.0); cuComplex eigh(-18.0,0.0); /* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ cuComplex frann(1.0,0.0); frann = pai * ai * tau ; cuComplex shenn(1.0,0.0); shenn = oot * ai * z; cuComplex plenn(1.0,0.0); cuComplex enn(1.0,0.0); cuComplex ann(1.0,0.0); cuComplex bnn(1.0,0.0); cuComplex scrunn(1.0,0.0); float ca, cb,cc; int a, b; for(a=-10;a<10;a++) { ann.r = a; for(b=-10;b<10;b++) { bnn.r = b; if(((a+b)%2)==0) { scrunn.r = a*a + b*b; A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn)); } else { ca = 5.0 + a*a + b*b; cb = 2*(a * cos(R)- b * sin(R)); cc = 4*(b * cos(R)+a*sin(R)); scrunn.r = ca + 
cb + cc; A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn)); } } } return A; } __device__ cuComplex thetta(cuComplex tau, cuComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ cuComplex A(0.0,0.0); /* miscellaneous setup */ cuComplex pai(3.14159265353898,0.0); cuComplex ai(0.0,1.0); cuComplex oo(1.0,0.0); cuComplex oot(2.0,0.0); cuComplex nini(9.0,0.0); cuComplex eigh(-18.0,0.0); /* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ cuComplex frann(1.0,0.0); frann = pai * ai * tau ; cuComplex shenn(1.0,0.0); shenn = oot * ai * z; cuComplex plenn(1.0,0.0); cuComplex enn(1.0,0.0); int n; for(n=-10;n<10;n++) { enn.r = n; plenn = enn * enn; /* this get the cuComplex out of the event loop */ A = A + expc(frann* plenn) * expc(shenn* enn); } return A; } __device__ cuComplex mitlef(cuComplex z,cuComplex c) { cuComplex out(0.0,0.0); cuComplex Z(1.0,0.0); cuComplex frove(0.0,0.0); int v; for(v=0;v<20;v++) { frove.r = tgammaf(c.r*v+c.i); out = out + Z/frove; Z = Z * z; } return out; } __device__ cuComplex helva(cuComplex z) { cuComplex out(j0f(z.r),j1f(z.i)); return out; } /* derivative of helva, from Mathematica */ __device__ cuComplex helvp(cuComplex z) { cuComplex out(jnf(2,z.r),jnf(1,z.i)); return out; } __device__ cuComplex lanna(cuComplex z) { cuComplex out(j1f(z.r/j0f(z.i)),j1f(z.i/j1f(z.r))); return out; } __device__ cuComplex harva(cuComplex z) { cuComplex out(jnf(floor(z.i),z.r),jnf(ceil(z.r),z.i)); return out; } __device__ cuComplex herve(cuComplex z) { cuComplex out(jnf(floor(z.r-z.i),z.i),jnf(ceil(z.r+z.i),z.r)); return out; } __device__ cuComplex alver(cuComplex z) { cuComplex out(1.0/j0f(z.r),1.0/j1f(z.i)); return out; } __device__ cuComplex alvir(cuComplex z) { cuComplex out(j0f(z.r),1.0/j1f(z.i)); return out; } __device__ cuComplex hexva(int m, cuComplex z) { cuComplex out(jnf(m,z.r),jnf(m,z.i)); return out; } __device__ cuComplex hilva(cuComplex z) { cuComplex out(j1f(z.r),j0f(z.i)); return out; } __device__ cuComplex halvi(cuComplex z) { cuComplex out(j1f(z.r),-j0f(z.i)); return out; } __device__ cuComplex ahilv(cuComplex z) { cuComplex out(1.0/j1f(z.r),1.0/j0f(z.i)); return out; } __device__ cuComplex halva(cuComplex z) { cuComplex out(j0f(z.r),j0f(z.i)); return out; } __device__ cuComplex aciwa(cuComplex z) { cuComplex out(j0f(j1f(z.r)),j1f(j0f(z.i))); return out; } __device__ cuComplex hinva(cuComplex z) { cuComplex out(j1f(z.r),j1f(z.i)); return out; } __device__ cuComplex henga(cuComplex z) { cuComplex out(acoshf(z.r),asinhf(z.i)); return out; } __device__ cuComplex holva(cuComplex z) { cuComplex out(y0f(z.r),y1f(z.i)); return out; } __device__ cuComplex aliva(cuComplex z) { cuComplex out(j1f(z.r),cyl_bessel_i1f(z.i)); return out; } __device__ cuComplex ariva(cuComplex z) { cuComplex out(sinf(z.i),cbrtf(z.r)); return out; } __device__ cuComplex arago(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * harva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex irigo(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * 
qoo/q * helva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex thy(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * conj(qoo/q * tw*hinva(z)) +hilva( qoo*qoo/(q*q))); } return out; } __device__ cuComplex urigo(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * powc(hilva(q*z),helva(q*z)) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex origo(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * powc(alvir(q*z),alvir(q*z)) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex origa(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * powc(aciwa(q*z),aciwa(q*z)) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex arth(cuComplex z, cuComplex q, cuComplex ao, cuComplex uo, cuComplex fic) { int v; cuComplex out=z; cuComplex lue(1.0,0.0); for(v=0;v<10;v++) { out = out - lanna(ao/(uo-lue))/hinva(uo/(ao+lue)); lue = lue * q; } return out; } __device__ unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); } __global__ void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) { const int c = blockIdx.x*blockDim.x + threadIdx.x; const int r= blockIdx.y*blockDim.y + threadIdx.y; const int i = c + r*w; // 1D indexing float pi = 3.1415926535898; cuComplex ip(pi,0.0); const float scale =4.1; float fx = -scale * (float)(DIM/2 - c)/(DIM/2); float fy = scale * (float)(DIM/2 - r)/(DIM/2); cuComplex effx(fx,0.0); cuComplex effy(fy,0.0); float LA = -scale * (float)(DIM/2 - pos.x)/(DIM/2); float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2); cuComplex mouse(LA,LB); cuComplex moux(LA,0.0); cuComplex mouy(0.0,LB); cuComplex q(fx,fy); /* cuComplex tik(sin(ticks/40.0f),0.0);*/ /* cuComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0)); cuComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024)); cuComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/ cuComplex fixon(.029348,.828934); cuComplex faxon(.029348,-.828934); cuComplex unity(1.0,0.0); cuComplex ai(0.0,1.0); cuComplex tin(1/128.0,0.0); cuComplex aon = expc(ai*moux); cuComplex uon= expc(mouy); cuComplex flurn(0.0,0.0); cuComplex accume(0.0,0.0); cuComplex eccume(0.0,0.0); cuComplex rhun(1.02871376821872462237195122725097462534904479,0.0); cuComplex cue = q; cuComplex lam(0.73736887807831963, -0.67549029426152396); cuComplex due(3.0,0.0); cuComplex tir(2.0,0.0); cuComplex selga(3.5,0.0); cuComplex vro(-1.0,0.0); cuComplex tle(1.0,0.0); cuComplex sle(4.0,0.0); cuComplex cherra(0.62348980185873359, 0.7818314824680298); cuComplex lerra = cherra*cherra; cuComplex ferra = lerra * cherra; cuComplex terra = ferra * cherra; cuComplex zerra = terra * cherra; cuComplex nerra = zerra * cherra; cuComplex vlarv(1/3.0,0.0); cuComplex sugna(0.70710678118654757, 0.70710678118654746); cuComplex regna(0.99966573338968745, 0.025853848581176047); cuComplex spa(sqrtf(2.0),0.0); cuComplex 
spb(sqrtf(3.0),0.0); cuComplex spc(sqrtf(4.0),0.0); cuComplex spd(sqrtf(5.0),0.0); cuComplex mrun(1/2.0,0.0); cuComplex gloon (4.0,0.0); cuComplex plenod(-.01,0.0); cuComplex nue = unity; cuComplex vue = cue*ai; cuComplex lue = unity; cuComplex rhuva(3.0,0.0); cuComplex rarva(3.0,0.0); cuComplex bor(-10.0,0.0); cuComplex nat(0.0,-10.0); cuComplex rhus(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex D(0.739085133215160641655312087674,0.0); cuComplex pnx(0.0,0.0); cuComplex pny(0.0,0.0); /* if ((c >= w) || (r >= h)) return; // Check if within image bounds const int i = c + r*w; // 1D indexing const int dist = sqrtf((c - pos.x)*(c - pos.x) + (r - pos.y)*(r - pos.y)); const unsigned char intensity = clip(255 - dist);*/ // theta function varying on constant // cue =thess(cue,fixon*mouse); int v=1; int axa=-10; /*while((v<100)&&norg(cue)<2.0) { cue = cue*(cue-mouy)*(cue-moux) -cue * q; v++; }*/ // almost Klein's j-invariant //cue = (powc(powc(arago(flurn,q*aon),rarva)+ powc(the2(flurn,q),rarva) + powc(the4(flurn,q),rarva),rhuva))/powc(the4(flurn,q)*the3(flurn,q)*the2(flurn,q),rarva); //arth(cuComplex z, cuComplex q, cuComplex ao, cuComplex uo, cuComplex fic) // For Questular, Let's see what happens with one of Ramanujan's mock theta functions? //cue = thy(q,flat(accume)*fixon); for(v=0;v<100;v++) { if(norg(cue)>1) { cue = uon*unity/cue; } cue = aon*the3(q,fixon*cue); } /*cue =cue - powc(conj(cue),conj(cue-aon*conj(cue)))-powc(conj(cue),conj(cue-uon*conj(cue)));*/ double tha; tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi)); d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2)); d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2)); d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2)); d_out[i].w = 255; } void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) { const dim3 blockSize(TX, TY); const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY); distanceKernel<<<gridSize, blockSize>>>(d_out, w, h, pos); } /*for(v=1;v<5;v++) { cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); } cue = accume;*/ /*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q)); rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q)); cue = rhus+cue; cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/ /*for(v=0;v<60;v++){ cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon)); accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue)); } cue = accume;*/ /* One for (x+d)/cos(d) -cos(x)/d Tungilipa D = cos(D) cos(sqrt(x*D))/D -1 = 0.0 The other for cos(x)-x Eripgrunna */
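For completeness, a minimal host-side driver for kernelLauncher is sketched below. It assumes kernel.h declares the same kernelLauncher signature defined above; the buffer handling, the choice of pos, and the output step are illustrative only.

// Hypothetical host driver: allocate the pixel buffer, run the launcher once,
// and copy the image back. Not part of the original project.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>
#include "kernel.h"   // assumed to declare kernelLauncher(uchar4*, int, int, int2)

int main()
{
    const int w = 2100, h = 2100;          // matches DIM used by the kernel
    uchar4* d_out = nullptr;
    cudaMalloc(&d_out, (size_t)w * h * sizeof(uchar4));

    int2 pos = make_int2(w / 2, h / 2);    // "mouse" position fed to the kernel
    kernelLauncher(d_out, w, h, pos);
    cudaDeviceSynchronize();

    uchar4* h_out = (uchar4*)malloc((size_t)w * h * sizeof(uchar4));
    cudaMemcpy(h_out, d_out, (size_t)w * h * sizeof(uchar4), cudaMemcpyDeviceToHost);
    // ... write h_out to an image file here ...
    free(h_out);
    cudaFree(d_out);
    return 0;
}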
432cc842e0c84cdb6d4d01305c4588f0561019dc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 2018-2019 Autoware Foundation. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ //headers in CUDA #include <thrust/sort.h> //headers in local files #include "lidar_point_pillars/postprocess_cuda.h" __global__ void filter_kernel(const float* box_preds, const float* cls_preds, const float* dir_preds, const int* anchor_mask, const float* dev_anchors_px, const float* dev_anchors_py, const float* dev_anchors_pz, const float* dev_anchors_dx, const float* dev_anchors_dy, const float* dev_anchors_dz, const float* dev_anchors_ro, float* filtered_box, float* filtered_score, int* filtered_dir, float* box_for_nms, int* filter_count, const float FLOAT_MIN, const float FLOAT_MAX, const float score_threshold, const int NUM_BOX_CORNERS, const int NUM_OUTPUT_BOX_FEATURE) { // boxes ([N, 7] Tensor): normal boxes: x, y, z, w, l, h, r int tid = threadIdx.x + blockIdx.x * blockDim.x; //sigmoid funciton float score = 1/(1+expf(-cls_preds[tid])); if(anchor_mask[tid] == 1 && score > score_threshold) { int counter = atomicAdd(filter_count, 1); float za = dev_anchors_pz[tid] + dev_anchors_dz[tid]/2; //decode network output float diagonal = sqrtf(dev_anchors_dx[tid]*dev_anchors_dx[tid] + dev_anchors_dy[tid]*dev_anchors_dy[tid]); float box_px = box_preds[tid*NUM_OUTPUT_BOX_FEATURE + 0] * diagonal + dev_anchors_px[tid]; float box_py = box_preds[tid*NUM_OUTPUT_BOX_FEATURE + 1] * diagonal + dev_anchors_py[tid]; float box_pz = box_preds[tid*NUM_OUTPUT_BOX_FEATURE + 2] * dev_anchors_dz[tid] + za; float box_dx = expf(box_preds[tid*NUM_OUTPUT_BOX_FEATURE + 3]) * dev_anchors_dx[tid]; float box_dy = expf(box_preds[tid*NUM_OUTPUT_BOX_FEATURE + 4]) * dev_anchors_dy[tid]; float box_dz = expf(box_preds[tid*NUM_OUTPUT_BOX_FEATURE + 5]) * dev_anchors_dz[tid]; float box_ro = box_preds[tid*NUM_OUTPUT_BOX_FEATURE + 6] + dev_anchors_ro[tid]; box_pz = box_pz - box_dz/2; filtered_box[counter*NUM_OUTPUT_BOX_FEATURE + 0] = box_px; filtered_box[counter*NUM_OUTPUT_BOX_FEATURE + 1] = box_py; filtered_box[counter*NUM_OUTPUT_BOX_FEATURE + 2] = box_pz; filtered_box[counter*NUM_OUTPUT_BOX_FEATURE + 3] = box_dx; filtered_box[counter*NUM_OUTPUT_BOX_FEATURE + 4] = box_dy; filtered_box[counter*NUM_OUTPUT_BOX_FEATURE + 5] = box_dz; filtered_box[counter*NUM_OUTPUT_BOX_FEATURE + 6] = box_ro; filtered_score[counter] = score; int direction_label; if(dir_preds[tid*2 + 0] < dir_preds[tid*2 + 1]) { direction_label = 1; } else { direction_label = 0; } filtered_dir[counter] = direction_label; //convrt normal box(normal boxes: x, y, z, w, l, h, r) to box(xmin, ymin, xmax, ymax) for nms calculation //First: dx, dy -> box(x0y0, x0y1, x1y0, x1y1) float corners[NUM_3D_BOX_CORNERS_MACRO] = {float(-0.5*box_dx), float(-0.5*box_dy), float(-0.5*box_dx), float( 0.5*box_dy), float( 0.5*box_dx), float( 0.5*box_dy), float( 0.5*box_dx), float(-0.5*box_dy)}; //Second: Rotate, Offset and convert to point(xmin. 
// ymin, xmax, ymax)
    float rotated_corners[NUM_3D_BOX_CORNERS_MACRO];
    float offset_corners[NUM_3D_BOX_CORNERS_MACRO];
    float sin_yaw = sinf(box_ro);
    float cos_yaw = cosf(box_ro);
    float xmin = FLOAT_MAX;
    float ymin = FLOAT_MAX;
    float xmax = FLOAT_MIN;
    float ymax = FLOAT_MIN;
    for(size_t i = 0; i < NUM_BOX_CORNERS; i++)
    {
      rotated_corners[i*2 + 0] = cos_yaw*corners[i*2 + 0] - sin_yaw*corners[i*2 + 1];
      rotated_corners[i*2 + 1] = sin_yaw*corners[i*2 + 0] + cos_yaw*corners[i*2 + 1];
      offset_corners[i*2 + 0] = rotated_corners[i*2 + 0] + box_px;
      offset_corners[i*2 + 1] = rotated_corners[i*2 + 1] + box_py;
      // axis-aligned bounds of the rotated, offset box (used below for NMS)
      xmin = fminf(xmin, offset_corners[i*2 + 0]);
      ymin = fminf(ymin, offset_corners[i*2 + 1]);
      xmax = fmaxf(xmax, offset_corners[i*2 + 0]);
      ymax = fmaxf(ymax, offset_corners[i*2 + 1]);
    }
    // box_for_nms(num_box, 4)
    box_for_nms[counter*NUM_BOX_CORNERS + 0] = xmin;
    box_for_nms[counter*NUM_BOX_CORNERS + 1] = ymin;
    box_for_nms[counter*NUM_BOX_CORNERS + 2] = xmax;
    box_for_nms[counter*NUM_BOX_CORNERS + 3] = ymax;
  }
}

__global__ void sort_boxes_by_indexes_kernel(float* filtered_box, int* filtered_dir, float* box_for_nms, int* indexes,
                                             int filter_count, float* sorted_filtered_boxes, int* sorted_filtered_dir,
                                             float* sorted_box_for_nms,
                                             const int NUM_BOX_CORNERS, const int NUM_OUTPUT_BOX_FEATURE)
{
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  if(tid < filter_count)
  {
    int sort_index = indexes[tid];
    sorted_filtered_boxes[tid*NUM_OUTPUT_BOX_FEATURE + 0] = filtered_box[sort_index*NUM_OUTPUT_BOX_FEATURE + 0];
    sorted_filtered_boxes[tid*NUM_OUTPUT_BOX_FEATURE + 1] = filtered_box[sort_index*NUM_OUTPUT_BOX_FEATURE + 1];
    sorted_filtered_boxes[tid*NUM_OUTPUT_BOX_FEATURE + 2] = filtered_box[sort_index*NUM_OUTPUT_BOX_FEATURE + 2];
    sorted_filtered_boxes[tid*NUM_OUTPUT_BOX_FEATURE + 3] = filtered_box[sort_index*NUM_OUTPUT_BOX_FEATURE + 3];
    sorted_filtered_boxes[tid*NUM_OUTPUT_BOX_FEATURE + 4] = filtered_box[sort_index*NUM_OUTPUT_BOX_FEATURE + 4];
    sorted_filtered_boxes[tid*NUM_OUTPUT_BOX_FEATURE + 5] = filtered_box[sort_index*NUM_OUTPUT_BOX_FEATURE + 5];
    sorted_filtered_boxes[tid*NUM_OUTPUT_BOX_FEATURE + 6] = filtered_box[sort_index*NUM_OUTPUT_BOX_FEATURE + 6];
    sorted_filtered_dir[tid] = filtered_dir[sort_index];
    sorted_box_for_nms[tid*NUM_BOX_CORNERS + 0] = box_for_nms[sort_index*NUM_BOX_CORNERS + 0];
    sorted_box_for_nms[tid*NUM_BOX_CORNERS + 1] = box_for_nms[sort_index*NUM_BOX_CORNERS + 1];
    sorted_box_for_nms[tid*NUM_BOX_CORNERS + 2] = box_for_nms[sort_index*NUM_BOX_CORNERS + 2];
    sorted_box_for_nms[tid*NUM_BOX_CORNERS + 3] = box_for_nms[sort_index*NUM_BOX_CORNERS + 3];
  }
}

PostprocessCuda::PostprocessCuda(const float FLOAT_MIN, const float FLOAT_MAX,
                                 const int NUM_ANCHOR_X_INDS, const int NUM_ANCHOR_Y_INDS, const int NUM_ANCHOR_R_INDS,
                                 const float score_threshold, const int NUM_THREADS, const float nms_overlap_threshold,
                                 const int NUM_BOX_CORNERS, const int NUM_OUTPUT_BOX_FEATURE):
FLOAT_MIN_(FLOAT_MIN),
FLOAT_MAX_(FLOAT_MAX),
NUM_ANCHOR_X_INDS_(NUM_ANCHOR_X_INDS),
NUM_ANCHOR_Y_INDS_(NUM_ANCHOR_Y_INDS),
NUM_ANCHOR_R_INDS_(NUM_ANCHOR_R_INDS),
score_threshold_(score_threshold),
NUM_THREADS_(NUM_THREADS),
nms_overlap_threshold_(nms_overlap_threshold),
NUM_BOX_CORNERS_(NUM_BOX_CORNERS),
NUM_OUTPUT_BOX_FEATURE_(NUM_OUTPUT_BOX_FEATURE)
{
  nms_cuda_ptr_.reset(new NMSCuda(
    NUM_THREADS,
    NUM_BOX_CORNERS,
    nms_overlap_threshold));
}

void PostprocessCuda::doPostprocessCuda(const float* rpn_box_output, const float* rpn_cls_output, const float* rpn_dir_output,
                                        int* dev_anchor_mask, const float* dev_anchors_px, const float* dev_anchors_py, const
float* dev_anchors_pz, const float* dev_anchors_dx, const float* dev_anchors_dy, const float* dev_anchors_dz, const float* dev_anchors_ro, float* dev_filtered_box, float* dev_filtered_score, int* dev_filtered_dir, float* dev_box_for_nms, int* dev_filter_count, std::vector<float>& out_detection) { hipLaunchKernelGGL(( filter_kernel), dim3(NUM_ANCHOR_X_INDS_*NUM_ANCHOR_R_INDS_), dim3(NUM_ANCHOR_Y_INDS_), 0, 0, rpn_box_output, rpn_cls_output, rpn_dir_output, dev_anchor_mask, dev_anchors_px, dev_anchors_py, dev_anchors_pz, dev_anchors_dx, dev_anchors_dy, dev_anchors_dz, dev_anchors_ro, dev_filtered_box, dev_filtered_score, dev_filtered_dir, dev_box_for_nms, dev_filter_count, FLOAT_MIN_, FLOAT_MAX_, score_threshold_, NUM_BOX_CORNERS_, NUM_OUTPUT_BOX_FEATURE_); int host_filter_count[1]; GPU_CHECK( hipMemcpy(host_filter_count, dev_filter_count, sizeof(int), hipMemcpyDeviceToHost ) ); if(host_filter_count[0] == 0) { return; } int* dev_indexes; float* dev_sorted_filtered_box, *dev_sorted_box_for_nms; int* dev_sorted_filtered_dir; GPU_CHECK(hipMalloc((void**)&dev_indexes, host_filter_count[0]*sizeof(int))); GPU_CHECK(hipMalloc((void**)&dev_sorted_filtered_box, NUM_OUTPUT_BOX_FEATURE_*host_filter_count[0]*sizeof(float))); GPU_CHECK(hipMalloc((void**)&dev_sorted_filtered_dir, host_filter_count[0]*sizeof(int))); GPU_CHECK(hipMalloc((void**)&dev_sorted_box_for_nms, NUM_BOX_CORNERS_*host_filter_count[0]*sizeof(float))); thrust::sequence(thrust::device, dev_indexes, dev_indexes + host_filter_count[0]); thrust::sort_by_key(thrust::device, dev_filtered_score, dev_filtered_score + size_t(host_filter_count[0]), dev_indexes, thrust::greater<float>()); const int num_blocks = DIVUP(host_filter_count[0], NUM_THREADS_); hipLaunchKernelGGL(( sort_boxes_by_indexes_kernel), dim3(num_blocks), dim3(NUM_THREADS_), 0, 0, dev_filtered_box, dev_filtered_dir, dev_box_for_nms, dev_indexes, host_filter_count[0], dev_sorted_filtered_box, dev_sorted_filtered_dir, dev_sorted_box_for_nms, NUM_BOX_CORNERS_, NUM_OUTPUT_BOX_FEATURE_); int keep_inds[host_filter_count[0]] = {0}; int out_num_objects = 0; nms_cuda_ptr_->doNMSCuda(host_filter_count[0], dev_sorted_box_for_nms, keep_inds, out_num_objects); float host_filtered_box[host_filter_count[0]*NUM_OUTPUT_BOX_FEATURE_]; int host_filtered_dir[host_filter_count[0]]; GPU_CHECK( hipMemcpy(host_filtered_box, dev_sorted_filtered_box, NUM_OUTPUT_BOX_FEATURE_*host_filter_count[0] *sizeof(float), hipMemcpyDeviceToHost ) ); GPU_CHECK( hipMemcpy(host_filtered_dir, dev_sorted_filtered_dir, host_filter_count[0] *sizeof(int), hipMemcpyDeviceToHost ) ); for (int i = 0; i < out_num_objects; i++) { out_detection.push_back(host_filtered_box[keep_inds[i]*NUM_OUTPUT_BOX_FEATURE_+0]); out_detection.push_back(host_filtered_box[keep_inds[i]*NUM_OUTPUT_BOX_FEATURE_+1]); out_detection.push_back(host_filtered_box[keep_inds[i]*NUM_OUTPUT_BOX_FEATURE_+2]); out_detection.push_back(host_filtered_box[keep_inds[i]*NUM_OUTPUT_BOX_FEATURE_+3]); out_detection.push_back(host_filtered_box[keep_inds[i]*NUM_OUTPUT_BOX_FEATURE_+4]); out_detection.push_back(host_filtered_box[keep_inds[i]*NUM_OUTPUT_BOX_FEATURE_+5]); if(host_filtered_dir[keep_inds[i]] == 0) { out_detection.push_back(host_filtered_box[keep_inds[i]*NUM_OUTPUT_BOX_FEATURE_+6] + M_PI); } else { out_detection.push_back(host_filtered_box[keep_inds[i]*NUM_OUTPUT_BOX_FEATURE_+6]); } } GPU_CHECK(hipFree(dev_indexes)); GPU_CHECK(hipFree(dev_sorted_filtered_box)); GPU_CHECK(hipFree(dev_sorted_filtered_dir)); GPU_CHECK(hipFree(dev_sorted_box_for_nms)); }
432cc842e0c84cdb6d4d01305c4588f0561019dc.cu
/* * Copyright 2018-2019 Autoware Foundation. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ //headers in CUDA #include <thrust/sort.h> //headers in local files #include "lidar_point_pillars/postprocess_cuda.h" __global__ void filter_kernel(const float* box_preds, const float* cls_preds, const float* dir_preds, const int* anchor_mask, const float* dev_anchors_px, const float* dev_anchors_py, const float* dev_anchors_pz, const float* dev_anchors_dx, const float* dev_anchors_dy, const float* dev_anchors_dz, const float* dev_anchors_ro, float* filtered_box, float* filtered_score, int* filtered_dir, float* box_for_nms, int* filter_count, const float FLOAT_MIN, const float FLOAT_MAX, const float score_threshold, const int NUM_BOX_CORNERS, const int NUM_OUTPUT_BOX_FEATURE) { // boxes ([N, 7] Tensor): normal boxes: x, y, z, w, l, h, r int tid = threadIdx.x + blockIdx.x * blockDim.x; //sigmoid funciton float score = 1/(1+expf(-cls_preds[tid])); if(anchor_mask[tid] == 1 && score > score_threshold) { int counter = atomicAdd(filter_count, 1); float za = dev_anchors_pz[tid] + dev_anchors_dz[tid]/2; //decode network output float diagonal = sqrtf(dev_anchors_dx[tid]*dev_anchors_dx[tid] + dev_anchors_dy[tid]*dev_anchors_dy[tid]); float box_px = box_preds[tid*NUM_OUTPUT_BOX_FEATURE + 0] * diagonal + dev_anchors_px[tid]; float box_py = box_preds[tid*NUM_OUTPUT_BOX_FEATURE + 1] * diagonal + dev_anchors_py[tid]; float box_pz = box_preds[tid*NUM_OUTPUT_BOX_FEATURE + 2] * dev_anchors_dz[tid] + za; float box_dx = expf(box_preds[tid*NUM_OUTPUT_BOX_FEATURE + 3]) * dev_anchors_dx[tid]; float box_dy = expf(box_preds[tid*NUM_OUTPUT_BOX_FEATURE + 4]) * dev_anchors_dy[tid]; float box_dz = expf(box_preds[tid*NUM_OUTPUT_BOX_FEATURE + 5]) * dev_anchors_dz[tid]; float box_ro = box_preds[tid*NUM_OUTPUT_BOX_FEATURE + 6] + dev_anchors_ro[tid]; box_pz = box_pz - box_dz/2; filtered_box[counter*NUM_OUTPUT_BOX_FEATURE + 0] = box_px; filtered_box[counter*NUM_OUTPUT_BOX_FEATURE + 1] = box_py; filtered_box[counter*NUM_OUTPUT_BOX_FEATURE + 2] = box_pz; filtered_box[counter*NUM_OUTPUT_BOX_FEATURE + 3] = box_dx; filtered_box[counter*NUM_OUTPUT_BOX_FEATURE + 4] = box_dy; filtered_box[counter*NUM_OUTPUT_BOX_FEATURE + 5] = box_dz; filtered_box[counter*NUM_OUTPUT_BOX_FEATURE + 6] = box_ro; filtered_score[counter] = score; int direction_label; if(dir_preds[tid*2 + 0] < dir_preds[tid*2 + 1]) { direction_label = 1; } else { direction_label = 0; } filtered_dir[counter] = direction_label; //convrt normal box(normal boxes: x, y, z, w, l, h, r) to box(xmin, ymin, xmax, ymax) for nms calculation //First: dx, dy -> box(x0y0, x0y1, x1y0, x1y1) float corners[NUM_3D_BOX_CORNERS_MACRO] = {float(-0.5*box_dx), float(-0.5*box_dy), float(-0.5*box_dx), float( 0.5*box_dy), float( 0.5*box_dx), float( 0.5*box_dy), float( 0.5*box_dx), float(-0.5*box_dy)}; //Second: Rotate, Offset and convert to point(xmin. 
// ymin, xmax, ymax)
    float rotated_corners[NUM_3D_BOX_CORNERS_MACRO];
    float offset_corners[NUM_3D_BOX_CORNERS_MACRO];
    float sin_yaw = sinf(box_ro);
    float cos_yaw = cosf(box_ro);
    float xmin = FLOAT_MAX;
    float ymin = FLOAT_MAX;
    float xmax = FLOAT_MIN;
    float ymax = FLOAT_MIN;
    for(size_t i = 0; i < NUM_BOX_CORNERS; i++)
    {
      rotated_corners[i*2 + 0] = cos_yaw*corners[i*2 + 0] - sin_yaw*corners[i*2 + 1];
      rotated_corners[i*2 + 1] = sin_yaw*corners[i*2 + 0] + cos_yaw*corners[i*2 + 1];
      offset_corners[i*2 + 0] = rotated_corners[i*2 + 0] + box_px;
      offset_corners[i*2 + 1] = rotated_corners[i*2 + 1] + box_py;
      // axis-aligned bounds of the rotated, offset box (used below for NMS)
      xmin = fminf(xmin, offset_corners[i*2 + 0]);
      ymin = fminf(ymin, offset_corners[i*2 + 1]);
      xmax = fmaxf(xmax, offset_corners[i*2 + 0]);
      ymax = fmaxf(ymax, offset_corners[i*2 + 1]);
    }
    // box_for_nms(num_box, 4)
    box_for_nms[counter*NUM_BOX_CORNERS + 0] = xmin;
    box_for_nms[counter*NUM_BOX_CORNERS + 1] = ymin;
    box_for_nms[counter*NUM_BOX_CORNERS + 2] = xmax;
    box_for_nms[counter*NUM_BOX_CORNERS + 3] = ymax;
  }
}

__global__ void sort_boxes_by_indexes_kernel(float* filtered_box, int* filtered_dir, float* box_for_nms, int* indexes,
                                             int filter_count, float* sorted_filtered_boxes, int* sorted_filtered_dir,
                                             float* sorted_box_for_nms,
                                             const int NUM_BOX_CORNERS, const int NUM_OUTPUT_BOX_FEATURE)
{
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  if(tid < filter_count)
  {
    int sort_index = indexes[tid];
    sorted_filtered_boxes[tid*NUM_OUTPUT_BOX_FEATURE + 0] = filtered_box[sort_index*NUM_OUTPUT_BOX_FEATURE + 0];
    sorted_filtered_boxes[tid*NUM_OUTPUT_BOX_FEATURE + 1] = filtered_box[sort_index*NUM_OUTPUT_BOX_FEATURE + 1];
    sorted_filtered_boxes[tid*NUM_OUTPUT_BOX_FEATURE + 2] = filtered_box[sort_index*NUM_OUTPUT_BOX_FEATURE + 2];
    sorted_filtered_boxes[tid*NUM_OUTPUT_BOX_FEATURE + 3] = filtered_box[sort_index*NUM_OUTPUT_BOX_FEATURE + 3];
    sorted_filtered_boxes[tid*NUM_OUTPUT_BOX_FEATURE + 4] = filtered_box[sort_index*NUM_OUTPUT_BOX_FEATURE + 4];
    sorted_filtered_boxes[tid*NUM_OUTPUT_BOX_FEATURE + 5] = filtered_box[sort_index*NUM_OUTPUT_BOX_FEATURE + 5];
    sorted_filtered_boxes[tid*NUM_OUTPUT_BOX_FEATURE + 6] = filtered_box[sort_index*NUM_OUTPUT_BOX_FEATURE + 6];
    sorted_filtered_dir[tid] = filtered_dir[sort_index];
    sorted_box_for_nms[tid*NUM_BOX_CORNERS + 0] = box_for_nms[sort_index*NUM_BOX_CORNERS + 0];
    sorted_box_for_nms[tid*NUM_BOX_CORNERS + 1] = box_for_nms[sort_index*NUM_BOX_CORNERS + 1];
    sorted_box_for_nms[tid*NUM_BOX_CORNERS + 2] = box_for_nms[sort_index*NUM_BOX_CORNERS + 2];
    sorted_box_for_nms[tid*NUM_BOX_CORNERS + 3] = box_for_nms[sort_index*NUM_BOX_CORNERS + 3];
  }
}

PostprocessCuda::PostprocessCuda(const float FLOAT_MIN, const float FLOAT_MAX,
                                 const int NUM_ANCHOR_X_INDS, const int NUM_ANCHOR_Y_INDS, const int NUM_ANCHOR_R_INDS,
                                 const float score_threshold, const int NUM_THREADS, const float nms_overlap_threshold,
                                 const int NUM_BOX_CORNERS, const int NUM_OUTPUT_BOX_FEATURE):
FLOAT_MIN_(FLOAT_MIN),
FLOAT_MAX_(FLOAT_MAX),
NUM_ANCHOR_X_INDS_(NUM_ANCHOR_X_INDS),
NUM_ANCHOR_Y_INDS_(NUM_ANCHOR_Y_INDS),
NUM_ANCHOR_R_INDS_(NUM_ANCHOR_R_INDS),
score_threshold_(score_threshold),
NUM_THREADS_(NUM_THREADS),
nms_overlap_threshold_(nms_overlap_threshold),
NUM_BOX_CORNERS_(NUM_BOX_CORNERS),
NUM_OUTPUT_BOX_FEATURE_(NUM_OUTPUT_BOX_FEATURE)
{
  nms_cuda_ptr_.reset(new NMSCuda(
    NUM_THREADS,
    NUM_BOX_CORNERS,
    nms_overlap_threshold));
}

void PostprocessCuda::doPostprocessCuda(const float* rpn_box_output, const float* rpn_cls_output, const float* rpn_dir_output,
                                        int* dev_anchor_mask, const float* dev_anchors_px, const float* dev_anchors_py, const
float* dev_anchors_pz, const float* dev_anchors_dx, const float* dev_anchors_dy, const float* dev_anchors_dz, const float* dev_anchors_ro, float* dev_filtered_box, float* dev_filtered_score, int* dev_filtered_dir, float* dev_box_for_nms, int* dev_filter_count, std::vector<float>& out_detection) { filter_kernel<<<NUM_ANCHOR_X_INDS_*NUM_ANCHOR_R_INDS_, NUM_ANCHOR_Y_INDS_>>> (rpn_box_output, rpn_cls_output, rpn_dir_output, dev_anchor_mask, dev_anchors_px, dev_anchors_py, dev_anchors_pz, dev_anchors_dx, dev_anchors_dy, dev_anchors_dz, dev_anchors_ro, dev_filtered_box, dev_filtered_score, dev_filtered_dir, dev_box_for_nms, dev_filter_count, FLOAT_MIN_, FLOAT_MAX_, score_threshold_, NUM_BOX_CORNERS_, NUM_OUTPUT_BOX_FEATURE_); int host_filter_count[1]; GPU_CHECK( cudaMemcpy(host_filter_count, dev_filter_count, sizeof(int), cudaMemcpyDeviceToHost ) ); if(host_filter_count[0] == 0) { return; } int* dev_indexes; float* dev_sorted_filtered_box, *dev_sorted_box_for_nms; int* dev_sorted_filtered_dir; GPU_CHECK(cudaMalloc((void**)&dev_indexes, host_filter_count[0]*sizeof(int))); GPU_CHECK(cudaMalloc((void**)&dev_sorted_filtered_box, NUM_OUTPUT_BOX_FEATURE_*host_filter_count[0]*sizeof(float))); GPU_CHECK(cudaMalloc((void**)&dev_sorted_filtered_dir, host_filter_count[0]*sizeof(int))); GPU_CHECK(cudaMalloc((void**)&dev_sorted_box_for_nms, NUM_BOX_CORNERS_*host_filter_count[0]*sizeof(float))); thrust::sequence(thrust::device, dev_indexes, dev_indexes + host_filter_count[0]); thrust::sort_by_key(thrust::device, dev_filtered_score, dev_filtered_score + size_t(host_filter_count[0]), dev_indexes, thrust::greater<float>()); const int num_blocks = DIVUP(host_filter_count[0], NUM_THREADS_); sort_boxes_by_indexes_kernel<<<num_blocks, NUM_THREADS_>>>(dev_filtered_box, dev_filtered_dir, dev_box_for_nms, dev_indexes, host_filter_count[0], dev_sorted_filtered_box, dev_sorted_filtered_dir, dev_sorted_box_for_nms, NUM_BOX_CORNERS_, NUM_OUTPUT_BOX_FEATURE_); int keep_inds[host_filter_count[0]] = {0}; int out_num_objects = 0; nms_cuda_ptr_->doNMSCuda(host_filter_count[0], dev_sorted_box_for_nms, keep_inds, out_num_objects); float host_filtered_box[host_filter_count[0]*NUM_OUTPUT_BOX_FEATURE_]; int host_filtered_dir[host_filter_count[0]]; GPU_CHECK( cudaMemcpy(host_filtered_box, dev_sorted_filtered_box, NUM_OUTPUT_BOX_FEATURE_*host_filter_count[0] *sizeof(float), cudaMemcpyDeviceToHost ) ); GPU_CHECK( cudaMemcpy(host_filtered_dir, dev_sorted_filtered_dir, host_filter_count[0] *sizeof(int), cudaMemcpyDeviceToHost ) ); for (int i = 0; i < out_num_objects; i++) { out_detection.push_back(host_filtered_box[keep_inds[i]*NUM_OUTPUT_BOX_FEATURE_+0]); out_detection.push_back(host_filtered_box[keep_inds[i]*NUM_OUTPUT_BOX_FEATURE_+1]); out_detection.push_back(host_filtered_box[keep_inds[i]*NUM_OUTPUT_BOX_FEATURE_+2]); out_detection.push_back(host_filtered_box[keep_inds[i]*NUM_OUTPUT_BOX_FEATURE_+3]); out_detection.push_back(host_filtered_box[keep_inds[i]*NUM_OUTPUT_BOX_FEATURE_+4]); out_detection.push_back(host_filtered_box[keep_inds[i]*NUM_OUTPUT_BOX_FEATURE_+5]); if(host_filtered_dir[keep_inds[i]] == 0) { out_detection.push_back(host_filtered_box[keep_inds[i]*NUM_OUTPUT_BOX_FEATURE_+6] + M_PI); } else { out_detection.push_back(host_filtered_box[keep_inds[i]*NUM_OUTPUT_BOX_FEATURE_+6]); } } GPU_CHECK(cudaFree(dev_indexes)); GPU_CHECK(cudaFree(dev_sorted_filtered_box)); GPU_CHECK(cudaFree(dev_sorted_filtered_dir)); GPU_CHECK(cudaFree(dev_sorted_box_for_nms)); }
d29f227384923eaaa5fdd3ca9a025fea68dd952a.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

// A cell is treated as a boundary cell if it lies on the first/last column of its row
// or on the first/last row of the (hard-coded) 480-row grid.
__device__ bool checkBoundary(int blockIdx, int blockDim, int threadIdx){
  int x = threadIdx;
  int y = blockIdx;
  return (x == 0 || x == (blockDim-1) || y == 0 || y == 479);
}

// Semi-Lagrangian advection: trace each cell back along the velocity field (xv, yv)
// and bilinearly interpolate old_data at the back-traced position.
__global__ void mAdvect(float *new_data, float *old_data, float *xv, float *yv, float t_step, float s_stepX, float s_stepY)
{
  if(checkBoundary(blockIdx.x, blockDim.x, threadIdx.x)) return;

  int Idx = blockIdx.x * blockDim.x + threadIdx.x;
  float curr_x = (float)threadIdx.x;
  float curr_y = (float)blockIdx.x;
  float last_x = curr_x - t_step*s_stepX*xv[Idx];
  float last_y = curr_y - t_step*s_stepY*yv[Idx];

  if(last_x < 1.5)   last_x = 1.5;
  if(last_x > 637.5) last_x = 637.5;
  if(last_y < 1.5)   last_y = 1.5;
  if(last_y > 477.5) last_y = 477.5;

  // Bilinear Interpolation
  float xDiff = last_x - (int)last_x;
  float yDiff = last_y - (int)last_y;
  int LeftTopX = (int)last_x;
  int LeftTopY = (int)last_y;
  int LeftTopIdx = LeftTopY * blockDim.x + LeftTopX;

  new_data[Idx] = (xDiff*yDiff)*old_data[LeftTopIdx+blockDim.x+1]
                 +(xDiff*(1.f-yDiff))*old_data[LeftTopIdx+1]
                 +((1.f-xDiff)*yDiff)*old_data[LeftTopIdx+blockDim.x]
                 +((1.f-xDiff)*(1.f-yDiff))*old_data[LeftTopIdx];
}
d29f227384923eaaa5fdd3ca9a025fea68dd952a.cu
#include "includes.h"

// A cell is treated as a boundary cell if it lies on the first/last column of its row
// or on the first/last row of the (hard-coded) 480-row grid.
__device__ bool checkBoundary(int blockIdx, int blockDim, int threadIdx){
  int x = threadIdx;
  int y = blockIdx;
  return (x == 0 || x == (blockDim-1) || y == 0 || y == 479);
}

// Semi-Lagrangian advection: trace each cell back along the velocity field (xv, yv)
// and bilinearly interpolate old_data at the back-traced position.
__global__ void mAdvect(float *new_data, float *old_data, float *xv, float *yv, float t_step, float s_stepX, float s_stepY)
{
  if(checkBoundary(blockIdx.x, blockDim.x, threadIdx.x)) return;

  int Idx = blockIdx.x * blockDim.x + threadIdx.x;
  float curr_x = (float)threadIdx.x;
  float curr_y = (float)blockIdx.x;
  float last_x = curr_x - t_step*s_stepX*xv[Idx];
  float last_y = curr_y - t_step*s_stepY*yv[Idx];

  if(last_x < 1.5)   last_x = 1.5;
  if(last_x > 637.5) last_x = 637.5;
  if(last_y < 1.5)   last_y = 1.5;
  if(last_y > 477.5) last_y = 477.5;

  // Bilinear Interpolation
  float xDiff = last_x - (int)last_x;
  float yDiff = last_y - (int)last_y;
  int LeftTopX = (int)last_x;
  int LeftTopY = (int)last_y;
  int LeftTopIdx = LeftTopY * blockDim.x + LeftTopX;

  new_data[Idx] = (xDiff*yDiff)*old_data[LeftTopIdx+blockDim.x+1]
                 +(xDiff*(1.f-yDiff))*old_data[LeftTopIdx+1]
                 +((1.f-xDiff)*yDiff)*old_data[LeftTopIdx+blockDim.x]
                 +((1.f-xDiff)*(1.f-yDiff))*old_data[LeftTopIdx];
}
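For reference, a minimal host-side launch sketch for the mAdvect kernel above (not part of the original pair). It assumes the 640x480 grid implied by the kernel's hard-coded clamps (637.5 / 477.5) and boundary check (y == 479): one block per row, one thread per column. The helper name and buffer names are hypothetical.

#include <cuda_runtime.h>

// Hypothetical helper: advect one field for a single time step on the assumed 640x480 grid.
void advectStep(float* d_new, float* d_old, float* d_xv, float* d_yv,
                float t_step, float s_stepX, float s_stepY)
{
    const int W = 640, H = 480;   // grid size inferred from the kernel's clamps and boundary check
    mAdvect<<<H, W>>>(d_new, d_old, d_xv, d_yv, t_step, s_stepX, s_stepY);   // 480 blocks (rows) x 640 threads (columns)
    cudaDeviceSynchronize();      // sketch only; production code would check launch/sync errors
}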
58fc49c3967798424bfa55ac95add30bc0ead5b6.hip
// !!! This is a file automatically generated by hipify!!! #include <cutil_inline.h> #include <hip/hip_runtime_api.h> #include <stdio.h> #include <string.h> #include <errno.h> #include "gSTREAM.h" /* include cipher kernel function cu file */ #include "Rabbit_kernel.cu" void gSTREAM_init(gSTREAM_ctx* ctx, int device, int nr_threads, int nr_blocks){ hipDeviceProp_t deviceProp; int nr_streams=nr_threads*nr_blocks; /* set device */ hipGetDeviceProperties(&deviceProp, device); hipSetDevice(device); debug("\nUsing device %d: \"%s\"\n", device, deviceProp.name); cutilSafeCall(hipSetDeviceFlags(hipDeviceMapHost)); ctx->nr_threads = nr_threads; ctx->nr_blocks = nr_blocks; ctx->allocated_keys=0; ctx->allocated_ivs=0; ctx->allocated_buff=0; cutilCheckError(cutCreateTimer(&(ctx->bench.timer))); /* allocate cipher state */ Rabbit_ctx *rctx=&ctx->rctx; cutilSafeCall(hipMalloc((void**)&(rctx->x_d),nr_streams*8*sizeof(u32))); cutilSafeCall(hipMalloc((void**)&(rctx->c_d),nr_streams*8*sizeof(u32))); cutilSafeCall(hipMalloc((void**)&(rctx->carry_d),nr_streams*sizeof(u32))); } void gSTREAM_exit(gSTREAM_ctx* ctx) { if(ctx->allocated_keys) { cutilSafeCall(hipFree(ctx->keys_d)); } if(ctx->allocated_ivs) { cutilSafeCall(hipFree(ctx->ivs_d)); } if(ctx->allocated_buff) { cutilSafeCall(hipHostFree(ctx->buff_h)); } cutilCheckError(cutDeleteTimer(ctx->bench.timer)); /* free cipher state */ Rabbit_ctx *rctx=&ctx->rctx; cutilSafeCall(hipFree(rctx->x_d)); cutilSafeCall(hipFree(rctx->c_d)); cutilSafeCall(hipFree(rctx->carry_d)); } void gSTREAM_keysetup(gSTREAM_ctx* ctx, u8* keys, u32 keysize, u32 ivsize) { size_t keys_size; int nr_streams=ctx->nr_threads*ctx->nr_blocks; u32* keys_h=NULL; size_t key_size_bytes=sizeof(u8)*(((keysize-1)/(sizeof(u8)*8))+1); size_t key_size_nrwords=(((keysize-1)/(sizeof(u32)*8))+1); ctx->key_size=keysize; ctx->iv_size=ivsize; /* allocate keys */ keys_size=nr_streams*sizeof(u32)*(((keysize-1)/(sizeof(u32)*8))+1); cutilSafeCall(hipMalloc((void**)&(ctx->keys_d),keys_size)); ctx->allocated_keys=1; if(!(keys_h=(u32*)malloc(keys_size))) { fprintf(stderr,"Could not allocate keys_h: %s\n",strerror(errno)); exit(-1); } /* copy byte-aligned keys to word-stream-aligned keys */ { u32 *curr_key; u8* tmp_keys=keys; /* allocate a current working key */ if(!(curr_key=(u32*)malloc(sizeof(u32)*key_size_nrwords))) { fprintf(stderr,"Could not allocate curr_key: %s\n",strerror(errno)); exit(-1); } memset(curr_key,0x00,sizeof(u32)*key_size_nrwords); for(int i=0;i<nr_streams;i++) { /* copy one of the keys to current key */ memcpy(curr_key,tmp_keys,key_size_bytes); tmp_keys+=key_size_bytes; /* copy current key to stream-aligned one */ for(int j=0;j<key_size_nrwords;j++) { keys_h[j*nr_streams+i]=CH_ENDIANESS32(curr_key[j]); } } free(curr_key); } /* Copy keys to device and free them from host */ cutilSafeCall(hipMemcpy(ctx->keys_d,keys_h,keys_size, hipMemcpyHostToDevice)); free(keys_h); Rabbit_ctx *rctx=&ctx->rctx; hipLaunchKernelGGL(( Rabbit_keysetup), dim3(ctx->nr_blocks),dim3(ctx->nr_threads), 0, 0, rctx->x_d ,rctx->c_d ,rctx->carry_d ,ctx->keys_d ,ctx->key_size); cutilCheckMsg("Kernel execution failed"); hipDeviceSynchronize(); #if 0 {//print state, each colum corresponds to a different stream u32 *x_h, *c_h,*carry_h; if(!(x_h=(u32*)malloc(nr_streams*8*sizeof(u32)))) { fprintf(stderr, "Failed to allocate c_h: %s\n",strerror(errno)); exit(-1); } if(!(c_h=(u32*)malloc(nr_streams*8*sizeof(u32)))) { fprintf(stderr, "Failed to allocate c_h: %s\n",strerror(errno)); exit(-1); } 
if(!(carry_h=(u32*)malloc(nr_streams*sizeof(u32)))) { fprintf(stderr, "Failed to allocate carry_h: %s\n",strerror(errno)); exit(-1); } cutilSafeCall(hipMemcpy(x_h,rctx->x_d,(nr_streams*8*sizeof(u32)), hipMemcpyDeviceToHost)); cutilSafeCall(hipMemcpy(c_h,rctx->c_d,(nr_streams*8*sizeof(u32)), hipMemcpyDeviceToHost)); cutilSafeCall(hipMemcpy(carry_h,rctx->carry_d,(nr_streams*sizeof(u32)), hipMemcpyDeviceToHost)); for(int i=0;i<nr_streams*8;i++) { printf("[0x%08x:0x%08x], ",x_h[i],c_h[i]); if(!((i+1)%nr_streams)) { printf("\n"); } } for(int i=0;i<nr_streams;i++) { printf("[ 0x%08x ], ",carry_h[i]); } free(x_h); free(c_h); free(carry_h); } #endif } void gSTREAM_ivsetup(gSTREAM_ctx* ctx, u8* ivs) { int nr_streams=ctx->nr_threads*ctx->nr_blocks; /* initialize the registers to all zeros */ if(ctx->iv_size>0) { u8* tmp_ivs=ivs; u32* ivs_h=NULL; size_t ivs_size= nr_streams*sizeof(u32)*(((ctx->iv_size-1)/(sizeof(u32)*8))+1); u32 *curr_iv; size_t iv_size_bytes=sizeof(u8)*(((ctx->iv_size-1)/(sizeof(u8)*8))+1); size_t iv_size_nrwords=(((ctx->iv_size-1)/(sizeof(u32)*8))+1); cutilSafeCall(hipMalloc((void**)&(ctx->ivs_d),ivs_size)); ctx->allocated_ivs=1; if(!(ivs_h=(u32*)malloc(ivs_size))) { fprintf(stderr,"Could not allocate ivs_h: %s\n",strerror(errno)); exit(-1); } /* allocate a current working iv */ if(!(curr_iv=(u32*)malloc(sizeof(u32)*iv_size_nrwords))) { fprintf(stderr,"Could not allocate curr_iv: %s\n",strerror(errno)); exit(-1); } memset(curr_iv,0x00,sizeof(u32)*iv_size_nrwords); for(int i=0;i<nr_streams;i++) { /* copy one of the ivs to current iv */ memcpy(curr_iv,tmp_ivs,iv_size_bytes); tmp_ivs+=iv_size_bytes; /* copy current iv to stream-aligned one */ for(int j=0;j<iv_size_nrwords;j++) { ivs_h[j*nr_streams+i]=CH_ENDIANESS32(curr_iv[j]); } } free(curr_iv); /* Copy ivs to device and free them from host */ cutilSafeCall(hipMemcpy(ctx->ivs_d,ivs_h,ivs_size, hipMemcpyHostToDevice)); free(ivs_h); } /* Load in iv, key and preclock */ Rabbit_ctx *rctx=&ctx->rctx; hipLaunchKernelGGL(( Rabbit_ivsetup), dim3(ctx->nr_blocks),dim3(ctx->nr_threads), 0, 0, rctx->x_d ,rctx->c_d ,rctx->carry_d ,ctx->ivs_d ,ctx->iv_size); cutilCheckMsg("Kernel execution failed"); hipDeviceSynchronize(); #if 0 {//print state, each colum corresponds to a different stream printf("\nafter ivsetup:\n"); u32 *x_h, *c_h,*carry_h; if(!(x_h=(u32*)malloc(nr_streams*8*sizeof(u32)))) { fprintf(stderr, "Failed to allocate c_h: %s\n",strerror(errno)); exit(-1); } if(!(c_h=(u32*)malloc(nr_streams*8*sizeof(u32)))) { fprintf(stderr, "Failed to allocate c_h: %s\n",strerror(errno)); exit(-1); } if(!(carry_h=(u32*)malloc(nr_streams*sizeof(u32)))) { fprintf(stderr, "Failed to allocate carry_h: %s\n",strerror(errno)); exit(-1); } cutilSafeCall(hipMemcpy(x_h,rctx->x_d,(nr_streams*8*sizeof(u32)), hipMemcpyDeviceToHost)); cutilSafeCall(hipMemcpy(c_h,rctx->c_d,(nr_streams*8*sizeof(u32)), hipMemcpyDeviceToHost)); cutilSafeCall(hipMemcpy(carry_h,rctx->carry_d,(nr_streams*sizeof(u32)), hipMemcpyDeviceToHost)); for(int i=0;i<nr_streams*8;i++) { printf("[0x%08x:0x%08x], ",x_h[i],c_h[i]); if(!((i+1)%nr_streams)) { printf("\n"); } } for(int i=0;i<nr_streams;i++) { printf("[ 0x%08x ], ",carry_h[i]); } free(x_h); free(c_h); free(carry_h); } #endif } void gSTREAM_keystream_bytes(gSTREAM_ctx* ctx, u8* keystreams, u32 length) { gSTREAM_process_bytes(GEN_KEYSTREAM,ctx,NULL,keystreams,length); } void gSTREAM_process_bytes(gSTREAM_action action, gSTREAM_ctx* ctx, u8* inputs, u8* outputs, u32 length) { int nr_streams=ctx->nr_blocks*ctx->nr_threads; size_t 
length_nr_words=(((length-1)/(sizeof(u32)))+1); size_t buff_size=nr_streams*length_nr_words*sizeof(u32); u32* tmp_buffer; /* allocate buffer */ if((!ctx->allocated_buff)||((length_nr_words*sizeof(u32))>ctx->buff_size)) { if(ctx->allocated_buff) { free(ctx->buff_h); //alocate a large buffer } cutilSafeCall(hipHostMalloc((void**)&(ctx->buff_h),buff_size, hipHostMallocMapped)); cutilSafeCall(hipHostGetDevicePointer((void **)&(ctx->buff_d), ctx->buff_h,0)); ctx->allocated_buff=1; ctx->buff_size=length_nr_words*sizeof(u32); } /* allocate a current working buffer */ if(!(tmp_buffer=(u32*)malloc(sizeof(u32)*length_nr_words))) { fprintf(stderr,"Could not allocate tmp_buffer: %s\n",strerror(errno)); exit(-1); } if(action!=GEN_KEYSTREAM) { for(int i=0;i<nr_streams;i++) { /* copy one of the inputs to current working buffer */ memcpy(tmp_buffer,inputs,length); inputs+=length; /* copy current iv to stream-aligned one */ for(int j=0;j<length_nr_words;j++) { ctx->buff_h[j*nr_streams+i]=CH_ENDIANESS32(tmp_buffer[j]); } } } /* process bytes */ Rabbit_ctx *rctx=&ctx->rctx; cutilCheckError(cutStartTimer(ctx->bench.timer)); hipLaunchKernelGGL(( Rabbit_process_bytes), dim3(ctx->nr_blocks),dim3(ctx->nr_threads), 0, 0, action ,rctx->x_d ,rctx->c_d ,rctx->carry_d ,ctx->buff_d ,length_nr_words); cutilCheckMsg("Kernel execution failed"); hipDeviceSynchronize(); cutilCheckError(cutStopTimer(ctx->bench.timer)); /* copy from working buffer to output buffer */ for(int i=0;i<nr_streams;i++) { /* copy one of the keystreams to current keystream */ for(int j=0;j<length_nr_words;j++) { tmp_buffer[j]=ctx->buff_h[i+j*nr_streams]; } memcpy(outputs,tmp_buffer,length); outputs+=length; } free(tmp_buffer); } double gSTREAM_getTimerValue(gSTREAM_ctx* ctx) { return cutGetTimerValue(ctx->bench.timer); }
58fc49c3967798424bfa55ac95add30bc0ead5b6.cu
#include <cutil_inline.h> #include <cuda_runtime_api.h> #include <stdio.h> #include <string.h> #include <errno.h> #include "gSTREAM.h" /* include cipher kernel function cu file */ #include "Rabbit_kernel.cu" void gSTREAM_init(gSTREAM_ctx* ctx, int device, int nr_threads, int nr_blocks){ cudaDeviceProp deviceProp; int nr_streams=nr_threads*nr_blocks; /* set device */ cudaGetDeviceProperties(&deviceProp, device); cudaSetDevice(device); debug("\nUsing device %d: \"%s\"\n", device, deviceProp.name); cutilSafeCall(cudaSetDeviceFlags(cudaDeviceMapHost)); ctx->nr_threads = nr_threads; ctx->nr_blocks = nr_blocks; ctx->allocated_keys=0; ctx->allocated_ivs=0; ctx->allocated_buff=0; cutilCheckError(cutCreateTimer(&(ctx->bench.timer))); /* allocate cipher state */ Rabbit_ctx *rctx=&ctx->rctx; cutilSafeCall(cudaMalloc((void**)&(rctx->x_d),nr_streams*8*sizeof(u32))); cutilSafeCall(cudaMalloc((void**)&(rctx->c_d),nr_streams*8*sizeof(u32))); cutilSafeCall(cudaMalloc((void**)&(rctx->carry_d),nr_streams*sizeof(u32))); } void gSTREAM_exit(gSTREAM_ctx* ctx) { if(ctx->allocated_keys) { cutilSafeCall(cudaFree(ctx->keys_d)); } if(ctx->allocated_ivs) { cutilSafeCall(cudaFree(ctx->ivs_d)); } if(ctx->allocated_buff) { cutilSafeCall(cudaFreeHost(ctx->buff_h)); } cutilCheckError(cutDeleteTimer(ctx->bench.timer)); /* free cipher state */ Rabbit_ctx *rctx=&ctx->rctx; cutilSafeCall(cudaFree(rctx->x_d)); cutilSafeCall(cudaFree(rctx->c_d)); cutilSafeCall(cudaFree(rctx->carry_d)); } void gSTREAM_keysetup(gSTREAM_ctx* ctx, u8* keys, u32 keysize, u32 ivsize) { size_t keys_size; int nr_streams=ctx->nr_threads*ctx->nr_blocks; u32* keys_h=NULL; size_t key_size_bytes=sizeof(u8)*(((keysize-1)/(sizeof(u8)*8))+1); size_t key_size_nrwords=(((keysize-1)/(sizeof(u32)*8))+1); ctx->key_size=keysize; ctx->iv_size=ivsize; /* allocate keys */ keys_size=nr_streams*sizeof(u32)*(((keysize-1)/(sizeof(u32)*8))+1); cutilSafeCall(cudaMalloc((void**)&(ctx->keys_d),keys_size)); ctx->allocated_keys=1; if(!(keys_h=(u32*)malloc(keys_size))) { fprintf(stderr,"Could not allocate keys_h: %s\n",strerror(errno)); exit(-1); } /* copy byte-aligned keys to word-stream-aligned keys */ { u32 *curr_key; u8* tmp_keys=keys; /* allocate a current working key */ if(!(curr_key=(u32*)malloc(sizeof(u32)*key_size_nrwords))) { fprintf(stderr,"Could not allocate curr_key: %s\n",strerror(errno)); exit(-1); } memset(curr_key,0x00,sizeof(u32)*key_size_nrwords); for(int i=0;i<nr_streams;i++) { /* copy one of the keys to current key */ memcpy(curr_key,tmp_keys,key_size_bytes); tmp_keys+=key_size_bytes; /* copy current key to stream-aligned one */ for(int j=0;j<key_size_nrwords;j++) { keys_h[j*nr_streams+i]=CH_ENDIANESS32(curr_key[j]); } } free(curr_key); } /* Copy keys to device and free them from host */ cutilSafeCall(cudaMemcpy(ctx->keys_d,keys_h,keys_size, cudaMemcpyHostToDevice)); free(keys_h); Rabbit_ctx *rctx=&ctx->rctx; Rabbit_keysetup<<<ctx->nr_blocks,ctx->nr_threads>>>(rctx->x_d ,rctx->c_d ,rctx->carry_d ,ctx->keys_d ,ctx->key_size); cutilCheckMsg("Kernel execution failed"); cudaThreadSynchronize(); #if 0 {//print state, each colum corresponds to a different stream u32 *x_h, *c_h,*carry_h; if(!(x_h=(u32*)malloc(nr_streams*8*sizeof(u32)))) { fprintf(stderr, "Failed to allocate c_h: %s\n",strerror(errno)); exit(-1); } if(!(c_h=(u32*)malloc(nr_streams*8*sizeof(u32)))) { fprintf(stderr, "Failed to allocate c_h: %s\n",strerror(errno)); exit(-1); } if(!(carry_h=(u32*)malloc(nr_streams*sizeof(u32)))) { fprintf(stderr, "Failed to allocate carry_h: %s\n",strerror(errno)); 
exit(-1); } cutilSafeCall(cudaMemcpy(x_h,rctx->x_d,(nr_streams*8*sizeof(u32)), cudaMemcpyDeviceToHost)); cutilSafeCall(cudaMemcpy(c_h,rctx->c_d,(nr_streams*8*sizeof(u32)), cudaMemcpyDeviceToHost)); cutilSafeCall(cudaMemcpy(carry_h,rctx->carry_d,(nr_streams*sizeof(u32)), cudaMemcpyDeviceToHost)); for(int i=0;i<nr_streams*8;i++) { printf("[0x%08x:0x%08x], ",x_h[i],c_h[i]); if(!((i+1)%nr_streams)) { printf("\n"); } } for(int i=0;i<nr_streams;i++) { printf("[ 0x%08x ], ",carry_h[i]); } free(x_h); free(c_h); free(carry_h); } #endif } void gSTREAM_ivsetup(gSTREAM_ctx* ctx, u8* ivs) { int nr_streams=ctx->nr_threads*ctx->nr_blocks; /* initialize the registers to all zeros */ if(ctx->iv_size>0) { u8* tmp_ivs=ivs; u32* ivs_h=NULL; size_t ivs_size= nr_streams*sizeof(u32)*(((ctx->iv_size-1)/(sizeof(u32)*8))+1); u32 *curr_iv; size_t iv_size_bytes=sizeof(u8)*(((ctx->iv_size-1)/(sizeof(u8)*8))+1); size_t iv_size_nrwords=(((ctx->iv_size-1)/(sizeof(u32)*8))+1); cutilSafeCall(cudaMalloc((void**)&(ctx->ivs_d),ivs_size)); ctx->allocated_ivs=1; if(!(ivs_h=(u32*)malloc(ivs_size))) { fprintf(stderr,"Could not allocate ivs_h: %s\n",strerror(errno)); exit(-1); } /* allocate a current working iv */ if(!(curr_iv=(u32*)malloc(sizeof(u32)*iv_size_nrwords))) { fprintf(stderr,"Could not allocate curr_iv: %s\n",strerror(errno)); exit(-1); } memset(curr_iv,0x00,sizeof(u32)*iv_size_nrwords); for(int i=0;i<nr_streams;i++) { /* copy one of the ivs to current iv */ memcpy(curr_iv,tmp_ivs,iv_size_bytes); tmp_ivs+=iv_size_bytes; /* copy current iv to stream-aligned one */ for(int j=0;j<iv_size_nrwords;j++) { ivs_h[j*nr_streams+i]=CH_ENDIANESS32(curr_iv[j]); } } free(curr_iv); /* Copy ivs to device and free them from host */ cutilSafeCall(cudaMemcpy(ctx->ivs_d,ivs_h,ivs_size, cudaMemcpyHostToDevice)); free(ivs_h); } /* Load in iv, key and preclock */ Rabbit_ctx *rctx=&ctx->rctx; Rabbit_ivsetup<<<ctx->nr_blocks,ctx->nr_threads>>>(rctx->x_d ,rctx->c_d ,rctx->carry_d ,ctx->ivs_d ,ctx->iv_size); cutilCheckMsg("Kernel execution failed"); cudaThreadSynchronize(); #if 0 {//print state, each colum corresponds to a different stream printf("\nafter ivsetup:\n"); u32 *x_h, *c_h,*carry_h; if(!(x_h=(u32*)malloc(nr_streams*8*sizeof(u32)))) { fprintf(stderr, "Failed to allocate c_h: %s\n",strerror(errno)); exit(-1); } if(!(c_h=(u32*)malloc(nr_streams*8*sizeof(u32)))) { fprintf(stderr, "Failed to allocate c_h: %s\n",strerror(errno)); exit(-1); } if(!(carry_h=(u32*)malloc(nr_streams*sizeof(u32)))) { fprintf(stderr, "Failed to allocate carry_h: %s\n",strerror(errno)); exit(-1); } cutilSafeCall(cudaMemcpy(x_h,rctx->x_d,(nr_streams*8*sizeof(u32)), cudaMemcpyDeviceToHost)); cutilSafeCall(cudaMemcpy(c_h,rctx->c_d,(nr_streams*8*sizeof(u32)), cudaMemcpyDeviceToHost)); cutilSafeCall(cudaMemcpy(carry_h,rctx->carry_d,(nr_streams*sizeof(u32)), cudaMemcpyDeviceToHost)); for(int i=0;i<nr_streams*8;i++) { printf("[0x%08x:0x%08x], ",x_h[i],c_h[i]); if(!((i+1)%nr_streams)) { printf("\n"); } } for(int i=0;i<nr_streams;i++) { printf("[ 0x%08x ], ",carry_h[i]); } free(x_h); free(c_h); free(carry_h); } #endif } void gSTREAM_keystream_bytes(gSTREAM_ctx* ctx, u8* keystreams, u32 length) { gSTREAM_process_bytes(GEN_KEYSTREAM,ctx,NULL,keystreams,length); } void gSTREAM_process_bytes(gSTREAM_action action, gSTREAM_ctx* ctx, u8* inputs, u8* outputs, u32 length) { int nr_streams=ctx->nr_blocks*ctx->nr_threads; size_t length_nr_words=(((length-1)/(sizeof(u32)))+1); size_t buff_size=nr_streams*length_nr_words*sizeof(u32); u32* tmp_buffer; /* allocate buffer */ 
if((!ctx->allocated_buff)||((length_nr_words*sizeof(u32))>ctx->buff_size)) { if(ctx->allocated_buff) { free(ctx->buff_h); //alocate a large buffer } cutilSafeCall(cudaHostAlloc((void**)&(ctx->buff_h),buff_size, cudaHostAllocMapped)); cutilSafeCall(cudaHostGetDevicePointer((void **)&(ctx->buff_d), ctx->buff_h,0)); ctx->allocated_buff=1; ctx->buff_size=length_nr_words*sizeof(u32); } /* allocate a current working buffer */ if(!(tmp_buffer=(u32*)malloc(sizeof(u32)*length_nr_words))) { fprintf(stderr,"Could not allocate tmp_buffer: %s\n",strerror(errno)); exit(-1); } if(action!=GEN_KEYSTREAM) { for(int i=0;i<nr_streams;i++) { /* copy one of the inputs to current working buffer */ memcpy(tmp_buffer,inputs,length); inputs+=length; /* copy current iv to stream-aligned one */ for(int j=0;j<length_nr_words;j++) { ctx->buff_h[j*nr_streams+i]=CH_ENDIANESS32(tmp_buffer[j]); } } } /* process bytes */ Rabbit_ctx *rctx=&ctx->rctx; cutilCheckError(cutStartTimer(ctx->bench.timer)); Rabbit_process_bytes<<<ctx->nr_blocks,ctx->nr_threads>>>(action ,rctx->x_d ,rctx->c_d ,rctx->carry_d ,ctx->buff_d ,length_nr_words); cutilCheckMsg("Kernel execution failed"); cudaThreadSynchronize(); cutilCheckError(cutStopTimer(ctx->bench.timer)); /* copy from working buffer to output buffer */ for(int i=0;i<nr_streams;i++) { /* copy one of the keystreams to current keystream */ for(int j=0;j<length_nr_words;j++) { tmp_buffer[j]=ctx->buff_h[i+j*nr_streams]; } memcpy(outputs,tmp_buffer,length); outputs+=length; } free(tmp_buffer); } double gSTREAM_getTimerValue(gSTREAM_ctx* ctx) { return cutGetTimerValue(ctx->bench.timer); }
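A hedged usage sketch for the gSTREAM API defined above, not taken from the original sources: it assumes Rabbit's usual 128-bit key and 64-bit IV, a caller-chosen thread/block geometry, and that the u8/u32 typedefs come from gSTREAM.h; all buffer names and sizes are illustrative.

#include <stdio.h>
#include <stdlib.h>
#include "gSTREAM.h"

int main(void)
{
  const int device = 0, nr_threads = 128, nr_blocks = 8;   // illustrative geometry
  const int nr_streams = nr_threads*nr_blocks;
  const u32 keysize = 128, ivsize = 64;                    // Rabbit's nominal key/IV sizes (assumption)
  const u32 length = 1024;                                 // keystream bytes per stream

  u8* keys = (u8*)calloc(nr_streams, keysize/8);           // one key per stream (all-zero for the sketch)
  u8* ivs  = (u8*)calloc(nr_streams, ivsize/8);            // one IV per stream
  u8* out  = (u8*)malloc((size_t)nr_streams*length);       // receives nr_streams keystreams of `length` bytes each

  gSTREAM_ctx ctx;
  gSTREAM_init(&ctx, device, nr_threads, nr_blocks);
  gSTREAM_keysetup(&ctx, keys, keysize, ivsize);
  gSTREAM_ivsetup(&ctx, ivs);
  gSTREAM_keystream_bytes(&ctx, out, length);
  printf("kernel time: %f ms\n", gSTREAM_getTimerValue(&ctx));
  gSTREAM_exit(&ctx);

  free(keys); free(ivs); free(out);
  return 0;
}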
e42cc35855d3236fdd0d9e0f4c4be134949fb513.hip
// !!! This is a file automatically generated by hipify!!! /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // //M*/ #if !defined CUDA_DISABLER #include "column_filter.h" #ifndef OPENCV_TINY_GPU_MODULE namespace filter { template void linearColumn<float3, short3>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, hipStream_t stream); } #endif #endif /* CUDA_DISABLER */
e42cc35855d3236fdd0d9e0f4c4be134949fb513.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // //M*/ #if !defined CUDA_DISABLER #include "column_filter.h" #ifndef OPENCV_TINY_GPU_MODULE namespace filter { template void linearColumn<float3, short3>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, cudaStream_t stream); } #endif #endif /* CUDA_DISABLER */
c23352f79cdebf4982e679397236465708ec287c.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <cpp/dev_random.cpp> #include <tclap/CmdLine.h> #include <itpp/itbase.h> #include <itpp/stat/histogram.h> #include "cpp/RMT.cpp" #include <cpp/itpp_ext_math.cpp> #include <cpp/spinchain.cpp> #include <itpp/stat/misc_stat.h> #include <fstream> #include <hip/hip_runtime.h> #include "hip_functions.hip" #include "hip_utils.hip" #include "ev_routines.cu" #include "cfp_routines.cu" //using namespace std; //using namespace itpp; //using namespace itppextmath; //using namespace cfpmath; //using namespace spinchain; TCLAP::CmdLine cmd("Command description message", ' ', "0.1"); TCLAP::ValueArg<string> optionArg("o","option", "Option" ,false,"normalito", "string",cmd); TCLAP::ValueArg<string> optionArg2("","option2", "Option2" ,false,"fidelity", "string",cmd); TCLAP::ValueArg<unsigned int> seed("s","seed", "Random seed [0 for urandom]",false, 243243,"unsigned int",cmd); TCLAP::ValueArg<int> qubits("q","qubits", "number of qubits",false, 4,"int",cmd); TCLAP::ValueArg<double> J("J","ising_coupling", "Ising interaction in the z-direction",false, 1.0,"double",cmd); TCLAP::ValueArg<double> bx("","bx", "Magnetic field in x direction",false, 1.4,"double",cmd); TCLAP::ValueArg<double> by("","by", "Magnetic field in y direction",false, 0.,"double",cmd); TCLAP::ValueArg<double> bz("","bz", "Magnetic field in z direction",false, 1.4,"double",cmd); TCLAP::ValueArg<double> theta("","theta", "polar angle",false, 1.0,"double",cmd); TCLAP::ValueArg<double> phi("","phi", "azimultal angle",false, 1.0,"double",cmd); TCLAP::ValueArg<double> deltabx("","deltabx", "perturbation",false, 0.1,"double",cmd); TCLAP::ValueArg<int> steps("","steps","steps",false, 100,"int",cmd); TCLAP::ValueArg<double> Jpert("","Jpert","Perturbation on Ising",false, 0.0,"double",cmd); TCLAP::ValueArg<int> dev("","dev", "Gpu to be used, 0 for c20, 1 para la jodida",false, 0,"int",cmd); int main(int argc, char* argv[]) { cmd.parse( argc, argv ); cout.precision(17); hipSetDevice(dev.getValue()); // {{{ Set seed for random unsigned int semilla=seed.getValue(); if (semilla == 0){ Random semilla_uran; semilla=semilla_uran.strong(); } itpp::RNG_reset(semilla); // }}} itpp::vec b(3), bpert(3), bzeros(3); b(0)=bx.getValue(); b(1)=by.getValue(); b(2)=bz.getValue(); bzeros=b-b; bpert=b; bpert(0)=b(0)+deltabx.getValue(); string option=optionArg.getValue(); string option2=optionArg2.getValue(); itpp::cvec state, staterev, qustate; //ofstream fidelity; //fidelity.open("fidelity.dat"); //qustate=RandomState(64); //int dim=pow_2(qubits.getValue()); qustate=itppextmath::BlochToQubit(theta.getValue(),phi.getValue()); //qustate=RandomState(2); //for(int i=0; i<qubits.getValue()+1;i++){ //list(i)=qustate; //} if(option=="normalito") state=itppextmath::TensorPow(qustate,qubits.getValue()); if(option=="randU") state=RMT::RandomCUE(pow(2, qubits.getValue()))*itppextmath::TensorPow(qustate,qubits.getValue()); if(option=="klimov") state=itppextmath::TensorProduct(itppextmath::TensorProduct(itppextmath::TensorPow(qustate,3),itppextmath::sigma(1)*qustate),itppextmath::TensorPow(qustate,qubits.getValue()-4)); if(option=="klimovy") state=itppextmath::TensorProduct(itppextmath::TensorProduct(itppextmath::TensorPow(qustate,3),itppextmath::sigma(2)*qustate),itppextmath::TensorPow(qustate,qubits.getValue()-4)); if(option=="klimov2") 
state=itppextmath::TensorProduct(itppextmath::TensorProduct(itppextmath::TensorPow(qustate,2),itppextmath::TensorPow(itppextmath::sigma(1)*qustate,2)),itppextmath::TensorPow(qustate,qubits.getValue()-4)); //cout<< qustate ; staterev=state; double Jrev=J.getValue()+Jpert.getValue(); if(option2=="fidelity"){ itpp::vec list(steps.getValue()); for(int i=0;i<steps.getValue();i++){ list(i)=pow( abs( dot( conj(staterev),state)),2); //cout<< pow( abs( dot( conj(staterev),state)),2) <<endl; std::cout << list(i) <<endl; // cout<< i<< " " << list(i) <<endl; list(i)=sqrt(list(i)); itppcuda::apply_floquet(state, J.getValue(), b); itppcuda::apply_floquet(staterev, Jrev, bpert); //cout<<abs(dot(conj(staterev),state))<<endl; //fidelity<<pow(abs(dot(conj(staterev),state)),2)<<endl; } //fidelity.close(); //cout << staterev; std::cout<< itppextmath::sum_positive_derivatives(list)<< endl; } if(option2=="correlacion"){ itpp::cvec list(steps.getValue()); itpp::cvec init=state; for(int i=0;i<steps.getValue();i++){ list(i)=dot(conj(init),state); std::cout << real(list(i)) << " " << imag(list(i)) <<endl; //cout << list <<endl; itppcuda::apply_floquet(state, J.getValue(), b); } } if(option2=="fidelityandipr"){ itpp::vec listfidel(steps.getValue()); itpp::cvec listcorr(steps.getValue()); itpp::cvec init=state; for(int i=0;i<steps.getValue();i++){ listfidel(i)=pow( abs( dot( conj(staterev),state)),2); listcorr(i)=pow(abs(dot(conj(init),state)),2); //cout<< pow( abs( dot( conj(staterev),state)),2) <<endl; std::cout << listfidel(i) <<endl; // cout<< i<< " " << list(i) <<endl; listfidel(i)=sqrt(listfidel(i)); itppcuda::apply_floquet(state, J.getValue(), b); itppcuda::apply_floquet(staterev, J.getValue(), b); itppcuda::apply_floquet(staterev, Jpert.getValue(), bzeros); //cout<<abs(dot(conj(staterev),state))<<endl; //fidelity<<pow(abs(dot(conj(staterev),state)),2)<<endl; } //fidelity.close(); //cout << staterev; cout<< itppextmath::sum_positive_derivatives(listfidel)<< endl; cout<< real(mean(listcorr))<< endl; } }
c23352f79cdebf4982e679397236465708ec287c.cu
#include <iostream> #include <cpp/dev_random.cpp> #include <tclap/CmdLine.h> #include <itpp/itbase.h> #include <itpp/stat/histogram.h> #include "cpp/RMT.cpp" #include <cpp/itpp_ext_math.cpp> #include <cpp/spinchain.cpp> #include <itpp/stat/misc_stat.h> #include <fstream> #include <cuda.h> #include "cuda_functions.cu" #include "cuda_utils.cu" #include "ev_routines.cu" #include "cfp_routines.cu" //using namespace std; //using namespace itpp; //using namespace itppextmath; //using namespace cfpmath; //using namespace spinchain; TCLAP::CmdLine cmd("Command description message", ' ', "0.1"); TCLAP::ValueArg<string> optionArg("o","option", "Option" ,false,"normalito", "string",cmd); TCLAP::ValueArg<string> optionArg2("","option2", "Option2" ,false,"fidelity", "string",cmd); TCLAP::ValueArg<unsigned int> seed("s","seed", "Random seed [0 for urandom]",false, 243243,"unsigned int",cmd); TCLAP::ValueArg<int> qubits("q","qubits", "number of qubits",false, 4,"int",cmd); TCLAP::ValueArg<double> J("J","ising_coupling", "Ising interaction in the z-direction",false, 1.0,"double",cmd); TCLAP::ValueArg<double> bx("","bx", "Magnetic field in x direction",false, 1.4,"double",cmd); TCLAP::ValueArg<double> by("","by", "Magnetic field in y direction",false, 0.,"double",cmd); TCLAP::ValueArg<double> bz("","bz", "Magnetic field in z direction",false, 1.4,"double",cmd); TCLAP::ValueArg<double> theta("","theta", "polar angle",false, 1.0,"double",cmd); TCLAP::ValueArg<double> phi("","phi", "azimultal angle",false, 1.0,"double",cmd); TCLAP::ValueArg<double> deltabx("","deltabx", "perturbation",false, 0.1,"double",cmd); TCLAP::ValueArg<int> steps("","steps","steps",false, 100,"int",cmd); TCLAP::ValueArg<double> Jpert("","Jpert","Perturbation on Ising",false, 0.0,"double",cmd); TCLAP::ValueArg<int> dev("","dev", "Gpu to be used, 0 for c20, 1 para la jodida",false, 0,"int",cmd); int main(int argc, char* argv[]) { cmd.parse( argc, argv ); cout.precision(17); cudaSetDevice(dev.getValue()); // {{{ Set seed for random unsigned int semilla=seed.getValue(); if (semilla == 0){ Random semilla_uran; semilla=semilla_uran.strong(); } itpp::RNG_reset(semilla); // }}} itpp::vec b(3), bpert(3), bzeros(3); b(0)=bx.getValue(); b(1)=by.getValue(); b(2)=bz.getValue(); bzeros=b-b; bpert=b; bpert(0)=b(0)+deltabx.getValue(); string option=optionArg.getValue(); string option2=optionArg2.getValue(); itpp::cvec state, staterev, qustate; //ofstream fidelity; //fidelity.open("fidelity.dat"); //qustate=RandomState(64); //int dim=pow_2(qubits.getValue()); qustate=itppextmath::BlochToQubit(theta.getValue(),phi.getValue()); //qustate=RandomState(2); //for(int i=0; i<qubits.getValue()+1;i++){ //list(i)=qustate; //} if(option=="normalito") state=itppextmath::TensorPow(qustate,qubits.getValue()); if(option=="randU") state=RMT::RandomCUE(pow(2, qubits.getValue()))*itppextmath::TensorPow(qustate,qubits.getValue()); if(option=="klimov") state=itppextmath::TensorProduct(itppextmath::TensorProduct(itppextmath::TensorPow(qustate,3),itppextmath::sigma(1)*qustate),itppextmath::TensorPow(qustate,qubits.getValue()-4)); if(option=="klimovy") state=itppextmath::TensorProduct(itppextmath::TensorProduct(itppextmath::TensorPow(qustate,3),itppextmath::sigma(2)*qustate),itppextmath::TensorPow(qustate,qubits.getValue()-4)); if(option=="klimov2") state=itppextmath::TensorProduct(itppextmath::TensorProduct(itppextmath::TensorPow(qustate,2),itppextmath::TensorPow(itppextmath::sigma(1)*qustate,2)),itppextmath::TensorPow(qustate,qubits.getValue()-4)); //cout<< qustate ; 
staterev=state; double Jrev=J.getValue()+Jpert.getValue(); if(option2=="fidelity"){ itpp::vec list(steps.getValue()); for(int i=0;i<steps.getValue();i++){ list(i)=pow( abs( dot( conj(staterev),state)),2); //cout<< pow( abs( dot( conj(staterev),state)),2) <<endl; std::cout << list(i) <<endl; // cout<< i<< " " << list(i) <<endl; list(i)=sqrt(list(i)); itppcuda::apply_floquet(state, J.getValue(), b); itppcuda::apply_floquet(staterev, Jrev, bpert); //cout<<abs(dot(conj(staterev),state))<<endl; //fidelity<<pow(abs(dot(conj(staterev),state)),2)<<endl; } //fidelity.close(); //cout << staterev; std::cout<< itppextmath::sum_positive_derivatives(list)<< endl; } if(option2=="correlacion"){ itpp::cvec list(steps.getValue()); itpp::cvec init=state; for(int i=0;i<steps.getValue();i++){ list(i)=dot(conj(init),state); std::cout << real(list(i)) << " " << imag(list(i)) <<endl; //cout << list <<endl; itppcuda::apply_floquet(state, J.getValue(), b); } } if(option2=="fidelityandipr"){ itpp::vec listfidel(steps.getValue()); itpp::cvec listcorr(steps.getValue()); itpp::cvec init=state; for(int i=0;i<steps.getValue();i++){ listfidel(i)=pow( abs( dot( conj(staterev),state)),2); listcorr(i)=pow(abs(dot(conj(init),state)),2); //cout<< pow( abs( dot( conj(staterev),state)),2) <<endl; std::cout << listfidel(i) <<endl; // cout<< i<< " " << list(i) <<endl; listfidel(i)=sqrt(listfidel(i)); itppcuda::apply_floquet(state, J.getValue(), b); itppcuda::apply_floquet(staterev, J.getValue(), b); itppcuda::apply_floquet(staterev, Jpert.getValue(), bzeros); //cout<<abs(dot(conj(staterev),state))<<endl; //fidelity<<pow(abs(dot(conj(staterev),state)),2)<<endl; } //fidelity.close(); //cout << staterev; cout<< itppextmath::sum_positive_derivatives(listfidel)<< endl; cout<< real(mean(listcorr))<< endl; } }
dbb1f5e9d8b039f731c67a10e3c26a089a913828.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "scale.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1], BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            float *a = NULL;
            hipMalloc(&a, XSIZE*YSIZE);
            int size = XSIZE*YSIZE;
            int c = 2;
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL(( scale), dim3(gridBlock), dim3(threadBlock), 0, 0, a,size,c);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( scale), dim3(gridBlock), dim3(threadBlock), 0, 0, a,size,c);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( scale), dim3(gridBlock), dim3(threadBlock), 0, 0, a,size,c);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
dbb1f5e9d8b039f731c67a10e3c26a089a913828.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "scale.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1], BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            float *a = NULL;
            cudaMalloc(&a, XSIZE*YSIZE);
            int size = XSIZE*YSIZE;
            int c = 2;
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            scale<<<gridBlock,threadBlock>>>(a,size,c);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                scale<<<gridBlock,threadBlock>>>(a,size,c);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                scale<<<gridBlock,threadBlock>>>(a,size,c);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
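One note on the benchmark harness above (both versions): the two while-loops that pad iXSIZE/iYSIZE up to multiples of the block dimensions produce the same launch geometry as the usual ceiling-division idiom. A small illustrative equivalent, not part of the original file; the helper name is hypothetical.

#include <cuda_runtime.h>

// Illustrative only: same grid geometry as the padding loops in main().
static dim3 makeGrid(int XSIZE, int YSIZE, int BLOCKX, int BLOCKY)
{
    return dim3((XSIZE + BLOCKX - 1)/BLOCKX,   // ceil(XSIZE/BLOCKX)
                (YSIZE + BLOCKY - 1)/BLOCKY);  // ceil(YSIZE/BLOCKY)
}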
35437f565be4aa12fc3e738f7950ffe9d1ee1b70.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <thrust/copy.h> #include <thrust/sort.h> #include <thrust/memory.h> #include <thrust/system/hip/memory.h> #include <new> // for std::bad_alloc #include <cassert> #include <iostream> #include <iterator> // This example demonstrates how to intercept calls to malloc // and free to implement a fallback for hipMalloc. // When hipMalloc fails to allocate device memory the fallback_allocator // attempts to allocate pinned host memory and then map the host buffer // into the device address space. The fallback_allocator enables // the GPU to process data sets that are larger than the device // memory, albeit with a significantly reduced performance. // initialize some unsorted data __global__ void kernel(int * d_ptr, size_t N) { size_t thread_id = blockDim.x * blockIdx.x + threadIdx.x; size_t grid_size = blockDim.x * gridDim.x; for (size_t i = thread_id; i < N; i += grid_size) d_ptr[i] = i % 1024; } // derive a simple allocator from cuda::dispatchable for using pinned host memory as a functional fallback struct fallback_allocator : thrust::hip::dispatchable<fallback_allocator> {}; // overload malloc on fallback_allocator to implement our special malloc // its job to is allocate host memory as a functional fallback when hipMalloc fails void *malloc(fallback_allocator, std::size_t n) { void *result = 0; // attempt to allocate device memory if(hipMalloc(&result, n) == hipSuccess) { std::cout << " allocated " << n << " bytes of device memory" << std::endl; } else { // attempt to allocate pinned host memory void *h_ptr = 0; if(hipHostMalloc(&h_ptr, n) == hipSuccess) { // attempt to map host pointer into device memory space if(hipHostGetDevicePointer(&result, h_ptr, 0) == hipSuccess) { std::cout << " allocated " << n << " bytes of pinned host memory (fallback successful)" << std::endl; } else { // attempt to deallocate buffer std::cout << " failed to map host memory into device address space (fallback failed)" << std::endl; hipHostFree(h_ptr); result = 0; } } else { std::cout << " failed to allocate " << n << " bytes of memory (fallback failed)" << std::endl; } } return result; } // overload free on fallback_allocator to implement our special free // its job to is inspect where the pointer lives and free it appropriately template<typename Pointer> void free(fallback_allocator, Pointer ptr) { void *raw_ptr = thrust::raw_pointer_cast(ptr); // determine where memory resides hipPointerAttribute_t attributes; if(hipPointerGetAttributes(&attributes, raw_ptr) == hipSuccess) { // free the memory in the appropriate way if(attributes.memoryType == hipMemoryTypeHost) { hipHostFree(raw_ptr); } else { hipFree(raw_ptr); } } } int main(void) { // check whether device supports mapped host memory int device; hipGetDevice(&device); hipDeviceProp_t properties; hipGetDeviceProperties(&properties, device); fallback_allocator alloc; if(!properties.canMapHostMemory) { std::cout << "Device #" << device << " [" << properties.name << "] does not support memory mapping" << std::endl; return 0; } else { std::cout << "Testing fallback_allocator on device #" << device << " [" << properties.name << "] with " << properties.totalGlobalMem << " bytes of device memory" << std::endl; } try { size_t one_million = 1 << 20; size_t one_billion = 1 << 30; for(size_t n = one_million; n < one_billion; n *= 2) { // TODO ideally we'd use the fallback_allocator in the vector too //thrust::hip::vector<int, fallback_allocator> d_vec(n); std::cout << 
"attempting to sort " << n << " values" << std::endl; // use our special malloc to allocate int *raw_ptr = (int *) malloc(alloc, n * sizeof(int)); if(raw_ptr) { hipLaunchKernelGGL(( kernel), dim3(100),dim3(256), 0, 0, raw_ptr, n); // generate unsorted values thrust::hip::pointer<int> begin = thrust::hip::pointer<int>(raw_ptr); thrust::hip::pointer<int> end = begin + n; // sort the data using our special allocator // if temporary memory is required during the sort, // our versions of malloc & free will be called thrust::sort(alloc, begin, end); free(alloc, raw_ptr); } } } catch(std::bad_alloc) { return 0; } return 0; }
35437f565be4aa12fc3e738f7950ffe9d1ee1b70.cu
#include <thrust/copy.h> #include <thrust/sort.h> #include <thrust/memory.h> #include <thrust/system/cuda/memory.h> #include <new> // for std::bad_alloc #include <cassert> #include <iostream> #include <iterator> // This example demonstrates how to intercept calls to malloc // and free to implement a fallback for cudaMalloc. // When cudaMalloc fails to allocate device memory the fallback_allocator // attempts to allocate pinned host memory and then map the host buffer // into the device address space. The fallback_allocator enables // the GPU to process data sets that are larger than the device // memory, albeit with a significantly reduced performance. // initialize some unsorted data __global__ void kernel(int * d_ptr, size_t N) { size_t thread_id = blockDim.x * blockIdx.x + threadIdx.x; size_t grid_size = blockDim.x * gridDim.x; for (size_t i = thread_id; i < N; i += grid_size) d_ptr[i] = i % 1024; } // derive a simple allocator from cuda::dispatchable for using pinned host memory as a functional fallback struct fallback_allocator : thrust::cuda::dispatchable<fallback_allocator> {}; // overload malloc on fallback_allocator to implement our special malloc // its job to is allocate host memory as a functional fallback when cudaMalloc fails void *malloc(fallback_allocator, std::size_t n) { void *result = 0; // attempt to allocate device memory if(cudaMalloc(&result, n) == cudaSuccess) { std::cout << " allocated " << n << " bytes of device memory" << std::endl; } else { // attempt to allocate pinned host memory void *h_ptr = 0; if(cudaMallocHost(&h_ptr, n) == cudaSuccess) { // attempt to map host pointer into device memory space if(cudaHostGetDevicePointer(&result, h_ptr, 0) == cudaSuccess) { std::cout << " allocated " << n << " bytes of pinned host memory (fallback successful)" << std::endl; } else { // attempt to deallocate buffer std::cout << " failed to map host memory into device address space (fallback failed)" << std::endl; cudaFreeHost(h_ptr); result = 0; } } else { std::cout << " failed to allocate " << n << " bytes of memory (fallback failed)" << std::endl; } } return result; } // overload free on fallback_allocator to implement our special free // its job to is inspect where the pointer lives and free it appropriately template<typename Pointer> void free(fallback_allocator, Pointer ptr) { void *raw_ptr = thrust::raw_pointer_cast(ptr); // determine where memory resides cudaPointerAttributes attributes; if(cudaPointerGetAttributes(&attributes, raw_ptr) == cudaSuccess) { // free the memory in the appropriate way if(attributes.memoryType == cudaMemoryTypeHost) { cudaFreeHost(raw_ptr); } else { cudaFree(raw_ptr); } } } int main(void) { // check whether device supports mapped host memory int device; cudaGetDevice(&device); cudaDeviceProp properties; cudaGetDeviceProperties(&properties, device); fallback_allocator alloc; if(!properties.canMapHostMemory) { std::cout << "Device #" << device << " [" << properties.name << "] does not support memory mapping" << std::endl; return 0; } else { std::cout << "Testing fallback_allocator on device #" << device << " [" << properties.name << "] with " << properties.totalGlobalMem << " bytes of device memory" << std::endl; } try { size_t one_million = 1 << 20; size_t one_billion = 1 << 30; for(size_t n = one_million; n < one_billion; n *= 2) { // TODO ideally we'd use the fallback_allocator in the vector too //thrust::cuda::vector<int, fallback_allocator> d_vec(n); std::cout << "attempting to sort " << n << " values" << std::endl; // use our special 
malloc to allocate int *raw_ptr = (int *) malloc(alloc, n * sizeof(int)); if(raw_ptr) { kernel<<<100,256>>>(raw_ptr, n); // generate unsorted values thrust::cuda::pointer<int> begin = thrust::cuda::pointer<int>(raw_ptr); thrust::cuda::pointer<int> end = begin + n; // sort the data using our special allocator // if temporary memory is required during the sort, // our versions of malloc & free will be called thrust::sort(alloc, begin, end); free(alloc, raw_ptr); } } } catch(std::bad_alloc) { return 0; } return 0; }
d042738993e736cee171139104bb9c601257ce9a.hip
// !!! This is a file automatically generated by hipify!!! // ---------------------------------------------------------------------------- // - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- // Copyright (c) 2018-2023 www.open3d.org // SPDX-License-Identifier: MIT // ---------------------------------------------------------------------------- #define EIGEN_USE_GPU #include "ContinuousConvBackpropFilterOpKernel.h" #include "open3d/core/CUDAUtils.h" #include "open3d/ml/impl/continuous_conv/ContinuousConvBackpropFilter.cuh" using namespace open3d; using namespace open3d::ml; using namespace open3d::ml::impl; using namespace tensorflow; template <class TFeat, class TOut, class TReal, class TIndex> class ContinuousConvBackpropFilterOpKernelCUDA : public ContinuousConvBackpropFilterOpKernel<TIndex> { public: explicit ContinuousConvBackpropFilterOpKernelCUDA( OpKernelConstruction* construction) : ContinuousConvBackpropFilterOpKernel<TIndex>(construction) { texture_alignment = open3d::core::GetCUDACurrentDeviceTextureAlignment(); } void Kernel(tensorflow::OpKernelContext* context, const tensorflow::Tensor& filter, const tensorflow::Tensor& out_positions, const tensorflow::Tensor& extents, const tensorflow::Tensor& offset, const tensorflow::Tensor& inp_positions, const tensorflow::Tensor& inp_features, const tensorflow::Tensor& inp_importance, const tensorflow::Tensor& neighbors_index, const tensorflow::Tensor& neighbors_importance, const tensorflow::Tensor& neighbors_row_splits, const tensorflow::Tensor& out_features_gradient, const std::vector<int>& filter_dims, const bool individual_extents, const bool isotropic_extents, const bool point_importances, const bool has_neighbors_importances, tensorflow::Tensor& filter_backprop) { auto device = context->eigen_gpu_device(); void* temp_ptr = nullptr; size_t temp_size = 0; size_t max_temp_size = 0; // determine temp_size CConvBackpropFilterCUDA<TFeat, TOut, TReal, TIndex>( device.stream(), temp_ptr, temp_size, max_temp_size, texture_alignment, filter_backprop.flat<TOut>().data(), filter_dims, out_positions.shape().dim_size(0), out_positions.flat<TReal>().data(), inp_positions.shape().dim_size(0), inp_positions.flat<TReal>().data(), inp_features.flat<TFeat>().data(), point_importances ? inp_importance.flat<TFeat>().data() : nullptr, neighbors_index.shape().dim_size(0), (TIndex*)neighbors_index.flat<TIndex>().data(), has_neighbors_importances ? neighbors_importance.flat<TFeat>().data() : nullptr, (int64_t*)neighbors_row_splits.flat<int64>().data(), extents.flat<TReal>().data(), offset.flat<TReal>().data(), out_features_gradient.flat<TFeat>().data(), this->interpolation, this->coordinate_mapping, this->align_corners, individual_extents, isotropic_extents, this->normalize); temp_size = ::max(::min(size_t(this->max_temp_mem_MB) * 1024 * 1024, max_temp_size), temp_size); Tensor temp_tensor; TensorShape temp_shape({ssize_t(temp_size)}); OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<uint8_t>::v(), temp_shape, &temp_tensor)); temp_ptr = temp_tensor.flat<uint8_t>().data(); // actually run the operation CConvBackpropFilterCUDA<TFeat, TOut, TReal, TIndex>( device.stream(), temp_ptr, temp_size, max_temp_size, texture_alignment, filter_backprop.flat<TOut>().data(), filter_dims, out_positions.shape().dim_size(0), out_positions.flat<TReal>().data(), inp_positions.shape().dim_size(0), inp_positions.flat<TReal>().data(), inp_features.flat<TFeat>().data(), point_importances ? 
inp_importance.flat<TFeat>().data() : nullptr, neighbors_index.shape().dim_size(0), (TIndex*)neighbors_index.flat<TIndex>().data(), has_neighbors_importances ? neighbors_importance.flat<TFeat>().data() : nullptr, (int64_t*)neighbors_row_splits.flat<int64>().data(), extents.flat<TReal>().data(), offset.flat<TReal>().data(), out_features_gradient.flat<TFeat>().data(), this->interpolation, this->coordinate_mapping, this->align_corners, individual_extents, isotropic_extents, this->normalize); } private: int texture_alignment; }; #define REG_KB(feattype, outtype, realtype, indextype) \ REGISTER_KERNEL_BUILDER( \ Name("Open3DContinuousConvBackpropFilter") \ .Device(DEVICE_GPU) \ .TypeConstraint<feattype>("TFeat") \ .TypeConstraint<outtype>("output_type") \ .TypeConstraint<realtype>("TReal") \ .TypeConstraint<indextype>("TIndex"), \ ContinuousConvBackpropFilterOpKernelCUDA<feattype, outtype, \ realtype, indextype>); REG_KB(float, float, float, int32) #undef REG_KB
d042738993e736cee171139104bb9c601257ce9a.cu
// ---------------------------------------------------------------------------- // - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- // Copyright (c) 2018-2023 www.open3d.org // SPDX-License-Identifier: MIT // ---------------------------------------------------------------------------- #define EIGEN_USE_GPU #include "ContinuousConvBackpropFilterOpKernel.h" #include "open3d/core/CUDAUtils.h" #include "open3d/ml/impl/continuous_conv/ContinuousConvBackpropFilter.cuh" using namespace open3d; using namespace open3d::ml; using namespace open3d::ml::impl; using namespace tensorflow; template <class TFeat, class TOut, class TReal, class TIndex> class ContinuousConvBackpropFilterOpKernelCUDA : public ContinuousConvBackpropFilterOpKernel<TIndex> { public: explicit ContinuousConvBackpropFilterOpKernelCUDA( OpKernelConstruction* construction) : ContinuousConvBackpropFilterOpKernel<TIndex>(construction) { texture_alignment = open3d::core::GetCUDACurrentDeviceTextureAlignment(); } void Kernel(tensorflow::OpKernelContext* context, const tensorflow::Tensor& filter, const tensorflow::Tensor& out_positions, const tensorflow::Tensor& extents, const tensorflow::Tensor& offset, const tensorflow::Tensor& inp_positions, const tensorflow::Tensor& inp_features, const tensorflow::Tensor& inp_importance, const tensorflow::Tensor& neighbors_index, const tensorflow::Tensor& neighbors_importance, const tensorflow::Tensor& neighbors_row_splits, const tensorflow::Tensor& out_features_gradient, const std::vector<int>& filter_dims, const bool individual_extents, const bool isotropic_extents, const bool point_importances, const bool has_neighbors_importances, tensorflow::Tensor& filter_backprop) { auto device = context->eigen_gpu_device(); void* temp_ptr = nullptr; size_t temp_size = 0; size_t max_temp_size = 0; // determine temp_size CConvBackpropFilterCUDA<TFeat, TOut, TReal, TIndex>( device.stream(), temp_ptr, temp_size, max_temp_size, texture_alignment, filter_backprop.flat<TOut>().data(), filter_dims, out_positions.shape().dim_size(0), out_positions.flat<TReal>().data(), inp_positions.shape().dim_size(0), inp_positions.flat<TReal>().data(), inp_features.flat<TFeat>().data(), point_importances ? inp_importance.flat<TFeat>().data() : nullptr, neighbors_index.shape().dim_size(0), (TIndex*)neighbors_index.flat<TIndex>().data(), has_neighbors_importances ? neighbors_importance.flat<TFeat>().data() : nullptr, (int64_t*)neighbors_row_splits.flat<int64>().data(), extents.flat<TReal>().data(), offset.flat<TReal>().data(), out_features_gradient.flat<TFeat>().data(), this->interpolation, this->coordinate_mapping, this->align_corners, individual_extents, isotropic_extents, this->normalize); temp_size = std::max(std::min(size_t(this->max_temp_mem_MB) * 1024 * 1024, max_temp_size), temp_size); Tensor temp_tensor; TensorShape temp_shape({ssize_t(temp_size)}); OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<uint8_t>::v(), temp_shape, &temp_tensor)); temp_ptr = temp_tensor.flat<uint8_t>().data(); // actually run the operation CConvBackpropFilterCUDA<TFeat, TOut, TReal, TIndex>( device.stream(), temp_ptr, temp_size, max_temp_size, texture_alignment, filter_backprop.flat<TOut>().data(), filter_dims, out_positions.shape().dim_size(0), out_positions.flat<TReal>().data(), inp_positions.shape().dim_size(0), inp_positions.flat<TReal>().data(), inp_features.flat<TFeat>().data(), point_importances ? 
inp_importance.flat<TFeat>().data() : nullptr, neighbors_index.shape().dim_size(0), (TIndex*)neighbors_index.flat<TIndex>().data(), has_neighbors_importances ? neighbors_importance.flat<TFeat>().data() : nullptr, (int64_t*)neighbors_row_splits.flat<int64>().data(), extents.flat<TReal>().data(), offset.flat<TReal>().data(), out_features_gradient.flat<TFeat>().data(), this->interpolation, this->coordinate_mapping, this->align_corners, individual_extents, isotropic_extents, this->normalize); } private: int texture_alignment; }; #define REG_KB(feattype, outtype, realtype, indextype) \ REGISTER_KERNEL_BUILDER( \ Name("Open3DContinuousConvBackpropFilter") \ .Device(DEVICE_GPU) \ .TypeConstraint<feattype>("TFeat") \ .TypeConstraint<outtype>("output_type") \ .TypeConstraint<realtype>("TReal") \ .TypeConstraint<indextype>("TIndex"), \ ContinuousConvBackpropFilterOpKernelCUDA<feattype, outtype, \ realtype, indextype>); REG_KB(float, float, float, int32) #undef REG_KB
672a47dd0bf2ca6e214e8ffb8823685782e00afe.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

//========================================================================================================================================================================================================200
//	findRangeK function
//========================================================================================================================================================================================================200

__global__ void findRangeK(long height,
                           knode *knodesD,
                           long knodes_elem,
                           long *currKnodeD,
                           long *offsetD,
                           long *lastKnodeD,
                           long *offset_2D,
                           int *startD,
                           int *endD,
                           int *RecstartD,
                           int *ReclenD)
{
    // private thread IDs
    int thid = threadIdx.x;
    int bid = blockIdx.x;

    int start = startD[bid];
    int end = endD[bid];
    int *curr_keys = knodesD[currKnodeD[bid]].keys;
    int *last_keys = knodesD[lastKnodeD[bid]].keys;
    int *curr_indices = knodesD[currKnodeD[bid]].indices;
    int *last_indices = knodesD[lastKnodeD[bid]].indices;

    // walk down the tree, one level per iteration
    int i;
    for (i = 0; i < height; i++) {

        if ((curr_keys[thid] <= start) && (curr_keys[thid+1] > start)) {
            // this conditional statement is inserted to avoid a crash due to a bug in the original code:
            // "offset[bid]" calculated below later addresses a part of knodes outside of its bounds and causes a segmentation fault;
            // more specifically, values saved into knodes->indices in the main function are out of bounds of the knodes that they address
            if (curr_indices[thid] < knodes_elem) {
                offsetD[bid] = curr_indices[thid];
            }
        }
        if ((last_keys[thid] <= end) && (last_keys[thid+1] > end)) {
            // this conditional statement is inserted to avoid a crash due to a bug in the original code:
            // "offset_2[bid]" calculated below later addresses a part of knodes outside of its bounds and causes a segmentation fault;
            // more specifically, values saved into knodes->indices in the main function are out of bounds of the knodes that they address
            if (last_indices[thid] < knodes_elem) {
                offset_2D[bid] = last_indices[thid];
            }
        }
        __syncthreads();

        // set for next tree level
        curr_keys = knodesD[offsetD[bid]].keys;
        curr_indices = knodesD[offsetD[bid]].indices;
        last_keys = knodesD[offset_2D[bid]].keys;
        last_indices = knodesD[offset_2D[bid]].indices;
        __syncthreads();
    }

    int kend = last_keys[thid];

    // Find the index of the starting record
    if (curr_keys[thid] == start) {
        RecstartD[bid] = curr_indices[thid];
    }
    __syncthreads();

    // Find the index of the ending record
    if (kend == end) {
        ReclenD[bid] = last_indices[thid] - RecstartD[bid] + 1;
    }
}

//========================================================================================================================================================================================================200
//	End
//========================================================================================================================================================================================================200
672a47dd0bf2ca6e214e8ffb8823685782e00afe.cu
//========================================================================================================================================================================================================200
//	findRangeK function
//========================================================================================================================================================================================================200

__global__ void findRangeK(long height,
                           knode *knodesD,
                           long knodes_elem,
                           long *currKnodeD,
                           long *offsetD,
                           long *lastKnodeD,
                           long *offset_2D,
                           int *startD,
                           int *endD,
                           int *RecstartD,
                           int *ReclenD)
{
    // private thread IDs
    int thid = threadIdx.x;
    int bid = blockIdx.x;

    int start = startD[bid];
    int end = endD[bid];
    int *curr_keys = knodesD[currKnodeD[bid]].keys;
    int *last_keys = knodesD[lastKnodeD[bid]].keys;
    int *curr_indices = knodesD[currKnodeD[bid]].indices;
    int *last_indices = knodesD[lastKnodeD[bid]].indices;

    // walk down the tree, one level per iteration
    int i;
    for (i = 0; i < height; i++) {

        if ((curr_keys[thid] <= start) && (curr_keys[thid+1] > start)) {
            // this conditional statement is inserted to avoid a crash due to a bug in the original code:
            // "offset[bid]" calculated below later addresses a part of knodes outside of its bounds and causes a segmentation fault;
            // more specifically, values saved into knodes->indices in the main function are out of bounds of the knodes that they address
            if (curr_indices[thid] < knodes_elem) {
                offsetD[bid] = curr_indices[thid];
            }
        }
        if ((last_keys[thid] <= end) && (last_keys[thid+1] > end)) {
            // this conditional statement is inserted to avoid a crash due to a bug in the original code:
            // "offset_2[bid]" calculated below later addresses a part of knodes outside of its bounds and causes a segmentation fault;
            // more specifically, values saved into knodes->indices in the main function are out of bounds of the knodes that they address
            if (last_indices[thid] < knodes_elem) {
                offset_2D[bid] = last_indices[thid];
            }
        }
        __syncthreads();

        // set for next tree level
        curr_keys = knodesD[offsetD[bid]].keys;
        curr_indices = knodesD[offsetD[bid]].indices;
        last_keys = knodesD[offset_2D[bid]].keys;
        last_indices = knodesD[offset_2D[bid]].indices;
        __syncthreads();
    }

    int kend = last_keys[thid];

    // Find the index of the starting record
    if (curr_keys[thid] == start) {
        RecstartD[bid] = curr_indices[thid];
    }
    __syncthreads();

    // Find the index of the ending record
    if (kend == end) {
        ReclenD[bid] = last_indices[thid] - RecstartD[bid] + 1;
    }
}

//========================================================================================================================================================================================================200
//	End
//========================================================================================================================================================================================================200
212e4814cc12ed4e983603351f8c6948e89402c1.hip
// !!! This is a file automatically generated by hipify!!!
#include <af/Mesh.h>
#include <af/MotionGraph.h>
#include <af/Constants.h>

#include <KNN.cu>
#include <knncuda.cu>

#include <set>
#include <unordered_set>

namespace af {

void buildGraphCuda(MotionGraph& graph,
                    const std::vector<Vec3f>& mesh,
                    const std::size_t cMotionGraphKnn,
                    const float cMotionGraphMinRadius) {
    if (mesh.size() == 0)
        return;

    std::vector<int> outputIndicies(cMotionGraphKnn);
    std::vector<float> outputDists(cMotionGraphKnn);

    if (graph.graph().size() == 0) {
        graph.push_back(mesh[0], cMotionGraphMinRadius, Mat4f::Identity());
    }

    int knn = 0;
    for (size_t i = 1; i < mesh.size(); i++) {
        knn = ::min(graph.graph().size(), cMotionGraphKnn);
        knn_cuda_global(&(graph.graph().vec_[0][0]), graph.graph().size(), &(mesh[i][0]), 1, 3,
                        knn, outputDists.data(), outputIndicies.data());
        // graph.knnSearch(outputIndicies, outputDists, mesh[i], cMotionGraphKnn);

        bool isSupported = false;
        for (std::size_t j = 0; j < knn; ++j) {
            if (outputDists[j] < graph.radiuses()[outputIndicies[j]]) {
                isSupported = true;
                break;
            }
        }
        if (isSupported)
            continue;

        graph.push_back(mesh[i], cMotionGraphMinRadius, Mat4f::Identity());
    }
}

void buildGraphCudaMine(thrust::device_vector<Vec3f>& graph_d,
                        unsigned int& graphSize,
                        const std::vector<Vec3f>& mesh,
                        const unsigned int cMotionGraphKnn,
                        const float cMotionGraphMinRadius) {
    std::vector<int> outputIndicies(cMotionGraphKnn);
    std::vector<float> outputDists(cMotionGraphKnn);

    thrust::device_vector<float> graphRadiuses_d(mesh.size());
    thrust::device_vector<Mat4f> graphTransforms_d(mesh.size());

    graph_d[graphSize] = mesh[0];
    graphRadiuses_d[graphSize] = cMotionGraphMinRadius;
    graphTransforms_d[graphSize] = Mat4f::Identity();
    ++graphSize;

    int knn = 0;
    for (size_t i = 1; i < mesh.size(); i++) {
        knn = ::min(graphSize, cMotionGraphKnn);
        af::knn(graph_d.data().get(), graphSize, mesh[i], knn, outputIndicies, outputDists);
        // knn_cuda_global(&(graph.graph().vec_[0][0]), graph.graph().size(), &(mesh[i][0]), 1, 3,
        //                 knn, outputDists.data(), outputIndicies.data());
        // graph.knnSearch(outputIndicies, outputDists, mesh[i], cMotionGraphKnn);

        bool isSupported = false;
        for (std::size_t j = 0; j < knn; ++j) {
            if (outputDists[j] < graphRadiuses_d[outputIndicies[j]]) {
                isSupported = true;
                break;
            }
        }
        if (isSupported)
            continue;

        graph_d[graphSize] = mesh[i];
        graphRadiuses_d[graphSize] = cMotionGraphMinRadius;
        graphTransforms_d[graphSize] = Mat4f::Identity();
        ++graphSize;
    }
}

}  // namespace af
212e4814cc12ed4e983603351f8c6948e89402c1.cu
#include <af/Mesh.h>
#include <af/MotionGraph.h>
#include <af/Constants.h>

#include <KNN.cu>
#include <knncuda.cu>

#include <set>
#include <unordered_set>

namespace af {

void buildGraphCuda(MotionGraph& graph,
                    const std::vector<Vec3f>& mesh,
                    const std::size_t cMotionGraphKnn,
                    const float cMotionGraphMinRadius) {
    if (mesh.size() == 0)
        return;

    std::vector<int> outputIndicies(cMotionGraphKnn);
    std::vector<float> outputDists(cMotionGraphKnn);

    if (graph.graph().size() == 0) {
        graph.push_back(mesh[0], cMotionGraphMinRadius, Mat4f::Identity());
    }

    int knn = 0;
    for (size_t i = 1; i < mesh.size(); i++) {
        knn = std::min(graph.graph().size(), cMotionGraphKnn);
        knn_cuda_global(&(graph.graph().vec_[0][0]), graph.graph().size(), &(mesh[i][0]), 1, 3,
                        knn, outputDists.data(), outputIndicies.data());
        // graph.knnSearch(outputIndicies, outputDists, mesh[i], cMotionGraphKnn);

        bool isSupported = false;
        for (std::size_t j = 0; j < knn; ++j) {
            if (outputDists[j] < graph.radiuses()[outputIndicies[j]]) {
                isSupported = true;
                break;
            }
        }
        if (isSupported)
            continue;

        graph.push_back(mesh[i], cMotionGraphMinRadius, Mat4f::Identity());
    }
}

void buildGraphCudaMine(thrust::device_vector<Vec3f>& graph_d,
                        unsigned int& graphSize,
                        const std::vector<Vec3f>& mesh,
                        const unsigned int cMotionGraphKnn,
                        const float cMotionGraphMinRadius) {
    std::vector<int> outputIndicies(cMotionGraphKnn);
    std::vector<float> outputDists(cMotionGraphKnn);

    thrust::device_vector<float> graphRadiuses_d(mesh.size());
    thrust::device_vector<Mat4f> graphTransforms_d(mesh.size());

    graph_d[graphSize] = mesh[0];
    graphRadiuses_d[graphSize] = cMotionGraphMinRadius;
    graphTransforms_d[graphSize] = Mat4f::Identity();
    ++graphSize;

    int knn = 0;
    for (size_t i = 1; i < mesh.size(); i++) {
        knn = std::min(graphSize, cMotionGraphKnn);
        af::knn(graph_d.data().get(), graphSize, mesh[i], knn, outputIndicies, outputDists);
        // knn_cuda_global(&(graph.graph().vec_[0][0]), graph.graph().size(), &(mesh[i][0]), 1, 3,
        //                 knn, outputDists.data(), outputIndicies.data());
        // graph.knnSearch(outputIndicies, outputDists, mesh[i], cMotionGraphKnn);

        bool isSupported = false;
        for (std::size_t j = 0; j < knn; ++j) {
            if (outputDists[j] < graphRadiuses_d[outputIndicies[j]]) {
                isSupported = true;
                break;
            }
        }
        if (isSupported)
            continue;

        graph_d[graphSize] = mesh[i];
        graphRadiuses_d[graphSize] = cMotionGraphMinRadius;
        graphTransforms_d[graphSize] = Mat4f::Identity();
        ++graphSize;
    }
}

}  // namespace af
3029bca8c68159f097c4778dd83aa984657db9c5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2007 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. Users and possessors of this source code * are hereby granted a nonexclusive, royalty-free license to use this code * in individual and commercial software. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. * * Any use of this source code in individual and commercial software must * include, in the user documentation and internal comments to the code, * the above Disclaimer and U.S. Government End Users Notice. */ /* * This sample implements Mersenne Twister random number generator * and Cartesian Box-Muller transformation on the GPU. * See supplied whitepaper for more explanations. */ #include <stdlib.h> #include <stdio.h> #include <time.h> #include <string.h> #include <cutil_inline.h> #include "MersenneTwister.h" /////////////////////////////////////////////////////////////////////////////// // Common host and device function /////////////////////////////////////////////////////////////////////////////// //ceil(a / b) extern "C" int iDivUp(int a, int b){ return ((a % b) != 0) ? (a / b + 1) : (a / b); } //floor(a / b) extern "C" int iDivDown(int a, int b){ return a / b; } //Align a to nearest higher multiple of b extern "C" int iAlignUp(int a, int b){ return ((a % b) != 0) ? 
(a - a % b + b) : a; } //Align a to nearest lower multiple of b extern "C" int iAlignDown(int a, int b){ return a - a % b; } /////////////////////////////////////////////////////////////////////////////// // Reference MT front-end and Box-Muller transform /////////////////////////////////////////////////////////////////////////////// extern "C" void initMTRef(const char *fname); extern "C" void RandomRef(float *h_Random, int NPerRng, unsigned int seed); extern "C" void BoxMullerRef(float *h_Random, int NPerRng); /////////////////////////////////////////////////////////////////////////////// // Fast GPU random number generator and Box-Muller transform /////////////////////////////////////////////////////////////////////////////// #include "MersenneTwister_kernel.cuh" /////////////////////////////////////////////////////////////////////////////// // Data configuration /////////////////////////////////////////////////////////////////////////////// const int PATH_N = 24000000; const int N_PER_RNG = iAlignUp(iDivUp(PATH_N, MT_RNG_COUNT), 2); const int RAND_N = MT_RNG_COUNT * N_PER_RNG; const unsigned int SEED = 777; #define DO_BOXMULLER /////////////////////////////////////////////////////////////////////////////// // Main program /////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv){ float *d_Rand; float *h_RandCPU, *h_RandGPU; double rCPU, rGPU, delta, sum_delta, max_delta, sum_ref, L1norm, gpuTime; int i, j; unsigned int hTimer; if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") ) cutilDeviceInit(argc, argv); else hipSetDevice( cutGetMaxGflopsDeviceId() ); cutilCheckError( cutCreateTimer(&hTimer) ); printf("Initializing data for %i samples...\n", PATH_N); h_RandCPU = (float *)malloc(RAND_N * sizeof(float)); h_RandGPU = (float *)malloc(RAND_N * sizeof(float)); cutilSafeCall( hipMalloc((void **)&d_Rand, RAND_N * sizeof(float)) ); printf("Loading CPU and GPU twisters configurations...\n"); const char *raw_path = cutFindFilePath("MersenneTwister.raw", argv[0]); const char *dat_path = cutFindFilePath("MersenneTwister.dat", argv[0]); initMTRef(raw_path); loadMTGPU(dat_path); seedMTGPU(SEED); printf("Generating random numbers on GPU...\n"); cutilSafeCall( hipDeviceSynchronize() ); cutilCheckError( cutResetTimer(hTimer) ); cutilCheckError( cutStartTimer(hTimer) ); hipLaunchKernelGGL(( RandomGPU), dim3(32), dim3(128), 0, 0, d_Rand, N_PER_RNG); cutilCheckMsg("RandomGPU() execution failed\n"); cutilSafeCall( hipDeviceSynchronize() ); cutilCheckError( cutStopTimer(hTimer) ); gpuTime = cutGetTimerValue(hTimer); printf("Generated samples : %i \n", RAND_N); printf("RandomGPU() time : %f \n", gpuTime); printf("Samples per second: %E \n", RAND_N / (gpuTime * 0.001)); #ifdef DO_BOXMULLER printf("Applying Box-Muller transformation on GPU...\n"); cutilSafeCall( hipDeviceSynchronize() ); cutilCheckError( cutResetTimer(hTimer) ); cutilCheckError( cutStartTimer(hTimer) ); hipLaunchKernelGGL(( BoxMullerGPU), dim3(32), dim3(128), 0, 0, d_Rand, N_PER_RNG); cutilCheckMsg("BoxMullerGPU() execution failed\n"); cutilSafeCall( hipDeviceSynchronize() ); cutilCheckError( cutStopTimer(hTimer) ); gpuTime = cutGetTimerValue(hTimer); printf("Transformed samples : %i \n", RAND_N); printf("BoxMullerGPU() time : %f \n", gpuTime); printf("Samples per second : %E \n", RAND_N / (gpuTime * 0.001)); #endif printf("Reading back the results...\n"); cutilSafeCall( hipMemcpy(h_RandGPU, d_Rand, RAND_N * sizeof(float), hipMemcpyDeviceToHost) ); printf("Checking GPU 
results...\n"); printf("...generating random numbers on CPU using reference generator\n"); RandomRef(h_RandCPU, N_PER_RNG, SEED); #ifdef DO_BOXMULLER printf("...applying Box-Muller transformation on CPU\n"); BoxMullerRef(h_RandCPU, N_PER_RNG); #endif printf("...comparing the results\n"); max_delta = 0; sum_delta = 0; sum_ref = 0; for(i = 0; i < MT_RNG_COUNT; i++) for(j = 0; j < N_PER_RNG; j++){ rCPU = h_RandCPU[i * N_PER_RNG + j]; rGPU = h_RandGPU[i + j * MT_RNG_COUNT]; delta = fabs(rCPU - rGPU); sum_delta += delta; sum_ref += fabs(rCPU); if(delta >= max_delta) max_delta = delta; } L1norm = (float)(sum_delta / sum_ref); printf("Max absolute error: %E\n", max_delta); printf("L1 norm: %E\n", L1norm); printf((L1norm < 1e-6) ? "TEST PASSED\n" : "TEST FAILED\n"); printf("Shutting down...\n"); cutilSafeCall( hipFree(d_Rand) ); free(h_RandGPU); free(h_RandCPU); cutilCheckError( cutDeleteTimer( hTimer) ); hipDeviceReset(); cutilExit(argc, argv); }
3029bca8c68159f097c4778dd83aa984657db9c5.cu
/* * Copyright 1993-2007 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. Users and possessors of this source code * are hereby granted a nonexclusive, royalty-free license to use this code * in individual and commercial software. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. * * Any use of this source code in individual and commercial software must * include, in the user documentation and internal comments to the code, * the above Disclaimer and U.S. Government End Users Notice. */ /* * This sample implements Mersenne Twister random number generator * and Cartesian Box-Muller transformation on the GPU. * See supplied whitepaper for more explanations. */ #include <stdlib.h> #include <stdio.h> #include <time.h> #include <string.h> #include <cutil_inline.h> #include "MersenneTwister.h" /////////////////////////////////////////////////////////////////////////////// // Common host and device function /////////////////////////////////////////////////////////////////////////////// //ceil(a / b) extern "C" int iDivUp(int a, int b){ return ((a % b) != 0) ? (a / b + 1) : (a / b); } //floor(a / b) extern "C" int iDivDown(int a, int b){ return a / b; } //Align a to nearest higher multiple of b extern "C" int iAlignUp(int a, int b){ return ((a % b) != 0) ? 
(a - a % b + b) : a; } //Align a to nearest lower multiple of b extern "C" int iAlignDown(int a, int b){ return a - a % b; } /////////////////////////////////////////////////////////////////////////////// // Reference MT front-end and Box-Muller transform /////////////////////////////////////////////////////////////////////////////// extern "C" void initMTRef(const char *fname); extern "C" void RandomRef(float *h_Random, int NPerRng, unsigned int seed); extern "C" void BoxMullerRef(float *h_Random, int NPerRng); /////////////////////////////////////////////////////////////////////////////// // Fast GPU random number generator and Box-Muller transform /////////////////////////////////////////////////////////////////////////////// #include "MersenneTwister_kernel.cuh" /////////////////////////////////////////////////////////////////////////////// // Data configuration /////////////////////////////////////////////////////////////////////////////// const int PATH_N = 24000000; const int N_PER_RNG = iAlignUp(iDivUp(PATH_N, MT_RNG_COUNT), 2); const int RAND_N = MT_RNG_COUNT * N_PER_RNG; const unsigned int SEED = 777; #define DO_BOXMULLER /////////////////////////////////////////////////////////////////////////////// // Main program /////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv){ float *d_Rand; float *h_RandCPU, *h_RandGPU; double rCPU, rGPU, delta, sum_delta, max_delta, sum_ref, L1norm, gpuTime; int i, j; unsigned int hTimer; if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") ) cutilDeviceInit(argc, argv); else cudaSetDevice( cutGetMaxGflopsDeviceId() ); cutilCheckError( cutCreateTimer(&hTimer) ); printf("Initializing data for %i samples...\n", PATH_N); h_RandCPU = (float *)malloc(RAND_N * sizeof(float)); h_RandGPU = (float *)malloc(RAND_N * sizeof(float)); cutilSafeCall( cudaMalloc((void **)&d_Rand, RAND_N * sizeof(float)) ); printf("Loading CPU and GPU twisters configurations...\n"); const char *raw_path = cutFindFilePath("MersenneTwister.raw", argv[0]); const char *dat_path = cutFindFilePath("MersenneTwister.dat", argv[0]); initMTRef(raw_path); loadMTGPU(dat_path); seedMTGPU(SEED); printf("Generating random numbers on GPU...\n"); cutilSafeCall( cudaThreadSynchronize() ); cutilCheckError( cutResetTimer(hTimer) ); cutilCheckError( cutStartTimer(hTimer) ); RandomGPU<<<32, 128>>>(d_Rand, N_PER_RNG); cutilCheckMsg("RandomGPU() execution failed\n"); cutilSafeCall( cudaThreadSynchronize() ); cutilCheckError( cutStopTimer(hTimer) ); gpuTime = cutGetTimerValue(hTimer); printf("Generated samples : %i \n", RAND_N); printf("RandomGPU() time : %f \n", gpuTime); printf("Samples per second: %E \n", RAND_N / (gpuTime * 0.001)); #ifdef DO_BOXMULLER printf("Applying Box-Muller transformation on GPU...\n"); cutilSafeCall( cudaThreadSynchronize() ); cutilCheckError( cutResetTimer(hTimer) ); cutilCheckError( cutStartTimer(hTimer) ); BoxMullerGPU<<<32, 128>>>(d_Rand, N_PER_RNG); cutilCheckMsg("BoxMullerGPU() execution failed\n"); cutilSafeCall( cudaThreadSynchronize() ); cutilCheckError( cutStopTimer(hTimer) ); gpuTime = cutGetTimerValue(hTimer); printf("Transformed samples : %i \n", RAND_N); printf("BoxMullerGPU() time : %f \n", gpuTime); printf("Samples per second : %E \n", RAND_N / (gpuTime * 0.001)); #endif printf("Reading back the results...\n"); cutilSafeCall( cudaMemcpy(h_RandGPU, d_Rand, RAND_N * sizeof(float), cudaMemcpyDeviceToHost) ); printf("Checking GPU results...\n"); printf("...generating random numbers on CPU using reference 
generator\n"); RandomRef(h_RandCPU, N_PER_RNG, SEED); #ifdef DO_BOXMULLER printf("...applying Box-Muller transformation on CPU\n"); BoxMullerRef(h_RandCPU, N_PER_RNG); #endif printf("...comparing the results\n"); max_delta = 0; sum_delta = 0; sum_ref = 0; for(i = 0; i < MT_RNG_COUNT; i++) for(j = 0; j < N_PER_RNG; j++){ rCPU = h_RandCPU[i * N_PER_RNG + j]; rGPU = h_RandGPU[i + j * MT_RNG_COUNT]; delta = fabs(rCPU - rGPU); sum_delta += delta; sum_ref += fabs(rCPU); if(delta >= max_delta) max_delta = delta; } L1norm = (float)(sum_delta / sum_ref); printf("Max absolute error: %E\n", max_delta); printf("L1 norm: %E\n", L1norm); printf((L1norm < 1e-6) ? "TEST PASSED\n" : "TEST FAILED\n"); printf("Shutting down...\n"); cutilSafeCall( cudaFree(d_Rand) ); free(h_RandGPU); free(h_RandCPU); cutilCheckError( cutDeleteTimer( hTimer) ); cudaThreadExit(); cutilExit(argc, argv); }
46b79db0cb5615b31e5ee169ef64ee474bf34529.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void adjacent_difference_simple(int *result, int *input)
{
    // compute this thread's global index
    unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i > 0)
    {
        // each thread loads two elements from global memory
        int x_i = input[i];
        int x_i_minus_one = input[i-1];

        // compute the difference using values stored in registers
        result[i] = x_i - x_i_minus_one;
    }
}
46b79db0cb5615b31e5ee169ef64ee474bf34529.cu
#include "includes.h"

__global__ void adjacent_difference_simple(int *result, int *input)
{
    // compute this thread's global index
    unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i > 0)
    {
        // each thread loads two elements from global memory
        int x_i = input[i];
        int x_i_minus_one = input[i-1];

        // compute the difference using values stored in registers
        result[i] = x_i - x_i_minus_one;
    }
}
f13b49e6cf9a27edfa75bfdb68f1a56193bf94a2.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void mAddDrip(float *dense, int centerX, int centerY, float radius)
{
    int Idx = blockIdx.x * blockDim.x + threadIdx.x;
    int x = threadIdx.x;
    int y = blockIdx.x;
    // distance of this cell from the drip center
    float length = sqrt((float)((x-centerX)*(x-centerX)) + (float)((y-centerY)*(y-centerY)));
    if (length < radius)
    {
        dense[Idx] += 200;
    }
}
f13b49e6cf9a27edfa75bfdb68f1a56193bf94a2.cu
#include "includes.h"

__global__ void mAddDrip(float *dense, int centerX, int centerY, float radius)
{
    int Idx = blockIdx.x * blockDim.x + threadIdx.x;
    int x = threadIdx.x;
    int y = blockIdx.x;
    // distance of this cell from the drip center
    float length = sqrt((float)((x-centerX)*(x-centerX)) + (float)((y-centerY)*(y-centerY)));
    if (length < radius)
    {
        dense[Idx] += 200;
    }
}
d81722eab340a01e4c2c7c0c22b40afd21d8b680.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <windows.h>

#define BLOCK_SIZE 16 // submatrix size
#define N 4000        // matrix size is N*N

__global__ void matMult(double *a, double *b, int n, double *c)
{
    int bx = blockIdx.x;  // block index
    int by = blockIdx.y;
    int tx = threadIdx.x; // thread index
    int ty = threadIdx.y;

    // Index of the first sub-matrix of A processed by the block
    int aBegin = n * BLOCK_SIZE * by;
    int aEnd = aBegin + n - 1;
    // Step size used to iterate through the sub-matrices of A
    int aStep = BLOCK_SIZE;
    // Index of the first sub-matrix of B processed by the block
    int bBegin = BLOCK_SIZE * bx;
    // Step size used to iterate through the sub-matrices of B
    int bStep = BLOCK_SIZE * n;
    double sum = 0; // computed subelement

    for (int ia = aBegin, ib = bBegin; ia <= aEnd; ia += aStep, ib += bStep)
    {
        // Shared memory for the sub-matrix of A
        __shared__ float as[BLOCK_SIZE][BLOCK_SIZE];
        // Shared memory for the sub-matrix of B
        __shared__ float bs[BLOCK_SIZE][BLOCK_SIZE];

        // Load the matrices from global memory to shared memory;
        as[ty][tx] = a[ia + n * ty + tx];
        bs[ty][tx] = b[ib + n * ty + tx];
        __syncthreads(); // Synchronize to make sure the matrices are loaded

        // Multiply the two matrices together;
        for (int k = 0; k < BLOCK_SIZE; k++)
            sum += as[ty][k] * bs[k][tx];

        // Synchronize to make sure that the preceding
        // computation is done before loading two new
        // sub-matrices of A and B in the next iteration
        __syncthreads();
    }

    // Write the block sub-matrix to global memory;
    // each thread writes one element
    int ic = n * BLOCK_SIZE * by + BLOCK_SIZE * bx;
    c[ic + n * ty + tx] = sum;
}

int main(int argc, char *argv[])
{
    int numBytes = N * N * sizeof(double);

    // allocate host memory
    double *a = new double[N*N];
    double *b = new double[N*N];
    double *c = new double[N*N];

    for (int i = 0; i < N; i++)
        for (int j = 0; j < N; j++)
        {
            a[i * N + j] = 5*i + j*7 + 13;
            b[i * N + j] = 5*i + j*7 + 13;
        }

    // allocate device memory
    double *adev = NULL;
    double *bdev = NULL;
    double *cdev = NULL;
    hipMalloc((void**)&adev, numBytes);
    hipMalloc((void**)&bdev, numBytes);
    hipMalloc((void**)&cdev, numBytes);

    // set kernel launch configuration
    dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
    dim3 blocks(N / threads.x, N / threads.y);

    // create cuda event handles
    hipEvent_t start, stop;
    float gpuTime = 0.0f;
    hipEventCreate(&start);
    hipEventCreate(&stop);

    // asynchronously issue work to the GPU (all to stream 0)
    hipEventRecord(start, 0);
    hipMemcpy(adev, a, numBytes, hipMemcpyHostToDevice);
    hipMemcpy(bdev, b, numBytes, hipMemcpyHostToDevice);
    hipLaunchKernelGGL(( matMult), dim3(blocks), dim3(threads), 0, 0, adev, bdev, N, cdev);
    hipMemcpy(c, cdev, numBytes, hipMemcpyDeviceToHost);
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&gpuTime, start, stop);

    // print the cpu and gpu times
    printf("time spent executing by the GPU: %0.f milliseconds\n", gpuTime);

    // release resources
    hipEventDestroy(start);
    hipEventDestroy(stop);
    hipFree(adev);
    hipFree(bdev);
    hipFree(cdev);
    delete [] a;
    delete [] b;
    delete [] c;

    system("Pause");
    return 0;
}
d81722eab340a01e4c2c7c0c22b40afd21d8b680.cu
#include <cuda.h>
#include <stdio.h>
#include <windows.h>

#define BLOCK_SIZE 16 // submatrix size
#define N 4000        // matrix size is N*N

__global__ void matMult(double *a, double *b, int n, double *c)
{
    int bx = blockIdx.x;  // block index
    int by = blockIdx.y;
    int tx = threadIdx.x; // thread index
    int ty = threadIdx.y;

    // Index of the first sub-matrix of A processed by the block
    int aBegin = n * BLOCK_SIZE * by;
    int aEnd = aBegin + n - 1;
    // Step size used to iterate through the sub-matrices of A
    int aStep = BLOCK_SIZE;
    // Index of the first sub-matrix of B processed by the block
    int bBegin = BLOCK_SIZE * bx;
    // Step size used to iterate through the sub-matrices of B
    int bStep = BLOCK_SIZE * n;
    double sum = 0; // computed subelement

    for (int ia = aBegin, ib = bBegin; ia <= aEnd; ia += aStep, ib += bStep)
    {
        // Shared memory for the sub-matrix of A
        __shared__ float as[BLOCK_SIZE][BLOCK_SIZE];
        // Shared memory for the sub-matrix of B
        __shared__ float bs[BLOCK_SIZE][BLOCK_SIZE];

        // Load the matrices from global memory to shared memory;
        as[ty][tx] = a[ia + n * ty + tx];
        bs[ty][tx] = b[ib + n * ty + tx];
        __syncthreads(); // Synchronize to make sure the matrices are loaded

        // Multiply the two matrices together;
        for (int k = 0; k < BLOCK_SIZE; k++)
            sum += as[ty][k] * bs[k][tx];

        // Synchronize to make sure that the preceding
        // computation is done before loading two new
        // sub-matrices of A and B in the next iteration
        __syncthreads();
    }

    // Write the block sub-matrix to global memory;
    // each thread writes one element
    int ic = n * BLOCK_SIZE * by + BLOCK_SIZE * bx;
    c[ic + n * ty + tx] = sum;
}

int main(int argc, char *argv[])
{
    int numBytes = N * N * sizeof(double);

    // allocate host memory
    double *a = new double[N*N];
    double *b = new double[N*N];
    double *c = new double[N*N];

    for (int i = 0; i < N; i++)
        for (int j = 0; j < N; j++)
        {
            a[i * N + j] = 5*i + j*7 + 13;
            b[i * N + j] = 5*i + j*7 + 13;
        }

    // allocate device memory
    double *adev = NULL;
    double *bdev = NULL;
    double *cdev = NULL;
    cudaMalloc((void**)&adev, numBytes);
    cudaMalloc((void**)&bdev, numBytes);
    cudaMalloc((void**)&cdev, numBytes);

    // set kernel launch configuration
    dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
    dim3 blocks(N / threads.x, N / threads.y);

    // create cuda event handles
    cudaEvent_t start, stop;
    float gpuTime = 0.0f;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // asynchronously issue work to the GPU (all to stream 0)
    cudaEventRecord(start, 0);
    cudaMemcpy(adev, a, numBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(bdev, b, numBytes, cudaMemcpyHostToDevice);
    matMult<<<blocks, threads>>>(adev, bdev, N, cdev);
    cudaMemcpy(c, cdev, numBytes, cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&gpuTime, start, stop);

    // print the cpu and gpu times
    printf("time spent executing by the GPU: %0.f milliseconds\n", gpuTime);

    // release resources
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(adev);
    cudaFree(bdev);
    cudaFree(cdev);
    delete [] a;
    delete [] b;
    delete [] c;

    system("Pause");
    return 0;
}
ba2cdb590447e8f153951b043159aacf4547016f.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.h"
#define TX 32
#define TY 32
#define RAD 1

int divUp(int a, int b) { return (a + b - 1) / b; }

__device__ float clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); }

__device__ int idxClip(int idx, int idxMax) {
    return idx > (idxMax-1) ? (idxMax-1) : (idx < 0 ? 0 : idx);
}

__device__ int flatten(int col, int row, int width, int height) {
    return idxClip(col, width) + idxClip(row, height)*width;
}

__global__ void sharpenKernel(float4 *d_out, const float4 *d_in, const float *d_filter, int w, int h) {
    const int c = threadIdx.x + blockDim.x * blockIdx.x;
    const int r = threadIdx.y + blockDim.y * blockIdx.y;
    if ((c >= w) || (r >= h)) return;
    const int i = flatten(c, r, w, h);
    const int fltSz = 2*RAD + 1;
    float rgb[3] = {0.f, 0.f, 0.f};

    for (int rd = -RAD; rd <= RAD; ++rd) {
        for (int cd = -RAD; cd <= RAD; ++cd) {
            int imgIdx = flatten(c + cd, r + rd, w, h);
            int fltIdx = flatten(RAD + cd, RAD + rd, fltSz, fltSz);
            float4 color = d_in[imgIdx];
            float weight = d_filter[fltIdx];
            rgb[0] += weight*color.x;
            rgb[1] += weight*color.y;
            rgb[2] += weight*color.z;
        }
    }
    d_out[i].x = clip(rgb[0]);
    d_out[i].y = clip(rgb[1]);
    d_out[i].z = clip(rgb[2]);
}

void sharpenParallel(float4 *arr, int w, int h) {
    const int fltSz = 2 * RAD + 1;
    const float filter[9] = {-0.5, 1.0, 0.5,
                              1.0, -4.0, 1.0,
                              0.5, 1.0, -0.5};
    float4 *d_in = 0, *d_out = 0;
    float *d_filter = 0;

    hipMalloc(&d_in, w*h*sizeof(float4));
    hipMemcpy(d_in, arr, w*h*sizeof(float4), hipMemcpyHostToDevice);
    hipMalloc(&d_out, w*h*sizeof(float4));
    hipMalloc(&d_filter, fltSz*fltSz*sizeof(float));
    hipMemcpy(d_filter, filter, fltSz*fltSz*sizeof(float), hipMemcpyHostToDevice);

    const dim3 blockSize(TX, TY);
    const dim3 gridSize(divUp(w, blockSize.x), divUp(h, blockSize.y));
    hipLaunchKernelGGL(( sharpenKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_out, d_in, d_filter, w, h);

    hipMemcpy(arr, d_out, w*h*sizeof(float4), hipMemcpyDeviceToHost);
    hipFree(d_in);
    hipFree(d_out);
    hipFree(d_filter);
}
ba2cdb590447e8f153951b043159aacf4547016f.cu
#include "kernel.h"
#define TX 32
#define TY 32
#define RAD 1

int divUp(int a, int b) { return (a + b - 1) / b; }

__device__ float clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); }

__device__ int idxClip(int idx, int idxMax) {
    return idx > (idxMax-1) ? (idxMax-1) : (idx < 0 ? 0 : idx);
}

__device__ int flatten(int col, int row, int width, int height) {
    return idxClip(col, width) + idxClip(row, height)*width;
}

__global__ void sharpenKernel(float4 *d_out, const float4 *d_in, const float *d_filter, int w, int h) {
    const int c = threadIdx.x + blockDim.x * blockIdx.x;
    const int r = threadIdx.y + blockDim.y * blockIdx.y;
    if ((c >= w) || (r >= h)) return;
    const int i = flatten(c, r, w, h);
    const int fltSz = 2*RAD + 1;
    float rgb[3] = {0.f, 0.f, 0.f};

    for (int rd = -RAD; rd <= RAD; ++rd) {
        for (int cd = -RAD; cd <= RAD; ++cd) {
            int imgIdx = flatten(c + cd, r + rd, w, h);
            int fltIdx = flatten(RAD + cd, RAD + rd, fltSz, fltSz);
            float4 color = d_in[imgIdx];
            float weight = d_filter[fltIdx];
            rgb[0] += weight*color.x;
            rgb[1] += weight*color.y;
            rgb[2] += weight*color.z;
        }
    }
    d_out[i].x = clip(rgb[0]);
    d_out[i].y = clip(rgb[1]);
    d_out[i].z = clip(rgb[2]);
}

void sharpenParallel(float4 *arr, int w, int h) {
    const int fltSz = 2 * RAD + 1;
    const float filter[9] = {-0.5, 1.0, 0.5,
                              1.0, -4.0, 1.0,
                              0.5, 1.0, -0.5};
    float4 *d_in = 0, *d_out = 0;
    float *d_filter = 0;

    cudaMalloc(&d_in, w*h*sizeof(float4));
    cudaMemcpy(d_in, arr, w*h*sizeof(float4), cudaMemcpyHostToDevice);
    cudaMalloc(&d_out, w*h*sizeof(float4));
    cudaMalloc(&d_filter, fltSz*fltSz*sizeof(float));
    cudaMemcpy(d_filter, filter, fltSz*fltSz*sizeof(float), cudaMemcpyHostToDevice);

    const dim3 blockSize(TX, TY);
    const dim3 gridSize(divUp(w, blockSize.x), divUp(h, blockSize.y));
    sharpenKernel<<<gridSize, blockSize>>>(d_out, d_in, d_filter, w, h);

    cudaMemcpy(arr, d_out, w*h*sizeof(float4), cudaMemcpyDeviceToHost);
    cudaFree(d_in);
    cudaFree(d_out);
    cudaFree(d_filter);
}
d018471241bd4c918572ea768e8e21e79dce8388.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <assert.h> #include <vector> #include <iostream> #include "yololayer.h" #include "cuda_utils.h" namespace Tn { template<typename T> void write(char*& buffer, const T& val) { *reinterpret_cast<T*>(buffer) = val; buffer += sizeof(T); } template<typename T> void read(const char*& buffer, T& val) { val = *reinterpret_cast<const T*>(buffer); buffer += sizeof(T); } } using namespace Yolo; namespace nvinfer1 { YoloLayerPlugin::YoloLayerPlugin(int classCount, int netWidth, int netHeight, int maxOut, const std::vector<Yolo::YoloKernel> &vYoloKernel) { mClassCount = classCount; mYoloV5NetWidth = netWidth; mYoloV5NetHeight = netHeight; mMaxOutObject = maxOut; mYoloKernel = vYoloKernel; mKernelCount = vYoloKernel.size(); CUDA_CHECK(hipHostMalloc(&mAnchor, mKernelCount * sizeof(void*))); size_t AnchorLen = sizeof(float)* CHECK_COUNT * 2; for (int ii = 0; ii < mKernelCount; ii++) { CUDA_CHECK(hipMalloc(&mAnchor[ii], AnchorLen)); const auto& yolo = mYoloKernel[ii]; CUDA_CHECK(hipMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, hipMemcpyHostToDevice)); } } YoloLayerPlugin::~YoloLayerPlugin() { for (int ii = 0; ii < mKernelCount; ii++) { CUDA_CHECK(hipFree(mAnchor[ii])); } CUDA_CHECK(hipHostFree(mAnchor)); } // create the plugin at runtime from a byte stream YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length) { using namespace Tn; const char *d = reinterpret_cast<const char *>(data), *a = d; read(d, mClassCount); read(d, mThreadCount); read(d, mKernelCount); read(d, mYoloV5NetWidth); read(d, mYoloV5NetHeight); read(d, mMaxOutObject); mYoloKernel.resize(mKernelCount); auto kernelSize = mKernelCount * sizeof(YoloKernel); memcpy(mYoloKernel.data(), d, kernelSize); d += kernelSize; CUDA_CHECK(hipHostMalloc(&mAnchor, mKernelCount * sizeof(void*))); size_t AnchorLen = sizeof(float)* CHECK_COUNT * 2; for (int ii = 0; ii < mKernelCount; ii++) { CUDA_CHECK(hipMalloc(&mAnchor[ii], AnchorLen)); const auto& yolo = mYoloKernel[ii]; CUDA_CHECK(hipMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, hipMemcpyHostToDevice)); } assert(d == a + length); } void YoloLayerPlugin::serialize(void* buffer) const TRT_NOEXCEPT { using namespace Tn; char* d = static_cast<char*>(buffer), *a = d; write(d, mClassCount); write(d, mThreadCount); write(d, mKernelCount); write(d, mYoloV5NetWidth); write(d, mYoloV5NetHeight); write(d, mMaxOutObject); auto kernelSize = mKernelCount * sizeof(YoloKernel); memcpy(d, mYoloKernel.data(), kernelSize); d += kernelSize; assert(d == a + getSerializationSize()); } size_t YoloLayerPlugin::getSerializationSize() const TRT_NOEXCEPT { return sizeof(mClassCount) + sizeof(mThreadCount) + sizeof(mKernelCount) + sizeof(Yolo::YoloKernel) * mYoloKernel.size() + sizeof(mYoloV5NetWidth) + sizeof(mYoloV5NetHeight) + sizeof(mMaxOutObject); } int YoloLayerPlugin::initialize() TRT_NOEXCEPT { return 0; } Dims YoloLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims) TRT_NOEXCEPT { assert(index < 2); //output the result to channel if (index == 0) { return Dims3(mMaxOutObject, 1, 4); } return DimsHW(mMaxOutObject, mClassCount); } // Set plugin namespace void YoloLayerPlugin::setPluginNamespace(const char* pluginNamespace) TRT_NOEXCEPT { mPluginNamespace = pluginNamespace; } const char* YoloLayerPlugin::getPluginNamespace() const TRT_NOEXCEPT { return mPluginNamespace; } // Return the DataType of the plugin output at the requested index DataType YoloLayerPlugin::getOutputDataType(int index, const 
nvinfer1::DataType* inputTypes, int nbInputs) const TRT_NOEXCEPT { return DataType::kFLOAT; } // Return true if output tensor is broadcast across a batch. bool YoloLayerPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const TRT_NOEXCEPT { return false; } // Return true if plugin can use input that is broadcast across batch without replication. bool YoloLayerPlugin::canBroadcastInputAcrossBatch(int inputIndex) const TRT_NOEXCEPT { return false; } void YoloLayerPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput) TRT_NOEXCEPT { } // Attach the plugin object to an execution context and grant the plugin the access to some context resource. void YoloLayerPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) TRT_NOEXCEPT { } // Detach the plugin object from its execution context. void YoloLayerPlugin::detachFromContext() TRT_NOEXCEPT {} const char* YoloLayerPlugin::getPluginType() const TRT_NOEXCEPT { return "YoloLayer_TRT"; } const char* YoloLayerPlugin::getPluginVersion() const TRT_NOEXCEPT { return "1"; } void YoloLayerPlugin::destroy() TRT_NOEXCEPT { delete this; } // Clone the plugin IPluginV2IOExt* YoloLayerPlugin::clone() const TRT_NOEXCEPT { YoloLayerPlugin* p = new YoloLayerPlugin(mClassCount, mYoloV5NetWidth, mYoloV5NetHeight, mMaxOutObject, mYoloKernel); p->setPluginNamespace(mPluginNamespace); return p; } __device__ float Logist(float data) { return 1.0f / (1.0f + expf(-data)); }; __global__ void CalDetection(const float *input, float *bboxData, float *scoreData, int *countData, int noElements, const int netwidth, const int netheight, int maxoutobject, int yoloWidth, int yoloHeight, const float anchors[CHECK_COUNT * 2], int classes) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx >= noElements) return; int total_grid = yoloWidth * yoloHeight; int bnIdx = idx / total_grid; idx = idx - total_grid * bnIdx; int info_len_i = 5 + classes; // 85 const float* curInput = input + bnIdx * (info_len_i * total_grid * CHECK_COUNT); // b*h*w*3*85 for (int k = 0; k < CHECK_COUNT; ++k) { float box_prob = Logist(curInput[idx + k * info_len_i * total_grid + 4 * total_grid]); if (box_prob < IGNORE_THRESH) continue; int *res_count = countData + bnIdx; int count = (int)atomicAdd(res_count, 1); if (count >= maxoutobject) return; float *curBbox = bboxData + bnIdx * maxoutobject * 4 + count * 4; float *curScore = scoreData + bnIdx * maxoutobject * classes + count * classes; for (int i = 5; i < info_len_i; ++i) { float p = Logist(curInput[idx + k * info_len_i * total_grid + i * total_grid]); curScore[i - 5] = p * box_prob; } int row = idx / yoloWidth; int col = idx % yoloWidth; //Location // pytorch: // y = x[i].sigmoid() // y[..., 0:2] = (y[..., 0:2] * 2. 
- 0.5 + self.grid[i].to(x[i].device)) * self.stride[i] # xy // y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh // X: (sigmoid(tx) + cx)/FeaturemapW * netwidth float cx = (col - 0.5f + 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 0 * total_grid])) * netwidth / yoloWidth; float cy = (row - 0.5f + 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 1 * total_grid])) * netheight / yoloHeight; // W: (Pw * e^tw) / FeaturemapW * netwidth // v5: https://github.com/ultralytics/yolov5/issues/471 float w = 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 2 * total_grid]); w = w * w * anchors[2 * k]; float h = 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 3 * total_grid]); h = h * h * anchors[2 * k + 1]; // cx,cy,w,h to x1,y1,x2,y2 curBbox[0] = cx - 0.5 * w; curBbox[1] = cy - 0.5 * h; curBbox[2] = cx + 0.5 * w; curBbox[3] = cy + 0.5 * h; } } void YoloLayerPlugin::forwardGpu(const float* const* inputs, void** outputs, void* workspace, hipStream_t stream, int batchSize) { float *bboxData = (float *)outputs[0]; float *scoreData = (float *)outputs[1]; int *countData = (int *)workspace; CUDA_CHECK(hipMemset(countData, 0, sizeof(int) * batchSize)); CUDA_CHECK(hipMemset(bboxData, 0, sizeof(float) * mMaxOutObject * 4 * batchSize)); CUDA_CHECK(hipMemset(scoreData, 0, sizeof(float) * mMaxOutObject * mClassCount * batchSize)); int numElem = 0; for (unsigned int i = 0; i < mYoloKernel.size(); ++i){ const auto& yolo = mYoloKernel[i]; numElem = yolo.width * yolo.height * batchSize; if (numElem < mThreadCount) mThreadCount = numElem; hipLaunchKernelGGL(( CalDetection), dim3((numElem + mThreadCount - 1) / mThreadCount), dim3(mThreadCount), 0, 0, inputs[i], bboxData, scoreData, countData, numElem, mYoloV5NetWidth, mYoloV5NetHeight, mMaxOutObject, yolo.width, yolo.height, (float*)mAnchor[i], mClassCount); } } int YoloLayerPlugin::enqueue(int batchSize, const void *const *inputs, void ** outputs, void* workspace, hipStream_t stream) TRT_NOEXCEPT { forwardGpu((const float* const*)inputs, outputs, workspace, stream, batchSize); return 0; } PluginFieldCollection YoloPluginCreator::mFC{}; std::vector<PluginField> YoloPluginCreator::mPluginAttributes; YoloPluginCreator::YoloPluginCreator() { mPluginAttributes.clear(); mFC.nbFields = mPluginAttributes.size(); mFC.fields = mPluginAttributes.data(); } const char* YoloPluginCreator::getPluginName() const TRT_NOEXCEPT { return "YoloLayer_TRT"; } const char* YoloPluginCreator::getPluginVersion() const TRT_NOEXCEPT { return "1"; } const PluginFieldCollection* YoloPluginCreator::getFieldNames() TRT_NOEXCEPT { return &mFC; } IPluginV2IOExt* YoloPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc) TRT_NOEXCEPT { assert(fc->nbFields == 2); assert(strcmp(fc->fields[0].name, "netinfo") == 0); assert(strcmp(fc->fields[1].name, "kernels") == 0); int *p_netinfo = (int*)(fc->fields[0].data); int class_count = p_netinfo[0]; int input_w = p_netinfo[1]; int input_h = p_netinfo[2]; int max_output_object_count = p_netinfo[3]; std::vector<Yolo::YoloKernel> kernels(fc->fields[1].length); memcpy(&kernels[0], fc->fields[1].data, kernels.size() * sizeof(Yolo::YoloKernel)); YoloLayerPlugin* obj = new YoloLayerPlugin(class_count, input_w, input_h, max_output_object_count, kernels); obj->setPluginNamespace(mNamespace.c_str()); return obj; } IPluginV2IOExt* YoloPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength) TRT_NOEXCEPT { // This object will be deleted when the network is 
destroyed, which will // call YoloLayerPlugin::destroy() YoloLayerPlugin* obj = new YoloLayerPlugin(serialData, serialLength); obj->setPluginNamespace(mNamespace.c_str()); return obj; } }
d018471241bd4c918572ea768e8e21e79dce8388.cu
#include <assert.h> #include <vector> #include <iostream> #include "yololayer.h" #include "cuda_utils.h" namespace Tn { template<typename T> void write(char*& buffer, const T& val) { *reinterpret_cast<T*>(buffer) = val; buffer += sizeof(T); } template<typename T> void read(const char*& buffer, T& val) { val = *reinterpret_cast<const T*>(buffer); buffer += sizeof(T); } } using namespace Yolo; namespace nvinfer1 { YoloLayerPlugin::YoloLayerPlugin(int classCount, int netWidth, int netHeight, int maxOut, const std::vector<Yolo::YoloKernel> &vYoloKernel) { mClassCount = classCount; mYoloV5NetWidth = netWidth; mYoloV5NetHeight = netHeight; mMaxOutObject = maxOut; mYoloKernel = vYoloKernel; mKernelCount = vYoloKernel.size(); CUDA_CHECK(cudaMallocHost(&mAnchor, mKernelCount * sizeof(void*))); size_t AnchorLen = sizeof(float)* CHECK_COUNT * 2; for (int ii = 0; ii < mKernelCount; ii++) { CUDA_CHECK(cudaMalloc(&mAnchor[ii], AnchorLen)); const auto& yolo = mYoloKernel[ii]; CUDA_CHECK(cudaMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, cudaMemcpyHostToDevice)); } } YoloLayerPlugin::~YoloLayerPlugin() { for (int ii = 0; ii < mKernelCount; ii++) { CUDA_CHECK(cudaFree(mAnchor[ii])); } CUDA_CHECK(cudaFreeHost(mAnchor)); } // create the plugin at runtime from a byte stream YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length) { using namespace Tn; const char *d = reinterpret_cast<const char *>(data), *a = d; read(d, mClassCount); read(d, mThreadCount); read(d, mKernelCount); read(d, mYoloV5NetWidth); read(d, mYoloV5NetHeight); read(d, mMaxOutObject); mYoloKernel.resize(mKernelCount); auto kernelSize = mKernelCount * sizeof(YoloKernel); memcpy(mYoloKernel.data(), d, kernelSize); d += kernelSize; CUDA_CHECK(cudaMallocHost(&mAnchor, mKernelCount * sizeof(void*))); size_t AnchorLen = sizeof(float)* CHECK_COUNT * 2; for (int ii = 0; ii < mKernelCount; ii++) { CUDA_CHECK(cudaMalloc(&mAnchor[ii], AnchorLen)); const auto& yolo = mYoloKernel[ii]; CUDA_CHECK(cudaMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, cudaMemcpyHostToDevice)); } assert(d == a + length); } void YoloLayerPlugin::serialize(void* buffer) const TRT_NOEXCEPT { using namespace Tn; char* d = static_cast<char*>(buffer), *a = d; write(d, mClassCount); write(d, mThreadCount); write(d, mKernelCount); write(d, mYoloV5NetWidth); write(d, mYoloV5NetHeight); write(d, mMaxOutObject); auto kernelSize = mKernelCount * sizeof(YoloKernel); memcpy(d, mYoloKernel.data(), kernelSize); d += kernelSize; assert(d == a + getSerializationSize()); } size_t YoloLayerPlugin::getSerializationSize() const TRT_NOEXCEPT { return sizeof(mClassCount) + sizeof(mThreadCount) + sizeof(mKernelCount) + sizeof(Yolo::YoloKernel) * mYoloKernel.size() + sizeof(mYoloV5NetWidth) + sizeof(mYoloV5NetHeight) + sizeof(mMaxOutObject); } int YoloLayerPlugin::initialize() TRT_NOEXCEPT { return 0; } Dims YoloLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims) TRT_NOEXCEPT { assert(index < 2); //output the result to channel if (index == 0) { return Dims3(mMaxOutObject, 1, 4); } return DimsHW(mMaxOutObject, mClassCount); } // Set plugin namespace void YoloLayerPlugin::setPluginNamespace(const char* pluginNamespace) TRT_NOEXCEPT { mPluginNamespace = pluginNamespace; } const char* YoloLayerPlugin::getPluginNamespace() const TRT_NOEXCEPT { return mPluginNamespace; } // Return the DataType of the plugin output at the requested index DataType YoloLayerPlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const TRT_NOEXCEPT { return 
DataType::kFLOAT; } // Return true if output tensor is broadcast across a batch. bool YoloLayerPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const TRT_NOEXCEPT { return false; } // Return true if plugin can use input that is broadcast across batch without replication. bool YoloLayerPlugin::canBroadcastInputAcrossBatch(int inputIndex) const TRT_NOEXCEPT { return false; } void YoloLayerPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput) TRT_NOEXCEPT { } // Attach the plugin object to an execution context and grant the plugin the access to some context resource. void YoloLayerPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) TRT_NOEXCEPT { } // Detach the plugin object from its execution context. void YoloLayerPlugin::detachFromContext() TRT_NOEXCEPT {} const char* YoloLayerPlugin::getPluginType() const TRT_NOEXCEPT { return "YoloLayer_TRT"; } const char* YoloLayerPlugin::getPluginVersion() const TRT_NOEXCEPT { return "1"; } void YoloLayerPlugin::destroy() TRT_NOEXCEPT { delete this; } // Clone the plugin IPluginV2IOExt* YoloLayerPlugin::clone() const TRT_NOEXCEPT { YoloLayerPlugin* p = new YoloLayerPlugin(mClassCount, mYoloV5NetWidth, mYoloV5NetHeight, mMaxOutObject, mYoloKernel); p->setPluginNamespace(mPluginNamespace); return p; } __device__ float Logist(float data) { return 1.0f / (1.0f + expf(-data)); }; __global__ void CalDetection(const float *input, float *bboxData, float *scoreData, int *countData, int noElements, const int netwidth, const int netheight, int maxoutobject, int yoloWidth, int yoloHeight, const float anchors[CHECK_COUNT * 2], int classes) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx >= noElements) return; int total_grid = yoloWidth * yoloHeight; int bnIdx = idx / total_grid; idx = idx - total_grid * bnIdx; int info_len_i = 5 + classes; // 85 const float* curInput = input + bnIdx * (info_len_i * total_grid * CHECK_COUNT); // b*h*w*3*85 for (int k = 0; k < CHECK_COUNT; ++k) { float box_prob = Logist(curInput[idx + k * info_len_i * total_grid + 4 * total_grid]); if (box_prob < IGNORE_THRESH) continue; int *res_count = countData + bnIdx; int count = (int)atomicAdd(res_count, 1); if (count >= maxoutobject) return; float *curBbox = bboxData + bnIdx * maxoutobject * 4 + count * 4; float *curScore = scoreData + bnIdx * maxoutobject * classes + count * classes; for (int i = 5; i < info_len_i; ++i) { float p = Logist(curInput[idx + k * info_len_i * total_grid + i * total_grid]); curScore[i - 5] = p * box_prob; } int row = idx / yoloWidth; int col = idx % yoloWidth; //Location // pytorch: // y = x[i].sigmoid() // y[..., 0:2] = (y[..., 0:2] * 2. 
- 0.5 + self.grid[i].to(x[i].device)) * self.stride[i] # xy // y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh // X: (sigmoid(tx) + cx)/FeaturemapW * netwidth float cx = (col - 0.5f + 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 0 * total_grid])) * netwidth / yoloWidth; float cy = (row - 0.5f + 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 1 * total_grid])) * netheight / yoloHeight; // W: (Pw * e^tw) / FeaturemapW * netwidth // v5: https://github.com/ultralytics/yolov5/issues/471 float w = 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 2 * total_grid]); w = w * w * anchors[2 * k]; float h = 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 3 * total_grid]); h = h * h * anchors[2 * k + 1]; // cx,cy,w,h to x1,y1,x2,y2 curBbox[0] = cx - 0.5 * w; curBbox[1] = cy - 0.5 * h; curBbox[2] = cx + 0.5 * w; curBbox[3] = cy + 0.5 * h; } } void YoloLayerPlugin::forwardGpu(const float* const* inputs, void** outputs, void* workspace, cudaStream_t stream, int batchSize) { float *bboxData = (float *)outputs[0]; float *scoreData = (float *)outputs[1]; int *countData = (int *)workspace; CUDA_CHECK(cudaMemset(countData, 0, sizeof(int) * batchSize)); CUDA_CHECK(cudaMemset(bboxData, 0, sizeof(float) * mMaxOutObject * 4 * batchSize)); CUDA_CHECK(cudaMemset(scoreData, 0, sizeof(float) * mMaxOutObject * mClassCount * batchSize)); int numElem = 0; for (unsigned int i = 0; i < mYoloKernel.size(); ++i){ const auto& yolo = mYoloKernel[i]; numElem = yolo.width * yolo.height * batchSize; if (numElem < mThreadCount) mThreadCount = numElem; CalDetection<<< (numElem + mThreadCount - 1) / mThreadCount, mThreadCount>>> (inputs[i], bboxData, scoreData, countData, numElem, mYoloV5NetWidth, mYoloV5NetHeight, mMaxOutObject, yolo.width, yolo.height, (float*)mAnchor[i], mClassCount); } } int YoloLayerPlugin::enqueue(int batchSize, const void *const *inputs, void ** outputs, void* workspace, cudaStream_t stream) TRT_NOEXCEPT { forwardGpu((const float* const*)inputs, outputs, workspace, stream, batchSize); return 0; } PluginFieldCollection YoloPluginCreator::mFC{}; std::vector<PluginField> YoloPluginCreator::mPluginAttributes; YoloPluginCreator::YoloPluginCreator() { mPluginAttributes.clear(); mFC.nbFields = mPluginAttributes.size(); mFC.fields = mPluginAttributes.data(); } const char* YoloPluginCreator::getPluginName() const TRT_NOEXCEPT { return "YoloLayer_TRT"; } const char* YoloPluginCreator::getPluginVersion() const TRT_NOEXCEPT { return "1"; } const PluginFieldCollection* YoloPluginCreator::getFieldNames() TRT_NOEXCEPT { return &mFC; } IPluginV2IOExt* YoloPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc) TRT_NOEXCEPT { assert(fc->nbFields == 2); assert(strcmp(fc->fields[0].name, "netinfo") == 0); assert(strcmp(fc->fields[1].name, "kernels") == 0); int *p_netinfo = (int*)(fc->fields[0].data); int class_count = p_netinfo[0]; int input_w = p_netinfo[1]; int input_h = p_netinfo[2]; int max_output_object_count = p_netinfo[3]; std::vector<Yolo::YoloKernel> kernels(fc->fields[1].length); memcpy(&kernels[0], fc->fields[1].data, kernels.size() * sizeof(Yolo::YoloKernel)); YoloLayerPlugin* obj = new YoloLayerPlugin(class_count, input_w, input_h, max_output_object_count, kernels); obj->setPluginNamespace(mNamespace.c_str()); return obj; } IPluginV2IOExt* YoloPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength) TRT_NOEXCEPT { // This object will be deleted when the network is destroyed, which will // call 
YoloLayerPlugin::destroy() YoloLayerPlugin* obj = new YoloLayerPlugin(serialData, serialLength); obj->setPluginNamespace(mNamespace.c_str()); return obj; } }
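The two files above are the hipified and original CUDA builds of the same YOLOv5 TensorRT decode plugin. As a quick reference for the box arithmetic that CalDetection implements (the sigmoid-based xy/wh formulas quoted in its PyTorch comments), here is a minimal host-side sketch; it is not part of either file, and the names DecodedBox, sigmoidf and decode_yolov5_box are illustrative only.

// Host-side reference for the per-anchor decode done in CalDetection (illustrative names).
#include <cmath>

struct DecodedBox { float x1, y1, x2, y2; };

static inline float sigmoidf(float v) { return 1.0f / (1.0f + std::exp(-v)); }

// tx, ty, tw, th are the raw network outputs for one anchor at grid cell (col, row).
DecodedBox decode_yolov5_box(float tx, float ty, float tw, float th,
                             int col, int row, int yolo_w, int yolo_h,
                             int net_w, int net_h,
                             float anchor_w, float anchor_h) {
    // Center: (2*sigmoid(t) - 0.5 + grid) * stride, where stride = net size / feature-map size.
    float cx = (col - 0.5f + 2.0f * sigmoidf(tx)) * net_w / yolo_w;
    float cy = (row - 0.5f + 2.0f * sigmoidf(ty)) * net_h / yolo_h;
    // Size: (2*sigmoid(t))^2 * anchor, per https://github.com/ultralytics/yolov5/issues/471.
    float w = 2.0f * sigmoidf(tw);
    w = w * w * anchor_w;
    float h = 2.0f * sigmoidf(th);
    h = h * h * anchor_h;
    // cx,cy,w,h -> x1,y1,x2,y2, matching the curBbox[0..3] writes in the kernel.
    return { cx - 0.5f * w, cy - 0.5f * h, cx + 0.5f * w, cy + 0.5f * h };
}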
1e16c70b1fcd1f4d29980b3071741ecc31f6745f.hip
// !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 256, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp<
        int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
        int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutDst, int32_t,
        LayoutDst, int32_t, cutlass::convolution::ConvType::kConvolution,
        cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, ThreadBlockShape,
        WarpShape, InstructionShape, EpilogueOp,
        cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
                cutlass::convolution::ConvType::kConvolution>,
        2, 16, 16, true, cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
        const typename Convolution::ElementSrc* d_src,
        const typename Convolution::ElementFilter* d_filter,
        const typename Convolution::ElementBias* d_bias,
        const typename Convolution::ElementDst* d_z,
        typename Convolution::ElementDst* d_dst, int* workspace,
        typename Convolution::ConvolutionParameter const& conv_param,
        typename Convolution::EpilogueOutputOp::Params const& epilogue,
        hipStream_t stream);
#pragma GCC diagnostic pop
#endif
1e16c70b1fcd1f4d29980b3071741ecc31f6745f.cu
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 256, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp<
        int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
        int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutDst, int32_t,
        LayoutDst, int32_t, cutlass::convolution::ConvType::kConvolution,
        cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, ThreadBlockShape,
        WarpShape, InstructionShape, EpilogueOp,
        cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
                cutlass::convolution::ConvType::kConvolution>,
        2, 16, 16, true, cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
        const typename Convolution::ElementSrc* d_src,
        const typename Convolution::ElementFilter* d_filter,
        const typename Convolution::ElementBias* d_bias,
        const typename Convolution::ElementDst* d_z,
        typename Convolution::ElementDst* d_dst, int* workspace,
        typename Convolution::ConvolutionParameter const& conv_param,
        typename Convolution::EpilogueOutputOp::Params const& epilogue,
        cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
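Apart from the added banner comment, the only change hipify made in this pair is the stream type in the explicit instantiation: hipStream_t in the .hip file versus cudaStream_t in the .cu file. If a single backend-neutral declaration were wanted instead of maintaining the pair, a thin alias like the following sketch would do; this is a hypothetical illustration, not something the MegDNN generator emits.

// Illustrative only: a backend-neutral stream alias for code shared between HIP and CUDA builds.
#if defined(__HIP_PLATFORM_AMD__) || defined(__HIP_PLATFORM_HCC__)
#include <hip/hip_runtime.h>
using gpuStream_t = hipStream_t;
#else
#include <cuda_runtime.h>
using gpuStream_t = cudaStream_t;
#endif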
d4bb071f93a8c3c41c570c51510660156d9d8a41.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/pooling_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void MaxPoolForward(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const top_data, int* mask, Dtype* top_mask) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; const int hend = min(hstart + kernel_h, height); const int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); Dtype maxval = -FLT_MAX; int maxidx = -1; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { if (bottom_slice[h * width + w] > maxval) { maxidx = h * width + w; maxval = bottom_slice[maxidx]; } } } top_data[index] = maxval; if (mask) { mask[index] = maxidx; } else { top_mask[index] = maxidx; } } } template <typename Dtype> __global__ void AvePoolForward(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); const int pool_size = (hend - hstart) * (wend - wstart); hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, height); wend = min(wend, width); Dtype aveval = 0; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { aveval += bottom_slice[h * width + w]; } } top_data[index] = aveval / pool_size; } } template <typename Dtype> __global__ void StoPoolForwardTrain(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, Dtype* const rand_idx, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; const int hstart = ph * stride_h; const int hend = min(hstart + kernel_h, height); const int wstart = pw * stride_w; const int wend = min(wstart + kernel_w, width); Dtype cumsum = 0.; const 
Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; // First pass: get sum for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { cumsum += bottom_slice[h * width + w]; } } const float thres = rand_idx[index] * cumsum; // Second pass: get value, and set index. cumsum = 0; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { cumsum += bottom_slice[h * width + w]; if (cumsum >= thres) { rand_idx[index] = ((n * channels + c) * height + h) * width + w; top_data[index] = bottom_slice[h * width + w]; return; } } } } } template <typename Dtype> __global__ void StoPoolForwardTest(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; const int hstart = ph * stride_h; const int hend = min(hstart + kernel_h, height); const int wstart = pw * stride_w; const int wend = min(wstart + kernel_w, width); // We set cumsum to be 0 to avoid divide-by-zero problems Dtype cumsum = 0.; Dtype cumvalues = 0.; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; // First pass: get sum for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { cumsum += bottom_slice[h * width + w]; cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w]; } } top_data[index] = (cumsum > 0.) ? cumvalues / cumsum : 0.; } } template <typename Dtype> void PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int count = top[0]->count(); // We'll output the mask to top[1] if it's of size >1. const bool use_top_mask = top.size() > 1; int* mask = NULL; Dtype* top_mask = NULL; switch (this->layer_param_.pooling_param().pool()) { case PoolingParameter_PoolMethod_MAX: if (use_top_mask) { top_mask = top[1]->mutable_gpu_data(); } else { mask = max_idx_.mutable_gpu_data(); } // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( MaxPoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data, mask, top_mask); break; case PoolingParameter_PoolMethod_AVE: // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( AvePoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data); break; case PoolingParameter_PoolMethod_STOCHASTIC: if (this->phase_ == TRAIN) { // We need to create the random index as well. 
caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1), rand_idx_.mutable_gpu_data()); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( StoPoolForwardTrain<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, rand_idx_.mutable_gpu_data(), top_data); } else { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( StoPoolForwardTest<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, top_data); } break; default: LOG(FATAL) << "Unknown pooling method."; } CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void MaxPoolBackward(const int nthreads, const Dtype* const top_diff, const int* const mask, const Dtype* const top_mask, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width; const int h = (index / width) % height; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; const int phend = min((h + pad_h) / stride_h + 1, pooled_height); const int pwstart = (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1; const int pwend = min((w + pad_w) / stride_w + 1, pooled_width); Dtype gradient = 0; const int offset = (n * channels + c) * pooled_height * pooled_width; const Dtype* const top_diff_slice = top_diff + offset; if (mask) { const int* const mask_slice = mask + offset; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (mask_slice[ph * pooled_width + pw] == h * width + w) { gradient += top_diff_slice[ph * pooled_width + pw]; } } } } else { const Dtype* const top_mask_slice = top_mask + offset; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (top_mask_slice[ph * pooled_width + pw] == h * width + w) { gradient += top_diff_slice[ph * pooled_width + pw]; } } } } bottom_diff[index] = gradient; } } template <typename Dtype> __global__ void AvePoolBackward(const int nthreads, const Dtype* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width + pad_w; const int h = (index / width) % height + pad_h; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 
0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); Dtype gradient = 0; const Dtype* const top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); int pool_size = (hend - hstart) * (wend - wstart); gradient += top_diff_slice[ph * pooled_width + pw] / pool_size; } } bottom_diff[index] = gradient; } } template <typename Dtype> __global__ void StoPoolBackward(const int nthreads, const Dtype* const rand_idx, const Dtype* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width; const int h = (index / width) % height; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); Dtype gradient = 0; const Dtype* const rand_idx_slice = rand_idx + (n * channels + c) * pooled_height * pooled_width; const Dtype* const top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { gradient += top_diff_slice[ph * pooled_width + pw] * (index == static_cast<int>(rand_idx_slice[ph * pooled_width + pw])); } } bottom_diff[index] = gradient; } } template <typename Dtype> void PoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (!propagate_down[0]) { return; } const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); caffe_gpu_set(count, Dtype(0.), bottom_diff); // We'll output the mask to top[1] if it's of size >1. 
const bool use_top_mask = top.size() > 1; const int* mask = NULL; const Dtype* top_mask = NULL; switch (this->layer_param_.pooling_param().pool()) { case PoolingParameter_PoolMethod_MAX: if (use_top_mask) { top_mask = top[1]->gpu_data(); } else { mask = max_idx_.gpu_data(); } // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( MaxPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, mask, top_mask, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff); break; case PoolingParameter_PoolMethod_AVE: // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( AvePoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff); break; case PoolingParameter_PoolMethod_STOCHASTIC: // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( StoPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, rand_idx_.gpu_data(), top_diff, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, bottom_diff); break; default: LOG(FATAL) << "Unknown pooling method."; } CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(PoolingLayer); } // namespace caffe
d4bb071f93a8c3c41c570c51510660156d9d8a41.cu
#include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/pooling_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void MaxPoolForward(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const top_data, int* mask, Dtype* top_mask) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; const int hend = min(hstart + kernel_h, height); const int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); Dtype maxval = -FLT_MAX; int maxidx = -1; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { if (bottom_slice[h * width + w] > maxval) { maxidx = h * width + w; maxval = bottom_slice[maxidx]; } } } top_data[index] = maxval; if (mask) { mask[index] = maxidx; } else { top_mask[index] = maxidx; } } } template <typename Dtype> __global__ void AvePoolForward(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); const int pool_size = (hend - hstart) * (wend - wstart); hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, height); wend = min(wend, width); Dtype aveval = 0; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { aveval += bottom_slice[h * width + w]; } } top_data[index] = aveval / pool_size; } } template <typename Dtype> __global__ void StoPoolForwardTrain(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, Dtype* const rand_idx, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; const int hstart = ph * stride_h; const int hend = min(hstart + kernel_h, height); const int wstart = pw * stride_w; const int wend = min(wstart + kernel_w, width); Dtype cumsum = 0.; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; // First 
pass: get sum for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { cumsum += bottom_slice[h * width + w]; } } const float thres = rand_idx[index] * cumsum; // Second pass: get value, and set index. cumsum = 0; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { cumsum += bottom_slice[h * width + w]; if (cumsum >= thres) { rand_idx[index] = ((n * channels + c) * height + h) * width + w; top_data[index] = bottom_slice[h * width + w]; return; } } } } } template <typename Dtype> __global__ void StoPoolForwardTest(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; const int hstart = ph * stride_h; const int hend = min(hstart + kernel_h, height); const int wstart = pw * stride_w; const int wend = min(wstart + kernel_w, width); // We set cumsum to be 0 to avoid divide-by-zero problems Dtype cumsum = 0.; Dtype cumvalues = 0.; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; // First pass: get sum for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { cumsum += bottom_slice[h * width + w]; cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w]; } } top_data[index] = (cumsum > 0.) ? cumvalues / cumsum : 0.; } } template <typename Dtype> void PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int count = top[0]->count(); // We'll output the mask to top[1] if it's of size >1. const bool use_top_mask = top.size() > 1; int* mask = NULL; Dtype* top_mask = NULL; switch (this->layer_param_.pooling_param().pool()) { case PoolingParameter_PoolMethod_MAX: if (use_top_mask) { top_mask = top[1]->mutable_gpu_data(); } else { mask = max_idx_.mutable_gpu_data(); } // NOLINT_NEXT_LINE(whitespace/operators) MaxPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data, mask, top_mask); break; case PoolingParameter_PoolMethod_AVE: // NOLINT_NEXT_LINE(whitespace/operators) AvePoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data); break; case PoolingParameter_PoolMethod_STOCHASTIC: if (this->phase_ == TRAIN) { // We need to create the random index as well. 
caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1), rand_idx_.mutable_gpu_data()); // NOLINT_NEXT_LINE(whitespace/operators) StoPoolForwardTrain<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, rand_idx_.mutable_gpu_data(), top_data); } else { // NOLINT_NEXT_LINE(whitespace/operators) StoPoolForwardTest<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, top_data); } break; default: LOG(FATAL) << "Unknown pooling method."; } CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void MaxPoolBackward(const int nthreads, const Dtype* const top_diff, const int* const mask, const Dtype* const top_mask, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width; const int h = (index / width) % height; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; const int phend = min((h + pad_h) / stride_h + 1, pooled_height); const int pwstart = (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1; const int pwend = min((w + pad_w) / stride_w + 1, pooled_width); Dtype gradient = 0; const int offset = (n * channels + c) * pooled_height * pooled_width; const Dtype* const top_diff_slice = top_diff + offset; if (mask) { const int* const mask_slice = mask + offset; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (mask_slice[ph * pooled_width + pw] == h * width + w) { gradient += top_diff_slice[ph * pooled_width + pw]; } } } } else { const Dtype* const top_mask_slice = top_mask + offset; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (top_mask_slice[ph * pooled_width + pw] == h * width + w) { gradient += top_diff_slice[ph * pooled_width + pw]; } } } } bottom_diff[index] = gradient; } } template <typename Dtype> __global__ void AvePoolBackward(const int nthreads, const Dtype* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width + pad_w; const int h = (index / width) % height + pad_h; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 
0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); Dtype gradient = 0; const Dtype* const top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); int pool_size = (hend - hstart) * (wend - wstart); gradient += top_diff_slice[ph * pooled_width + pw] / pool_size; } } bottom_diff[index] = gradient; } } template <typename Dtype> __global__ void StoPoolBackward(const int nthreads, const Dtype* const rand_idx, const Dtype* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width; const int h = (index / width) % height; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); Dtype gradient = 0; const Dtype* const rand_idx_slice = rand_idx + (n * channels + c) * pooled_height * pooled_width; const Dtype* const top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { gradient += top_diff_slice[ph * pooled_width + pw] * (index == static_cast<int>(rand_idx_slice[ph * pooled_width + pw])); } } bottom_diff[index] = gradient; } } template <typename Dtype> void PoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (!propagate_down[0]) { return; } const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); caffe_gpu_set(count, Dtype(0.), bottom_diff); // We'll output the mask to top[1] if it's of size >1. 
const bool use_top_mask = top.size() > 1; const int* mask = NULL; const Dtype* top_mask = NULL; switch (this->layer_param_.pooling_param().pool()) { case PoolingParameter_PoolMethod_MAX: if (use_top_mask) { top_mask = top[1]->gpu_data(); } else { mask = max_idx_.gpu_data(); } // NOLINT_NEXT_LINE(whitespace/operators) MaxPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff, mask, top_mask, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff); break; case PoolingParameter_PoolMethod_AVE: // NOLINT_NEXT_LINE(whitespace/operators) AvePoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff); break; case PoolingParameter_PoolMethod_STOCHASTIC: // NOLINT_NEXT_LINE(whitespace/operators) StoPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, rand_idx_.gpu_data(), top_diff, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, bottom_diff); break; default: LOG(FATAL) << "Unknown pooling method."; } CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(PoolingLayer); } // namespace caffe
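The pooling pair above repeats the same window-bounds arithmetic in every kernel: start = p*stride - pad, end = start + kernel clamped to the input extent, and only then the start clamped to zero. A minimal host-side sketch of that indexing for one max-pooling output element follows; max_pool_at and its parameters are illustrative names, not part of either file.

// Host-side reference for one MaxPoolForward output element (illustrative names).
#include <algorithm>
#include <cfloat>

// bottom points to one (n, c) plane of size height x width, stored row-major.
float max_pool_at(const float* bottom, int height, int width,
                  int ph, int pw, int kernel_h, int kernel_w,
                  int stride_h, int stride_w, int pad_h, int pad_w,
                  int* arg_index /* optional argmax, may be nullptr */) {
    // End bounds are computed from the unclamped start, exactly as in the kernel.
    int hend = std::min(ph * stride_h - pad_h + kernel_h, height);
    int wend = std::min(pw * stride_w - pad_w + kernel_w, width);
    int hstart = std::max(ph * stride_h - pad_h, 0);
    int wstart = std::max(pw * stride_w - pad_w, 0);
    float maxval = -FLT_MAX;
    int maxidx = -1;
    for (int h = hstart; h < hend; ++h) {
        for (int w = wstart; w < wend; ++w) {
            if (bottom[h * width + w] > maxval) {
                maxidx = h * width + w;
                maxval = bottom[maxidx];
            }
        }
    }
    if (arg_index) { *arg_index = maxidx; }  // corresponds to the mask the kernel writes
    return maxval;
}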
305e26350b91c9d91ddcfe6d9d9ea813eafafbf9.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ //#ifdef NVGRAPH_PARTITION #define _USE_MATH_DEFINES #include <math.h> #include "lanczos.hxx" #include <stdio.h> #include <time.h> #include <hip/hip_runtime.h> #define USE_CURAND 1 #ifdef USE_CURAND #include <hiprand/hiprand.h> #endif #include "nvgraph_error.hxx" #include "nvgraph_vector.hxx" #include "nvgraph_vector_kernels.hxx" #include "nvgraph_cublas.hxx" #include "nvgraph_lapack.hxx" #include "debug_macros.h" // ========================================================= // Useful macros // ========================================================= // Get index of matrix entry #define IDX(i,j,lda) ((i)+(j)*(lda)) // ========================================================= // Macros and functions for cuRAND // ========================================================= //#ifdef USE_CURAND //namespace { // // /// Get message string from cuRAND status code // //static // //const char* curandGetErrorString(hiprandStatus_t e) { // // switch(e) { // // case HIPRAND_STATUS_SUCCESS: // // return "HIPRAND_STATUS_SUCCESS"; // // case HIPRAND_STATUS_VERSION_MISMATCH: // // return "HIPRAND_STATUS_VERSION_MISMATCH"; // // case HIPRAND_STATUS_NOT_INITIALIZED: // // return "HIPRAND_STATUS_NOT_INITIALIZED"; // // case HIPRAND_STATUS_ALLOCATION_FAILED: // // return "HIPRAND_STATUS_ALLOCATION_FAILED"; // // case HIPRAND_STATUS_TYPE_ERROR: // // return "HIPRAND_STATUS_TYPE_ERROR"; // // case HIPRAND_STATUS_OUT_OF_RANGE: // // return "HIPRAND_STATUS_OUT_OF_RANGE"; // // case HIPRAND_STATUS_LENGTH_NOT_MULTIPLE: // // return "HIPRAND_STATUS_LENGTH_NOT_MULTIPLE"; // // case HIPRAND_STATUS_DOUBLE_PRECISION_REQUIRED: // // return "HIPRAND_STATUS_DOUBLE_PRECISION_REQUIRED"; // // case HIPRAND_STATUS_LAUNCH_FAILURE: // // return "HIPRAND_STATUS_LAUNCH_FAILURE"; // // case HIPRAND_STATUS_PREEXISTING_FAILURE: // // return "HIPRAND_STATUS_PREEXISTING_FAILURE"; // // case HIPRAND_STATUS_INITIALIZATION_FAILED: // // return "HIPRAND_STATUS_INITIALIZATION_FAILED"; // // case HIPRAND_STATUS_ARCH_MISMATCH: // // return "HIPRAND_STATUS_ARCH_MISMATCH"; // // case HIPRAND_STATUS_INTERNAL_ERROR: // // return "HIPRAND_STATUS_INTERNAL_ERROR"; // // default: // // return "unknown cuRAND error"; // // } // //} // // // curandGeneratorNormalX // inline static // hiprandStatus_t // curandGenerateNormalX(hiprandGenerator_t generator, // float * outputPtr, size_t n, // float mean, float stddev) { // return hiprandGenerateNormal(generator, outputPtr, n, mean, stddev); // } // inline static // hiprandStatus_t // curandGenerateNormalX(hiprandGenerator_t generator, // double * outputPtr, size_t n, // double mean, double stddev) { // return hiprandGenerateNormalDouble(generator, outputPtr, // n, mean, stddev); // } // //} //#endif namespace nvgraph { namespace { // ========================================================= // Helper functions // ========================================================= 
/// Perform Lanczos iteration /** Lanczos iteration is performed on a shifted matrix A+shift*I. * * @param A Matrix. * @param iter Pointer to current Lanczos iteration. On exit, the * variable is set equal to the final Lanczos iteration. * @param maxIter Maximum Lanczos iteration. This function will * perform a maximum of maxIter-*iter iterations. * @param shift Matrix shift. * @param tol Convergence tolerance. Lanczos iteration will * terminate when the residual norm (i.e. entry in beta_host) is * less than tol. * @param reorthogonalize Whether to reorthogonalize Lanczos * vectors. * @param alpha_host (Output, host memory, maxIter entries) * Diagonal entries of Lanczos system. * @param beta_host (Output, host memory, maxIter entries) * Off-diagonal entries of Lanczos system. * @param lanczosVecs_dev (Input/output, device memory, * n*(maxIter+1) entries) Lanczos vectors. Vectors are stored as * columns of a column-major matrix with dimensions * n x (maxIter+1). * @param work_dev (Output, device memory, maxIter entries) * Workspace. Not needed if full reorthogonalization is disabled. * @return Zero if successful. Otherwise non-zero. */ template <typename IndexType_, typename ValueType_> static int performLanczosIteration(const Matrix<IndexType_, ValueType_> * A, IndexType_ * iter, IndexType_ maxIter, ValueType_ shift, ValueType_ tol, bool reorthogonalize, ValueType_ * __restrict__ alpha_host, ValueType_ * __restrict__ beta_host, ValueType_ * __restrict__ lanczosVecs_dev, ValueType_ * __restrict__ work_dev) { // ------------------------------------------------------- // Variable declaration // ------------------------------------------------------- // Useful variables const ValueType_ one = 1; const ValueType_ negOne = -1; const ValueType_ zero = 0; IndexType_ n = A->n; // ------------------------------------------------------- // Compute second Lanczos vector // ------------------------------------------------------- if(*iter<=0) { *iter = 1; // Apply matrix if(shift != 0) CHECK_CUDA(hipMemcpyAsync(lanczosVecs_dev+n, lanczosVecs_dev, n*sizeof(ValueType_), hipMemcpyDeviceToDevice)); A->mv(1, lanczosVecs_dev, shift, lanczosVecs_dev+n); // Orthogonalize Lanczos vector Cublas::dot(n, lanczosVecs_dev, 1, lanczosVecs_dev+IDX(0,1,n), 1, alpha_host); Cublas::axpy(n, -alpha_host[0], lanczosVecs_dev, 1, lanczosVecs_dev+IDX(0,1,n), 1); beta_host[0] = Cublas::nrm2(n, lanczosVecs_dev+IDX(0,1,n), 1); // Check if Lanczos has converged if(beta_host[0] <= tol) return 0; // Normalize Lanczos vector Cublas::scal(n, 1/beta_host[0], lanczosVecs_dev+IDX(0,1,n), 1); } // ------------------------------------------------------- // Compute remaining Lanczos vectors // ------------------------------------------------------- while(*iter<maxIter) { ++(*iter); // Apply matrix if(shift != 0) CHECK_CUDA(hipMemcpyAsync(lanczosVecs_dev+(*iter)*n, lanczosVecs_dev+(*iter-1)*n, n*sizeof(ValueType_), hipMemcpyDeviceToDevice)); A->mv(1, lanczosVecs_dev+IDX(0,*iter-1,n), shift, lanczosVecs_dev+IDX(0,*iter,n)); // Full reorthogonalization // "Twice is enough" algorithm per Kahan and Parlett if(reorthogonalize) { Cublas::gemv(true, n, *iter, &one, lanczosVecs_dev, n, lanczosVecs_dev+IDX(0,*iter,n), 1, &zero, work_dev, 1); Cublas::gemv(false, n, *iter, &negOne, lanczosVecs_dev, n, work_dev, 1, &one, lanczosVecs_dev+IDX(0,*iter,n), 1); CHECK_CUDA(hipMemcpyAsync(alpha_host+(*iter-1), work_dev+(*iter-1), sizeof(ValueType_), hipMemcpyDeviceToHost)); Cublas::gemv(true, n, *iter, &one, lanczosVecs_dev, n, lanczosVecs_dev+IDX(0,*iter,n), 
1, &zero, work_dev, 1); Cublas::gemv(false, n, *iter, &negOne, lanczosVecs_dev, n, work_dev, 1, &one, lanczosVecs_dev+IDX(0,*iter,n), 1); } // Orthogonalization with 3-term recurrence relation else { Cublas::dot(n, lanczosVecs_dev+IDX(0,*iter-1,n), 1, lanczosVecs_dev+IDX(0,*iter,n), 1, alpha_host+(*iter-1)); Cublas::axpy(n, -alpha_host[*iter-1], lanczosVecs_dev+IDX(0,*iter-1,n), 1, lanczosVecs_dev+IDX(0,*iter,n), 1); Cublas::axpy(n, -beta_host[*iter-2], lanczosVecs_dev+IDX(0,*iter-2,n), 1, lanczosVecs_dev+IDX(0,*iter,n), 1); } // Compute residual beta_host[*iter-1] = Cublas::nrm2(n, lanczosVecs_dev+IDX(0,*iter,n), 1); // Check if Lanczos has converged if(beta_host[*iter-1] <= tol) break; // Normalize Lanczos vector Cublas::scal(n, 1/beta_host[*iter-1], lanczosVecs_dev+IDX(0,*iter,n), 1); } CHECK_CUDA(hipDeviceSynchronize()); return 0; } /// Find Householder transform for 3-dimensional system /** Given an input vector v=[x,y,z]', this function finds a * Householder transform P such that P*v is a multiple of * e_1=[1,0,0]'. The input vector v is overwritten with the * Householder vector such that P=I-2*v*v'. * * @param v (Input/output, host memory, 3 entries) Input * 3-dimensional vector. On exit, the vector is set to the * Householder vector. * @param Pv (Output, host memory, 1 entry) First entry of P*v * (here v is the input vector). Either equal to ||v||_2 or * -||v||_2. * @param P (Output, host memory, 9 entries) Householder transform * matrix. Matrix dimensions are 3 x 3. */ template <typename IndexType_, typename ValueType_> static void findHouseholder3(ValueType_ * v, ValueType_ * Pv, ValueType_ * P) { // Compute norm of vector *Pv = std::sqrt(v[0]*v[0]+v[1]*v[1]+v[2]*v[2]); // Choose whether to reflect to e_1 or -e_1 // This choice avoids catastrophic cancellation if(v[0] >= 0) *Pv = -(*Pv); v[0] -= *Pv; // Normalize Householder vector ValueType_ normHouseholder = std::sqrt(v[0]*v[0]+v[1]*v[1]+v[2]*v[2]); if(normHouseholder != 0) { v[0] /= normHouseholder; v[1] /= normHouseholder; v[2] /= normHouseholder; } else { v[0] = 0; v[1] = 0; v[2] = 0; } // Construct Householder matrix IndexType_ i, j; for(j=0; j<3; ++j) for(i=0; i<3; ++i) P[IDX(i,j,3)] = -2*v[i]*v[j]; for(i=0; i<3; ++i) P[IDX(i,i,3)] += 1; } /// Apply 3-dimensional Householder transform to 4 x 4 matrix /** The Householder transform is pre-applied to the top three rows * of the matrix and post-applied to the left three columns. The * 4 x 4 matrix is intended to contain the bulge that is produced * in the Francis QR algorithm. * * @param v (Input, host memory, 3 entries) Householder vector. * @param A (Input/output, host memory, 16 entries) 4 x 4 matrix. */ template <typename IndexType_, typename ValueType_> static void applyHouseholder3(const ValueType_ * v, ValueType_ * A) { // Loop indices IndexType_ i, j; // Dot product between Householder vector and matrix row/column ValueType_ vDotA; // Pre-apply Householder transform for(j=0; j<4; ++j) { vDotA = 0; for(i=0; i<3; ++i) vDotA += v[i]*A[IDX(i,j,4)]; for(i=0; i<3; ++i) A[IDX(i,j,4)] -= 2*v[i]*vDotA; } // Post-apply Householder transform for(i=0; i<4; ++i) { vDotA = 0; for(j=0; j<3; ++j) vDotA += A[IDX(i,j,4)]*v[j]; for(j=0; j<3; ++j) A[IDX(i,j,4)] -= 2*vDotA*v[j]; } } /// Perform one step of Francis QR algorithm /** Equivalent to two steps of the classical QR algorithm on a * tridiagonal matrix. * * @param n Matrix dimension. * @param shift1 QR algorithm shift. * @param shift2 QR algorithm shift. 
* @param alpha (Input/output, host memory, n entries) Diagonal * entries of tridiagonal matrix. * @param beta (Input/output, host memory, n-1 entries) * Off-diagonal entries of tridiagonal matrix. * @param V (Input/output, host memory, n*n entries) Orthonormal * transforms from previous steps of QR algorithm. Matrix * dimensions are n x n. On exit, the orthonormal transform from * this Francis QR step is post-applied to the matrix. * @param work (Output, host memory, 3*n entries) Workspace. * @return Zero if successful. Otherwise non-zero. */ template <typename IndexType_, typename ValueType_> static int francisQRIteration(IndexType_ n, ValueType_ shift1, ValueType_ shift2, ValueType_ * alpha, ValueType_ * beta, ValueType_ * V, ValueType_ * work) { // ------------------------------------------------------- // Variable declaration // ------------------------------------------------------- // Temporary storage of 4x4 bulge and Householder vector ValueType_ bulge[16]; // Householder vector ValueType_ householder[3]; // Householder matrix ValueType_ householderMatrix[3*3]; // Shifts are roots of the polynomial p(x)=x^2+b*x+c ValueType_ b = -shift1 - shift2; ValueType_ c = shift1*shift2; // Loop indices IndexType_ i, j, pos; // Temporary variable ValueType_ temp; // ------------------------------------------------------- // Implementation // ------------------------------------------------------- // Compute initial Householder transform householder[0] = alpha[0]*alpha[0] + beta[0]*beta[0] + b*alpha[0] + c; householder[1] = beta[0]*(alpha[0]+alpha[1]+b); householder[2] = beta[0]*beta[1]; findHouseholder3<IndexType_,ValueType_>(householder, &temp, householderMatrix); // Apply initial Householder transform to create bulge memset(bulge, 0, 16*sizeof(ValueType_)); for(i=0; i<4; ++i) bulge[IDX(i,i,4)] = alpha[i]; for(i=0; i<3; ++i) { bulge[IDX(i+1,i,4)] = beta[i]; bulge[IDX(i,i+1,4)] = beta[i]; } applyHouseholder3<IndexType_,ValueType_>(householder, bulge); Lapack<ValueType_>::gemm(false, false, n, 3, 3, 1, V, n, householderMatrix, 3, 0, work, n); memcpy(V, work, 3*n*sizeof(ValueType_)); // Chase bulge to bottom-right of matrix with Householder transforms for(pos=0; pos<n-4; ++pos) { // Move to next position alpha[pos] = bulge[IDX(0,0,4)]; householder[0] = bulge[IDX(1,0,4)]; householder[1] = bulge[IDX(2,0,4)]; householder[2] = bulge[IDX(3,0,4)]; for(j=0; j<3; ++j) for(i=0; i<3; ++i) bulge[IDX(i,j,4)] = bulge[IDX(i+1,j+1,4)]; bulge[IDX(3,0,4)] = 0; bulge[IDX(3,1,4)] = 0; bulge[IDX(3,2,4)] = beta[pos+3]; bulge[IDX(0,3,4)] = 0; bulge[IDX(1,3,4)] = 0; bulge[IDX(2,3,4)] = beta[pos+3]; bulge[IDX(3,3,4)] = alpha[pos+4]; // Apply Householder transform findHouseholder3<IndexType_,ValueType_>(householder, beta+pos, householderMatrix); applyHouseholder3<IndexType_,ValueType_>(householder, bulge); Lapack<ValueType_>::gemm(false, false, n, 3, 3, 1, V+IDX(0,pos+1,n), n, householderMatrix, 3, 0, work, n); memcpy(V+IDX(0,pos+1,n), work, 3*n*sizeof(ValueType_)); } // Apply penultimate Householder transform // Values in the last row and column are zero alpha[n-4] = bulge[IDX(0,0,4)]; householder[0] = bulge[IDX(1,0,4)]; householder[1] = bulge[IDX(2,0,4)]; householder[2] = bulge[IDX(3,0,4)]; for(j=0; j<3; ++j) for(i=0; i<3; ++i) bulge[IDX(i,j,4)] = bulge[IDX(i+1,j+1,4)]; bulge[IDX(3,0,4)] = 0; bulge[IDX(3,1,4)] = 0; bulge[IDX(3,2,4)] = 0; bulge[IDX(0,3,4)] = 0; bulge[IDX(1,3,4)] = 0; bulge[IDX(2,3,4)] = 0; bulge[IDX(3,3,4)] = 0; findHouseholder3<IndexType_,ValueType_>(householder, beta+n-4, householderMatrix); 
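  // findHouseholder3 has just written the new off-diagonal entry (the signed
  // norm of the eliminated column) into beta[n-4]; the reflection is applied
  // to the trailing bulge and accumulated into the last three columns of V below.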
applyHouseholder3<IndexType_,ValueType_>(householder, bulge); Lapack<ValueType_>::gemm(false, false, n, 3, 3, 1, V+IDX(0,n-3,n), n, householderMatrix, 3, 0, work, n); memcpy(V+IDX(0,n-3,n), work, 3*n*sizeof(ValueType_)); // Apply final Householder transform // Values in the last two rows and columns are zero alpha[n-3] = bulge[IDX(0,0,4)]; householder[0] = bulge[IDX(1,0,4)]; householder[1] = bulge[IDX(2,0,4)]; householder[2] = 0; for(j=0; j<3; ++j) for(i=0; i<3; ++i) bulge[IDX(i,j,4)] = bulge[IDX(i+1,j+1,4)]; findHouseholder3<IndexType_,ValueType_>(householder, beta+n-3, householderMatrix); applyHouseholder3<IndexType_,ValueType_>(householder, bulge); Lapack<ValueType_>::gemm(false, false, n, 2, 2, 1, V+IDX(0,n-2,n), n, householderMatrix, 3, 0, work, n); memcpy(V+IDX(0,n-2,n), work, 2*n*sizeof(ValueType_)); // Bulge has been eliminated alpha[n-2] = bulge[IDX(0,0,4)]; alpha[n-1] = bulge[IDX(1,1,4)]; beta[n-2] = bulge[IDX(1,0,4)]; return 0; } /// Perform implicit restart of Lanczos algorithm /** Shifts are Chebyshev nodes of unwanted region of matrix spectrum. * * @param n Matrix dimension. * @param iter Current Lanczos iteration. * @param iter_new Lanczos iteration after restart. * @param shiftUpper Pointer to upper bound for unwanted * region. Value is ignored if less than *shiftLower. If a * stronger upper bound has been found, the value is updated on * exit. * @param shiftLower Pointer to lower bound for unwanted * region. Value is ignored if greater than *shiftUpper. If a * stronger lower bound has been found, the value is updated on * exit. * @param alpha_host (Input/output, host memory, iter entries) * Diagonal entries of Lanczos system. * @param beta_host (Input/output, host memory, iter entries) * Off-diagonal entries of Lanczos system. * @param V_host (Output, host memory, iter*iter entries) * Orthonormal transform used to obtain restarted system. Matrix * dimensions are iter x iter. * @param work_host (Output, host memory, 4*iter entries) * Workspace. * @param lanczosVecs_dev (Input/output, device memory, n*(iter+1) * entries) Lanczos vectors. Vectors are stored as columns of a * column-major matrix with dimensions n x (iter+1). * @param work_dev (Output, device memory, (n+iter)*iter entries) * Workspace. 
*/ template <typename IndexType_, typename ValueType_> static int lanczosRestart(IndexType_ n, IndexType_ iter, IndexType_ iter_new, ValueType_ * shiftUpper, ValueType_ * shiftLower, ValueType_ * __restrict__ alpha_host, ValueType_ * __restrict__ beta_host, ValueType_ * __restrict__ V_host, ValueType_ * __restrict__ work_host, ValueType_ * __restrict__ lanczosVecs_dev, ValueType_ * __restrict__ work_dev, bool smallest_eig) { // ------------------------------------------------------- // Variable declaration // ------------------------------------------------------- // Useful constants const ValueType_ zero = 0; const ValueType_ one = 1; // Loop index IndexType_ i; // Number of implicit restart steps // Assumed to be even since each call to Francis algorithm is // equivalent to two calls of QR algorithm IndexType_ restartSteps = iter - iter_new; // Ritz values from Lanczos method ValueType_ * ritzVals_host = work_host + 3*iter; // Shifts for implicit restart ValueType_ * shifts_host; // Orthonormal matrix for similarity transform ValueType_ * V_dev = work_dev + n*iter; // ------------------------------------------------------- // Implementation // ------------------------------------------------------- // Compute Ritz values memcpy(ritzVals_host, alpha_host, iter*sizeof(ValueType_)); memcpy(work_host, beta_host, (iter-1)*sizeof(ValueType_)); Lapack<ValueType_>::sterf(iter, ritzVals_host, work_host); // Debug: Print largest eigenvalues //for (int i = iter-iter_new; i < iter; ++i) // std::cout <<*(ritzVals_host+i)<< " "; //std::cout <<std::endl; // Initialize similarity transform with identity matrix memset(V_host, 0, iter*iter*sizeof(ValueType_)); for(i=0; i<iter; ++i) V_host[IDX(i,i,iter)] = 1; // Determine interval to suppress eigenvalues if (smallest_eig) { if(*shiftLower > *shiftUpper) { *shiftUpper = ritzVals_host[iter-1]; *shiftLower = ritzVals_host[iter_new]; } else { *shiftUpper = max(*shiftUpper, ritzVals_host[iter-1]); *shiftLower = min(*shiftLower, ritzVals_host[iter_new]); } } else { if(*shiftLower > *shiftUpper) { *shiftUpper = ritzVals_host[iter-iter_new-1]; *shiftLower = ritzVals_host[0]; } else { *shiftUpper = max(*shiftUpper, ritzVals_host[iter-iter_new-1]); *shiftLower = min(*shiftLower, ritzVals_host[0]); } } // Calculate Chebyshev nodes as shifts shifts_host = ritzVals_host; for(i=0; i<restartSteps; ++i) { shifts_host[i] = cos((i+0.5)*static_cast<ValueType_>(M_PI)/restartSteps); shifts_host[i] *= 0.5*((*shiftUpper)-(*shiftLower)); shifts_host[i] += 0.5*((*shiftUpper)+(*shiftLower)); } // Apply Francis QR algorithm to implicitly restart Lanczos for(i=0; i<restartSteps; i+=2) if(francisQRIteration(iter, shifts_host[i], shifts_host[i+1], alpha_host, beta_host, V_host, work_host)) WARNING("error in implicitly shifted QR algorithm"); // Obtain new residual CHECK_CUDA(hipMemcpyAsync(V_dev, V_host, iter*iter*sizeof(ValueType_), hipMemcpyHostToDevice)); beta_host[iter-1] = beta_host[iter-1]*V_host[IDX(iter-1,iter_new-1,iter)]; Cublas::gemv(false, n, iter, beta_host+iter_new-1, lanczosVecs_dev, n, V_dev+IDX(0,iter_new,iter), 1, beta_host+iter-1, lanczosVecs_dev+IDX(0,iter,n), 1); // Obtain new Lanczos vectors Cublas::gemm(false, false, n, iter_new, iter, &one, lanczosVecs_dev, n, V_dev, iter, &zero, work_dev, n); CHECK_CUDA(hipMemcpyAsync(lanczosVecs_dev, work_dev, n*iter_new*sizeof(ValueType_), hipMemcpyDeviceToDevice)); // Normalize residual to obtain new Lanczos vector CHECK_CUDA(hipMemcpyAsync(lanczosVecs_dev+IDX(0,iter_new,n), lanczosVecs_dev+IDX(0,iter,n), 
n*sizeof(ValueType_), hipMemcpyDeviceToDevice));
    beta_host[iter_new-1] = Cublas::nrm2(n, lanczosVecs_dev+IDX(0,iter_new,n), 1);
    Cublas::scal(n, 1/beta_host[iter_new-1],
                 lanczosVecs_dev+IDX(0,iter_new,n), 1);

    return 0;

  }

}

// =========================================================
// Eigensolver
// =========================================================

/// Compute smallest eigenvectors of symmetric matrix
/** Computes eigenvalues and eigenvectors that are least
 *  positive. If matrix is positive definite or positive
 *  semidefinite, the computed eigenvalues are smallest in
 *  magnitude.
 *
 *  The largest eigenvalue is estimated by performing several
 *  Lanczos iterations. An implicitly restarted Lanczos method is
 *  then applied to A+s*I, where s is negative the largest
 *  eigenvalue.
 *
 *  @param A Matrix.
 *  @param nEigVecs Number of eigenvectors to compute.
 *  @param maxIter Maximum number of Lanczos steps. Does not include
 *    Lanczos steps used to estimate largest eigenvalue.
 *  @param restartIter Maximum size of Lanczos system before
 *    performing an implicit restart. Should be at least 4.
 *  @param tol Convergence tolerance. Lanczos iteration will
 *    terminate when the residual norm is less than tol*theta, where
 *    theta is an estimate for the smallest unwanted eigenvalue
 *    (i.e. the (nEigVecs+1)th smallest eigenvalue).
 *  @param reorthogonalize Whether to reorthogonalize Lanczos
 *    vectors.
 *  @param effIter On exit, pointer to final size of Lanczos system.
 *  @param totalIter On exit, pointer to total number of Lanczos
 *    iterations performed. Does not include Lanczos steps used to
 *    estimate largest eigenvalue.
 *  @param shift On exit, pointer to matrix shift (estimate for
 *    largest eigenvalue).
 *  @param alpha_host (Output, host memory, restartIter entries)
 *    Diagonal entries of Lanczos system.
 *  @param beta_host (Output, host memory, restartIter entries)
 *    Off-diagonal entries of Lanczos system.
 *  @param lanczosVecs_dev (Output, device memory, n*(restartIter+1)
 *    entries) Lanczos vectors. Vectors are stored as columns of a
 *    column-major matrix with dimensions n x (restartIter+1).
 *  @param work_dev (Output, device memory,
 *    (n+restartIter)*restartIter entries) Workspace.
 *  @param eigVals_dev (Output, device memory, nEigVecs entries)
 *    Smallest eigenvalues of matrix.
 *  @param eigVecs_dev (Output, device memory, n*nEigVecs entries)
 *    Eigenvectors corresponding to smallest eigenvalues of
 *    matrix. Vectors are stored as columns of a column-major matrix
 *    with dimensions n x nEigVecs.
 *  @return NVGRAPH error flag.
*/ template <typename IndexType_, typename ValueType_> NVGRAPH_ERROR computeSmallestEigenvectors(const Matrix<IndexType_,ValueType_> * A, IndexType_ nEigVecs, IndexType_ maxIter, IndexType_ restartIter, ValueType_ tol, bool reorthogonalize, IndexType_ * effIter, IndexType_ * totalIter, ValueType_ * shift, ValueType_ * __restrict__ alpha_host, ValueType_ * __restrict__ beta_host, ValueType_ * __restrict__ lanczosVecs_dev, ValueType_ * __restrict__ work_dev, ValueType_ * __restrict__ eigVals_dev, ValueType_ * __restrict__ eigVecs_dev) { // ------------------------------------------------------- // Variable declaration // ------------------------------------------------------- // Useful constants const ValueType_ one = 1; const ValueType_ zero = 0; // Matrix dimension IndexType_ n = A->n; // Shift for implicit restart ValueType_ shiftUpper; ValueType_ shiftLower; // Lanczos iteration counters IndexType_ maxIter_curr = restartIter; // Maximum size of Lanczos system // Status flags int status; // Loop index IndexType_ i; // Host memory ValueType_ * Z_host; // Eigenvectors in Lanczos basis ValueType_ * work_host; // Workspace // ------------------------------------------------------- // Check that LAPACK is enabled // ------------------------------------------------------- //Lapack<ValueType_>::check_lapack_enabled(); // ------------------------------------------------------- // Check that parameters are valid // ------------------------------------------------------- if(A->m != A->n) { WARNING("invalid parameter (matrix is not square)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(nEigVecs < 1) { WARNING("invalid parameter (nEigVecs<1)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(restartIter < 1) { WARNING("invalid parameter (restartIter<4)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(tol < 0) { WARNING("invalid parameter (tol<0)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(nEigVecs > n) { WARNING("invalid parameters (nEigVecs>n)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(maxIter < nEigVecs) { WARNING("invalid parameters (maxIter<nEigVecs)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(restartIter < nEigVecs) { WARNING("invalid parameters (restartIter<nEigVecs)"); return NVGRAPH_ERR_BAD_PARAMETERS; } // ------------------------------------------------------- // Variable initialization // ------------------------------------------------------- // Total number of Lanczos iterations *totalIter = 0; // Allocate host memory Z_host = (ValueType_*) malloc(restartIter*restartIter *sizeof(ValueType_)); if(Z_host==NULL) WARNING("could not allocate host memory"); work_host = (ValueType_*) malloc(4*restartIter*sizeof(ValueType_)); if(work_host==NULL) WARNING("could not allocate host memory"); // Initialize cuBLAS Cublas::set_pointer_mode_host(); // ------------------------------------------------------- // Compute largest eigenvalue to determine shift // ------------------------------------------------------- #ifdef USE_CURAND // Random number generator hiprandGenerator_t randGen; // Initialize random number generator CHECK_CURAND(hiprandCreateGenerator(&randGen, HIPRAND_RNG_PSEUDO_PHILOX4_32_10)); CHECK_CURAND(hiprandSetPseudoRandomGeneratorSeed(randGen, 123456/*time(NULL)*/)); // Initialize initial Lanczos vector CHECK_CURAND(curandGenerateNormalX(randGen, lanczosVecs_dev, n+n%2, zero, one)); ValueType_ normQ1 = Cublas::nrm2(n, lanczosVecs_dev, 1); Cublas::scal(n, 1/normQ1, lanczosVecs_dev, 1); #else fill_raw_vec (lanczosVecs_dev, n, (ValueType_)1.0/n); // doesn't work #endif // Estimate number of Lanczos iterations 
// See bounds in Kuczynski and Wozniakowski (1992). //const ValueType_ relError = 0.25; // Relative error //const ValueType_ failProb = 1e-4; // Probability of failure //maxIter_curr = log(n/pow(failProb,2))/(4*std::sqrt(relError)) + 1; //maxIter_curr = min(maxIter_curr, restartIter); // Obtain tridiagonal matrix with Lanczos *effIter = 0; *shift = 0; status = performLanczosIteration<IndexType_, ValueType_> (A, effIter, maxIter_curr, *shift, 0.0, reorthogonalize, alpha_host, beta_host, lanczosVecs_dev, work_dev); if(status) WARNING("error in Lanczos iteration"); // Determine largest eigenvalue Lapack<ValueType_>::sterf(*effIter, alpha_host, beta_host); *shift = -alpha_host[*effIter-1]; //std::cout << *shift <<std::endl; // ------------------------------------------------------- // Compute eigenvectors of shifted matrix // ------------------------------------------------------- // Obtain tridiagonal matrix with Lanczos *effIter = 0; //maxIter_curr = min(maxIter, restartIter); status = performLanczosIteration<IndexType_, ValueType_> (A, effIter, maxIter_curr, *shift, 0, reorthogonalize, alpha_host, beta_host, lanczosVecs_dev, work_dev); if(status) WARNING("error in Lanczos iteration"); *totalIter += *effIter; // Apply Lanczos method until convergence shiftLower = 1; shiftUpper = -1; while(*totalIter<maxIter && beta_host[*effIter-1]>tol*shiftLower) { // Determine number of restart steps // Number of steps must be even due to Francis algorithm IndexType_ iter_new = nEigVecs+1; if(restartIter-(maxIter-*totalIter) > nEigVecs+1) iter_new = restartIter-(maxIter-*totalIter); if((restartIter-iter_new) % 2) iter_new -= 1; if(iter_new==*effIter) break; // Implicit restart of Lanczos method status = lanczosRestart<IndexType_, ValueType_> (n, *effIter, iter_new, &shiftUpper, &shiftLower, alpha_host, beta_host, Z_host, work_host, lanczosVecs_dev, work_dev, true); if(status) WARNING("error in Lanczos implicit restart"); *effIter = iter_new; // Check for convergence if(beta_host[*effIter-1] <= tol*fabs(shiftLower)) break; // Proceed with Lanczos method //maxIter_curr = min(restartIter, maxIter-*totalIter+*effIter); status = performLanczosIteration<IndexType_, ValueType_> (A, effIter, maxIter_curr, *shift, tol*fabs(shiftLower), reorthogonalize, alpha_host, beta_host, lanczosVecs_dev, work_dev); if(status) WARNING("error in Lanczos iteration"); *totalIter += *effIter-iter_new; } // Warning if Lanczos has failed to converge if(beta_host[*effIter-1] > tol*fabs(shiftLower)) { WARNING("implicitly restarted Lanczos failed to converge"); } // Solve tridiagonal system memcpy(work_host+2*(*effIter), alpha_host, (*effIter)*sizeof(ValueType_)); memcpy(work_host+3*(*effIter), beta_host, (*effIter-1)*sizeof(ValueType_)); Lapack<ValueType_>::steqr('I', *effIter, work_host+2*(*effIter), work_host+3*(*effIter), Z_host, *effIter, work_host); // Obtain desired eigenvalues by applying shift for(i=0; i<*effIter; ++i) work_host[i+2*(*effIter)] -= *shift; for(i=*effIter; i<nEigVecs; ++i) work_host[i+2*(*effIter)] = 0; // Copy results to device memory CHECK_CUDA(hipMemcpy(eigVals_dev, work_host+2*(*effIter), nEigVecs*sizeof(ValueType_), hipMemcpyHostToDevice)); //for (int i = 0; i < nEigVecs; ++i) //{ // std::cout <<*(work_host+(2*(*effIter)+i))<< std::endl; //} CHECK_CUDA(hipMemcpy(work_dev, Z_host, (*effIter)*nEigVecs*sizeof(ValueType_), hipMemcpyHostToDevice)); // Convert eigenvectors from Lanczos basis to standard basis Cublas::gemm(false, false, n, nEigVecs, *effIter, &one, lanczosVecs_dev, n, work_dev, *effIter, &zero, 
eigVecs_dev, n); // Clean up and exit free(Z_host); free(work_host); #ifdef USE_CURAND CHECK_CURAND(hiprandDestroyGenerator(randGen)); #endif return NVGRAPH_OK; } /// Compute smallest eigenvectors of symmetric matrix /** Computes eigenvalues and eigenvectors that are least * positive. If matrix is positive definite or positive * semidefinite, the computed eigenvalues are smallest in * magnitude. * * The largest eigenvalue is estimated by performing several * Lanczos iterations. An implicitly restarted Lanczos method is * then applied to A+s*I, where s is negative the largest * eigenvalue. * * CNMEM must be initialized before calling this function. * * @param A Matrix. * @param nEigVecs Number of eigenvectors to compute. * @param maxIter Maximum number of Lanczos steps. Does not include * Lanczos steps used to estimate largest eigenvalue. * @param restartIter Maximum size of Lanczos system before * performing an implicit restart. Should be at least 4. * @param tol Convergence tolerance. Lanczos iteration will * terminate when the residual norm is less than tol*theta, where * theta is an estimate for the smallest unwanted eigenvalue * (i.e. the (nEigVecs+1)th smallest eigenvalue). * @param reorthogonalize Whether to reorthogonalize Lanczos * vectors. * @param iter On exit, pointer to total number of Lanczos * iterations performed. Does not include Lanczos steps used to * estimate largest eigenvalue. * @param eigVals_dev (Output, device memory, nEigVecs entries) * Smallest eigenvalues of matrix. * @param eigVecs_dev (Output, device memory, n*nEigVecs entries) * Eigenvectors corresponding to smallest eigenvalues of * matrix. Vectors are stored as columns of a column-major matrix * with dimensions n x nEigVecs. * @return NVGRAPH error flag. */ template <typename IndexType_, typename ValueType_> NVGRAPH_ERROR computeSmallestEigenvectors(const Matrix<IndexType_,ValueType_> & A, IndexType_ nEigVecs, IndexType_ maxIter, IndexType_ restartIter, ValueType_ tol, bool reorthogonalize, IndexType_ & iter, ValueType_ * __restrict__ eigVals_dev, ValueType_ * __restrict__ eigVecs_dev) { // CUDA stream // TODO: handle non-zero streams hipStream_t stream = 0; // Matrix dimension IndexType_ n = A.n; // Check that parameters are valid if(A.m != A.n) { WARNING("invalid parameter (matrix is not square)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(nEigVecs < 1) { WARNING("invalid parameter (nEigVecs<1)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(restartIter < 1) { WARNING("invalid parameter (restartIter<4)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(tol < 0) { WARNING("invalid parameter (tol<0)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(nEigVecs > n) { WARNING("invalid parameters (nEigVecs>n)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(maxIter < nEigVecs) { WARNING("invalid parameters (maxIter<nEigVecs)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(restartIter < nEigVecs) { WARNING("invalid parameters (restartIter<nEigVecs)"); return NVGRAPH_ERR_BAD_PARAMETERS; } // Allocate memory ValueType_ * alpha_host = (ValueType_*) malloc(restartIter*sizeof(ValueType_)); ValueType_ * beta_host = (ValueType_*) malloc(restartIter*sizeof(ValueType_)); Vector<ValueType_> lanczosVecs_dev(n*(restartIter+1), stream); Vector<ValueType_> work_dev((n+restartIter)*restartIter, stream); // Perform Lanczos method IndexType_ effIter; ValueType_ shift; NVGRAPH_ERROR status = computeSmallestEigenvectors(&A, nEigVecs, maxIter, restartIter, tol, reorthogonalize, &effIter, &iter, &shift, alpha_host, beta_host, lanczosVecs_dev.raw(), work_dev.raw(), 
eigVals_dev, eigVecs_dev);

  // Clean up and return
  free(alpha_host);
  free(beta_host);
  return status;

}

// =========================================================
// Eigensolver
// =========================================================

/// Compute largest eigenvectors of symmetric matrix
/** Computes eigenvalues and eigenvectors that are most
 *  positive. If matrix is positive definite or positive
 *  semidefinite, the computed eigenvalues are largest in
 *  magnitude.
 *
 *  The largest eigenvalue is estimated by performing several
 *  Lanczos iterations. An implicitly restarted Lanczos method is
 *  then applied.
 *
 *  @param A Matrix.
 *  @param nEigVecs Number of eigenvectors to compute.
 *  @param maxIter Maximum number of Lanczos steps.
 *  @param restartIter Maximum size of Lanczos system before
 *    performing an implicit restart. Should be at least 4.
 *  @param tol Convergence tolerance. Lanczos iteration will
 *    terminate when the residual norm is less than tol*theta, where
 *    theta is an estimate for the largest unwanted eigenvalue
 *    (i.e. the (nEigVecs+1)th largest eigenvalue).
 *  @param reorthogonalize Whether to reorthogonalize Lanczos
 *    vectors.
 *  @param effIter On exit, pointer to final size of Lanczos system.
 *  @param totalIter On exit, pointer to total number of Lanczos
 *    iterations performed.
 *  @param alpha_host (Output, host memory, restartIter entries)
 *    Diagonal entries of Lanczos system.
 *  @param beta_host (Output, host memory, restartIter entries)
 *    Off-diagonal entries of Lanczos system.
 *  @param lanczosVecs_dev (Output, device memory, n*(restartIter+1)
 *    entries) Lanczos vectors. Vectors are stored as columns of a
 *    column-major matrix with dimensions n x (restartIter+1).
 *  @param work_dev (Output, device memory,
 *    (n+restartIter)*restartIter entries) Workspace.
 *  @param eigVals_dev (Output, device memory, nEigVecs entries)
 *    Largest eigenvalues of matrix.
 *  @param eigVecs_dev (Output, device memory, n*nEigVecs entries)
 *    Eigenvectors corresponding to largest eigenvalues of
 *    matrix. Vectors are stored as columns of a column-major matrix
 *    with dimensions n x nEigVecs.
 *  @return NVGRAPH error flag.
*/ template <typename IndexType_, typename ValueType_> NVGRAPH_ERROR computeLargestEigenvectors(const Matrix<IndexType_,ValueType_> * A, IndexType_ nEigVecs, IndexType_ maxIter, IndexType_ restartIter, ValueType_ tol, bool reorthogonalize, IndexType_ * effIter, IndexType_ * totalIter, ValueType_ * __restrict__ alpha_host, ValueType_ * __restrict__ beta_host, ValueType_ * __restrict__ lanczosVecs_dev, ValueType_ * __restrict__ work_dev, ValueType_ * __restrict__ eigVals_dev, ValueType_ * __restrict__ eigVecs_dev) { // ------------------------------------------------------- // Variable declaration // ------------------------------------------------------- // Useful constants const ValueType_ one = 1; const ValueType_ zero = 0; // Matrix dimension IndexType_ n = A->n; // Lanczos iteration counters IndexType_ maxIter_curr = restartIter; // Maximum size of Lanczos system // Status flags int status; // Loop index IndexType_ i; // Host memory ValueType_ * Z_host; // Eigenvectors in Lanczos basis ValueType_ * work_host; // Workspace // ------------------------------------------------------- // Check that LAPACK is enabled // ------------------------------------------------------- //Lapack<ValueType_>::check_lapack_enabled(); // ------------------------------------------------------- // Check that parameters are valid // ------------------------------------------------------- if(A->m != A->n) { WARNING("invalid parameter (matrix is not square)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(nEigVecs < 1) { WARNING("invalid parameter (nEigVecs<1)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(restartIter < 1) { WARNING("invalid parameter (restartIter<4)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(tol < 0) { WARNING("invalid parameter (tol<0)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(nEigVecs > n) { WARNING("invalid parameters (nEigVecs>n)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(maxIter < nEigVecs) { WARNING("invalid parameters (maxIter<nEigVecs)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(restartIter <= nEigVecs) { WARNING("invalid parameters (restartIter<=nEigVecs)"); return NVGRAPH_ERR_BAD_PARAMETERS; } // ------------------------------------------------------- // Variable initialization // ------------------------------------------------------- // Total number of Lanczos iterations *totalIter = 0; // Allocate host memory Z_host = (ValueType_*) malloc(restartIter*restartIter *sizeof(ValueType_)); if(Z_host==NULL) WARNING("could not allocate host memory"); work_host = (ValueType_*) malloc(4*restartIter*sizeof(ValueType_)); if(work_host==NULL) WARNING("could not allocate host memory"); // Initialize cuBLAS Cublas::set_pointer_mode_host(); // ------------------------------------------------------- // Compute largest eigenvalue // ------------------------------------------------------- #ifdef USE_CURAND // Random number generator hiprandGenerator_t randGen; // Initialize random number generator CHECK_CURAND(hiprandCreateGenerator(&randGen, HIPRAND_RNG_PSEUDO_PHILOX4_32_10)); CHECK_CURAND(hiprandSetPseudoRandomGeneratorSeed(randGen, 123456)); // Initialize initial Lanczos vector CHECK_CURAND(curandGenerateNormalX(randGen, lanczosVecs_dev, n+n%2, zero, one)); ValueType_ normQ1 = Cublas::nrm2(n, lanczosVecs_dev, 1); Cublas::scal(n, 1/normQ1, lanczosVecs_dev, 1); #else fill_raw_vec (lanczosVecs_dev, n, (ValueType_)1.0/n); // doesn't work #endif // Estimate number of Lanczos iterations // See bounds in Kuczynski and Wozniakowski (1992). 
//const ValueType_ relError = 0.25; // Relative error //const ValueType_ failProb = 1e-4; // Probability of failure //maxIter_curr = log(n/pow(failProb,2))/(4*std::sqrt(relError)) + 1; //maxIter_curr = min(maxIter_curr, restartIter); // Obtain tridiagonal matrix with Lanczos *effIter = 0; ValueType_ shift_val=0.0; ValueType_ *shift = &shift_val; //maxIter_curr = min(maxIter, restartIter); status = performLanczosIteration<IndexType_, ValueType_> (A, effIter, maxIter_curr, *shift, 0, reorthogonalize, alpha_host, beta_host, lanczosVecs_dev, work_dev); if(status) WARNING("error in Lanczos iteration"); *totalIter += *effIter; // Apply Lanczos method until convergence ValueType_ shiftLower = 1; ValueType_ shiftUpper = -1; while(*totalIter<maxIter && beta_host[*effIter-1]>tol*shiftLower) { // Determine number of restart steps // Number of steps must be even due to Francis algorithm IndexType_ iter_new = nEigVecs+1; if(restartIter-(maxIter-*totalIter) > nEigVecs+1) iter_new = restartIter-(maxIter-*totalIter); if((restartIter-iter_new) % 2) iter_new -= 1; if(iter_new==*effIter) break; // Implicit restart of Lanczos method status = lanczosRestart<IndexType_, ValueType_> (n, *effIter, iter_new, &shiftUpper, &shiftLower, alpha_host, beta_host, Z_host, work_host, lanczosVecs_dev, work_dev, false); if(status) WARNING("error in Lanczos implicit restart"); *effIter = iter_new; // Check for convergence if(beta_host[*effIter-1] <= tol*fabs(shiftLower)) break; // Proceed with Lanczos method //maxIter_curr = min(restartIter, maxIter-*totalIter+*effIter); status = performLanczosIteration<IndexType_, ValueType_> (A, effIter, maxIter_curr, *shift, tol*fabs(shiftLower), reorthogonalize, alpha_host, beta_host, lanczosVecs_dev, work_dev); if(status) WARNING("error in Lanczos iteration"); *totalIter += *effIter-iter_new; } // Warning if Lanczos has failed to converge if(beta_host[*effIter-1] > tol*fabs(shiftLower)) { WARNING("implicitly restarted Lanczos failed to converge"); } for (int i = 0; i < restartIter; ++i) { for (int j = 0; j < restartIter; ++j) Z_host[i*restartIter+j] = 0; } // Solve tridiagonal system memcpy(work_host+2*(*effIter), alpha_host, (*effIter)*sizeof(ValueType_)); memcpy(work_host+3*(*effIter), beta_host, (*effIter-1)*sizeof(ValueType_)); Lapack<ValueType_>::steqr('I', *effIter, work_host+2*(*effIter), work_host+3*(*effIter), Z_host, *effIter, work_host); // note: We need to pick the top nEigVecs eigenvalues // but effItter can be larger than nEigVecs // hence we add an offset for that case, because we want to access top nEigVecs eigenpairs in the matrix of size effIter. 
// remember the array is sorted, so it is not needed for smallest eigenvalues case because the first ones are the smallest ones
    IndexType_ top_eigenparis_idx_offset = *effIter - nEigVecs;

    //Debug : print nEigVecs largest eigenvalues
    //for (int i = top_eigenparis_idx_offset; i < *effIter; ++i)
    //  std::cout <<*(work_host+(2*(*effIter)+i))<< " ";
    //std::cout <<std::endl;

    //Debug : print nEigVecs largest eigenvectors
    //for (int i = top_eigenparis_idx_offset; i < *effIter; ++i)
    //{
    //  for (int j = 0; j < *effIter; ++j)
    //    std::cout <<Z_host[i*(*effIter)+j]<< " ";
    //  std::cout <<std::endl;
    //}

    // Obtain desired eigenvalues by applying shift
    for(i=0; i<*effIter; ++i)
      work_host[i+2*(*effIter)] -= *shift;

    for(i=0; i<top_eigenparis_idx_offset; ++i)
      work_host[i+2*(*effIter)] = 0;

    // Copy results to device memory
    // skip smallest eigenvalue if needed
    CHECK_CUDA(hipMemcpy(eigVals_dev, work_host+2*(*effIter)+top_eigenparis_idx_offset,
                         nEigVecs*sizeof(ValueType_), hipMemcpyHostToDevice));

    // skip smallest eigenvector if needed
    CHECK_CUDA(hipMemcpy(work_dev, Z_host+(top_eigenparis_idx_offset*(*effIter)),
                         (*effIter)*nEigVecs*sizeof(ValueType_), hipMemcpyHostToDevice));

    // Convert eigenvectors from Lanczos basis to standard basis
    Cublas::gemm(false, false, n, nEigVecs, *effIter,
                 &one, lanczosVecs_dev, n, work_dev, *effIter,
                 &zero, eigVecs_dev, n);

    // Clean up and exit
    free(Z_host);
    free(work_host);
    #ifdef USE_CURAND
    CHECK_CURAND(hiprandDestroyGenerator(randGen));
    #endif
    return NVGRAPH_OK;

  }

/// Compute largest eigenvectors of symmetric matrix
/** Computes eigenvalues and eigenvectors that are most
 *  positive. If matrix is positive definite or positive
 *  semidefinite, the computed eigenvalues are largest in
 *  magnitude.
 *
 *  The largest eigenvalue is estimated by performing several
 *  Lanczos iterations. An implicitly restarted Lanczos method is
 *  then applied.
 *
 *  CNMEM must be initialized before calling this function.
 *
 *  @param A Matrix.
 *  @param nEigVecs Number of eigenvectors to compute.
 *  @param maxIter Maximum number of Lanczos steps. Does not include
 *    Lanczos steps used to estimate largest eigenvalue.
 *  @param restartIter Maximum size of Lanczos system before
 *    performing an implicit restart. Should be at least 4.
 *  @param tol Convergence tolerance. Lanczos iteration will
 *    terminate when the residual norm is less than tol*theta, where
 *    theta is an estimate for the largest unwanted eigenvalue
 *    (i.e. the (nEigVecs+1)th largest eigenvalue).
 *  @param reorthogonalize Whether to reorthogonalize Lanczos
 *    vectors.
 *  @param iter On exit, pointer to total number of Lanczos
 *    iterations performed. Does not include Lanczos steps used to
 *    estimate largest eigenvalue.
 *  @param eigVals_dev (Output, device memory, nEigVecs entries)
 *    Largest eigenvalues of matrix.
 *  @param eigVecs_dev (Output, device memory, n*nEigVecs entries)
 *    Eigenvectors corresponding to largest eigenvalues of
 *    matrix. Vectors are stored as columns of a column-major matrix
 *    with dimensions n x nEigVecs.
 *  @return NVGRAPH error flag.
*/ template <typename IndexType_, typename ValueType_> NVGRAPH_ERROR computeLargestEigenvectors(const Matrix<IndexType_,ValueType_> & A, IndexType_ nEigVecs, IndexType_ maxIter, IndexType_ restartIter, ValueType_ tol, bool reorthogonalize, IndexType_ & iter, ValueType_ * __restrict__ eigVals_dev, ValueType_ * __restrict__ eigVecs_dev) { // CUDA stream // TODO: handle non-zero streams hipStream_t stream = 0; // Matrix dimension IndexType_ n = A.n; // Check that parameters are valid if(A.m != A.n) { WARNING("invalid parameter (matrix is not square)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(nEigVecs < 1) { WARNING("invalid parameter (nEigVecs<1)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(restartIter < 1) { WARNING("invalid parameter (restartIter<4)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(tol < 0) { WARNING("invalid parameter (tol<0)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(nEigVecs > n) { WARNING("invalid parameters (nEigVecs>n)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(maxIter < nEigVecs) { WARNING("invalid parameters (maxIter<nEigVecs)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(restartIter < nEigVecs) { WARNING("invalid parameters (restartIter<nEigVecs)"); return NVGRAPH_ERR_BAD_PARAMETERS; } // Allocate memory ValueType_ * alpha_host = (ValueType_*) malloc(restartIter*sizeof(ValueType_)); ValueType_ * beta_host = (ValueType_*) malloc(restartIter*sizeof(ValueType_)); Vector<ValueType_> lanczosVecs_dev(n*(restartIter+1), stream); Vector<ValueType_> work_dev((n+restartIter)*restartIter, stream); // Perform Lanczos method IndexType_ effIter; NVGRAPH_ERROR status = computeLargestEigenvectors(&A, nEigVecs, maxIter, restartIter, tol, reorthogonalize, &effIter, &iter, alpha_host, beta_host, lanczosVecs_dev.raw(), work_dev.raw(), eigVals_dev, eigVecs_dev); // Clean up and return free(alpha_host); free(beta_host); return status; } // ========================================================= // Explicit instantiation // ========================================================= template NVGRAPH_ERROR computeSmallestEigenvectors<int,float> (const Matrix<int,float> * A, int nEigVecs, int maxIter, int restartIter, float tol, bool reorthogonalize, int * iter, int * totalIter, float * shift, float * __restrict__ alpha_host, float * __restrict__ beta_host, float * __restrict__ lanczosVecs_dev, float * __restrict__ work_dev, float * __restrict__ eigVals_dev, float * __restrict__ eigVecs_dev); template NVGRAPH_ERROR computeSmallestEigenvectors<int,double> (const Matrix<int,double> * A, int nEigVecs, int maxIter, int restartIter, double tol, bool reorthogonalize, int * iter, int * totalIter, double * shift, double * __restrict__ alpha_host, double * __restrict__ beta_host, double * __restrict__ lanczosVecs_dev, double * __restrict__ work_dev, double * __restrict__ eigVals_dev, double * __restrict__ eigVecs_dev); template NVGRAPH_ERROR computeSmallestEigenvectors<int, float> (const Matrix<int,float> & A, int nEigVecs, int maxIter, int restartIter, float tol, bool reorthogonalize, int & iter, float * __restrict__ eigVals_dev, float * __restrict__ eigVecs_dev); template NVGRAPH_ERROR computeSmallestEigenvectors<int, double> (const Matrix<int,double> & A, int nEigVecs, int maxIter, int restartIter, double tol, bool reorthogonalize, int & iter, double * __restrict__ eigVals_dev, double * __restrict__ eigVecs_dev); template NVGRAPH_ERROR computeLargestEigenvectors<int,float> (const Matrix<int,float> * A, int nEigVecs, int maxIter, int restartIter, float tol, bool reorthogonalize, int * iter, int * totalIter, 
float * __restrict__ alpha_host, float * __restrict__ beta_host, float * __restrict__ lanczosVecs_dev, float * __restrict__ work_dev, float * __restrict__ eigVals_dev, float * __restrict__ eigVecs_dev); template NVGRAPH_ERROR computeLargestEigenvectors<int,double> (const Matrix<int,double> * A, int nEigVecs, int maxIter, int restartIter, double tol, bool reorthogonalize, int * iter, int * totalIter, double * __restrict__ alpha_host, double * __restrict__ beta_host, double * __restrict__ lanczosVecs_dev, double * __restrict__ work_dev, double * __restrict__ eigVals_dev, double * __restrict__ eigVecs_dev); template NVGRAPH_ERROR computeLargestEigenvectors<int, float> (const Matrix<int,float> & A, int nEigVecs, int maxIter, int restartIter, float tol, bool reorthogonalize, int & iter, float * __restrict__ eigVals_dev, float * __restrict__ eigVecs_dev); template NVGRAPH_ERROR computeLargestEigenvectors<int, double> (const Matrix<int,double> & A, int nEigVecs, int maxIter, int restartIter, double tol, bool reorthogonalize, int & iter, double * __restrict__ eigVals_dev, double * __restrict__ eigVecs_dev); } //#endif //NVGRAPH_PARTITION
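// ---------------------------------------------------------------------------
// A minimal usage sketch, assuming a Matrix<int,float> wrapper (e.g. a graph
// Laplacian in CSR form) has already been constructed elsewhere and that the
// caller supplies preallocated device buffers. The helper name
// exampleSmallestEigenpairs and the parameter values are hypothetical; they
// are not part of the instantiations above. computeLargestEigenvectors
// follows the same calling convention.
// ---------------------------------------------------------------------------
using namespace nvgraph;

static NVGRAPH_ERROR exampleSmallestEigenpairs(const Matrix<int, float> &A,
                                               float *eigVals_dev,  // device, nEigVecs entries
                                               float *eigVecs_dev)  // device, A.n x nEigVecs, column-major
{
  const int   nEigVecs    = 4;              // number of eigenpairs wanted
  const int   maxIter     = 200;            // total Lanczos steps allowed
  const int   restartIter = 8 * nEigVecs;   // Lanczos system size before an implicit restart (>= 4)
  const float tol         = 1e-4f;          // convergence tolerance
  int         iter        = 0;              // set to the total iteration count on exit

  // On success the nEigVecs least-positive eigenvalues land in eigVals_dev and
  // the corresponding eigenvectors in eigVecs_dev.
  return computeSmallestEigenvectors(A, nEigVecs, maxIter, restartIter, tol,
                                     /*reorthogonalize=*/true, iter,
                                     eigVals_dev, eigVecs_dev);
}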
305e26350b91c9d91ddcfe6d9d9ea813eafafbf9.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ //#ifdef NVGRAPH_PARTITION #define _USE_MATH_DEFINES #include <math.h> #include "lanczos.hxx" #include <stdio.h> #include <time.h> #include <cuda.h> #define USE_CURAND 1 #ifdef USE_CURAND #include <curand.h> #endif #include "nvgraph_error.hxx" #include "nvgraph_vector.hxx" #include "nvgraph_vector_kernels.hxx" #include "nvgraph_cublas.hxx" #include "nvgraph_lapack.hxx" #include "debug_macros.h" // ========================================================= // Useful macros // ========================================================= // Get index of matrix entry #define IDX(i,j,lda) ((i)+(j)*(lda)) // ========================================================= // Macros and functions for cuRAND // ========================================================= //#ifdef USE_CURAND //namespace { // // /// Get message string from cuRAND status code // //static // //const char* curandGetErrorString(curandStatus_t e) { // // switch(e) { // // case CURAND_STATUS_SUCCESS: // // return "CURAND_STATUS_SUCCESS"; // // case CURAND_STATUS_VERSION_MISMATCH: // // return "CURAND_STATUS_VERSION_MISMATCH"; // // case CURAND_STATUS_NOT_INITIALIZED: // // return "CURAND_STATUS_NOT_INITIALIZED"; // // case CURAND_STATUS_ALLOCATION_FAILED: // // return "CURAND_STATUS_ALLOCATION_FAILED"; // // case CURAND_STATUS_TYPE_ERROR: // // return "CURAND_STATUS_TYPE_ERROR"; // // case CURAND_STATUS_OUT_OF_RANGE: // // return "CURAND_STATUS_OUT_OF_RANGE"; // // case CURAND_STATUS_LENGTH_NOT_MULTIPLE: // // return "CURAND_STATUS_LENGTH_NOT_MULTIPLE"; // // case CURAND_STATUS_DOUBLE_PRECISION_REQUIRED: // // return "CURAND_STATUS_DOUBLE_PRECISION_REQUIRED"; // // case CURAND_STATUS_LAUNCH_FAILURE: // // return "CURAND_STATUS_LAUNCH_FAILURE"; // // case CURAND_STATUS_PREEXISTING_FAILURE: // // return "CURAND_STATUS_PREEXISTING_FAILURE"; // // case CURAND_STATUS_INITIALIZATION_FAILED: // // return "CURAND_STATUS_INITIALIZATION_FAILED"; // // case CURAND_STATUS_ARCH_MISMATCH: // // return "CURAND_STATUS_ARCH_MISMATCH"; // // case CURAND_STATUS_INTERNAL_ERROR: // // return "CURAND_STATUS_INTERNAL_ERROR"; // // default: // // return "unknown cuRAND error"; // // } // //} // // // curandGeneratorNormalX // inline static // curandStatus_t // curandGenerateNormalX(curandGenerator_t generator, // float * outputPtr, size_t n, // float mean, float stddev) { // return curandGenerateNormal(generator, outputPtr, n, mean, stddev); // } // inline static // curandStatus_t // curandGenerateNormalX(curandGenerator_t generator, // double * outputPtr, size_t n, // double mean, double stddev) { // return curandGenerateNormalDouble(generator, outputPtr, // n, mean, stddev); // } // //} //#endif namespace nvgraph { namespace { // ========================================================= // Helper functions // ========================================================= /// Perform Lanczos iteration /** Lanczos iteration is performed on a shifted matrix A+shift*I. 
* * @param A Matrix. * @param iter Pointer to current Lanczos iteration. On exit, the * variable is set equal to the final Lanczos iteration. * @param maxIter Maximum Lanczos iteration. This function will * perform a maximum of maxIter-*iter iterations. * @param shift Matrix shift. * @param tol Convergence tolerance. Lanczos iteration will * terminate when the residual norm (i.e. entry in beta_host) is * less than tol. * @param reorthogonalize Whether to reorthogonalize Lanczos * vectors. * @param alpha_host (Output, host memory, maxIter entries) * Diagonal entries of Lanczos system. * @param beta_host (Output, host memory, maxIter entries) * Off-diagonal entries of Lanczos system. * @param lanczosVecs_dev (Input/output, device memory, * n*(maxIter+1) entries) Lanczos vectors. Vectors are stored as * columns of a column-major matrix with dimensions * n x (maxIter+1). * @param work_dev (Output, device memory, maxIter entries) * Workspace. Not needed if full reorthogonalization is disabled. * @return Zero if successful. Otherwise non-zero. */ template <typename IndexType_, typename ValueType_> static int performLanczosIteration(const Matrix<IndexType_, ValueType_> * A, IndexType_ * iter, IndexType_ maxIter, ValueType_ shift, ValueType_ tol, bool reorthogonalize, ValueType_ * __restrict__ alpha_host, ValueType_ * __restrict__ beta_host, ValueType_ * __restrict__ lanczosVecs_dev, ValueType_ * __restrict__ work_dev) { // ------------------------------------------------------- // Variable declaration // ------------------------------------------------------- // Useful variables const ValueType_ one = 1; const ValueType_ negOne = -1; const ValueType_ zero = 0; IndexType_ n = A->n; // ------------------------------------------------------- // Compute second Lanczos vector // ------------------------------------------------------- if(*iter<=0) { *iter = 1; // Apply matrix if(shift != 0) CHECK_CUDA(cudaMemcpyAsync(lanczosVecs_dev+n, lanczosVecs_dev, n*sizeof(ValueType_), cudaMemcpyDeviceToDevice)); A->mv(1, lanczosVecs_dev, shift, lanczosVecs_dev+n); // Orthogonalize Lanczos vector Cublas::dot(n, lanczosVecs_dev, 1, lanczosVecs_dev+IDX(0,1,n), 1, alpha_host); Cublas::axpy(n, -alpha_host[0], lanczosVecs_dev, 1, lanczosVecs_dev+IDX(0,1,n), 1); beta_host[0] = Cublas::nrm2(n, lanczosVecs_dev+IDX(0,1,n), 1); // Check if Lanczos has converged if(beta_host[0] <= tol) return 0; // Normalize Lanczos vector Cublas::scal(n, 1/beta_host[0], lanczosVecs_dev+IDX(0,1,n), 1); } // ------------------------------------------------------- // Compute remaining Lanczos vectors // ------------------------------------------------------- while(*iter<maxIter) { ++(*iter); // Apply matrix if(shift != 0) CHECK_CUDA(cudaMemcpyAsync(lanczosVecs_dev+(*iter)*n, lanczosVecs_dev+(*iter-1)*n, n*sizeof(ValueType_), cudaMemcpyDeviceToDevice)); A->mv(1, lanczosVecs_dev+IDX(0,*iter-1,n), shift, lanczosVecs_dev+IDX(0,*iter,n)); // Full reorthogonalization // "Twice is enough" algorithm per Kahan and Parlett if(reorthogonalize) { Cublas::gemv(true, n, *iter, &one, lanczosVecs_dev, n, lanczosVecs_dev+IDX(0,*iter,n), 1, &zero, work_dev, 1); Cublas::gemv(false, n, *iter, &negOne, lanczosVecs_dev, n, work_dev, 1, &one, lanczosVecs_dev+IDX(0,*iter,n), 1); CHECK_CUDA(cudaMemcpyAsync(alpha_host+(*iter-1), work_dev+(*iter-1), sizeof(ValueType_), cudaMemcpyDeviceToHost)); Cublas::gemv(true, n, *iter, &one, lanczosVecs_dev, n, lanczosVecs_dev+IDX(0,*iter,n), 1, &zero, work_dev, 1); Cublas::gemv(false, n, *iter, &negOne, lanczosVecs_dev, n, 
work_dev, 1, &one, lanczosVecs_dev+IDX(0,*iter,n), 1); } // Orthogonalization with 3-term recurrence relation else { Cublas::dot(n, lanczosVecs_dev+IDX(0,*iter-1,n), 1, lanczosVecs_dev+IDX(0,*iter,n), 1, alpha_host+(*iter-1)); Cublas::axpy(n, -alpha_host[*iter-1], lanczosVecs_dev+IDX(0,*iter-1,n), 1, lanczosVecs_dev+IDX(0,*iter,n), 1); Cublas::axpy(n, -beta_host[*iter-2], lanczosVecs_dev+IDX(0,*iter-2,n), 1, lanczosVecs_dev+IDX(0,*iter,n), 1); } // Compute residual beta_host[*iter-1] = Cublas::nrm2(n, lanczosVecs_dev+IDX(0,*iter,n), 1); // Check if Lanczos has converged if(beta_host[*iter-1] <= tol) break; // Normalize Lanczos vector Cublas::scal(n, 1/beta_host[*iter-1], lanczosVecs_dev+IDX(0,*iter,n), 1); } CHECK_CUDA(cudaDeviceSynchronize()); return 0; } /// Find Householder transform for 3-dimensional system /** Given an input vector v=[x,y,z]', this function finds a * Householder transform P such that P*v is a multiple of * e_1=[1,0,0]'. The input vector v is overwritten with the * Householder vector such that P=I-2*v*v'. * * @param v (Input/output, host memory, 3 entries) Input * 3-dimensional vector. On exit, the vector is set to the * Householder vector. * @param Pv (Output, host memory, 1 entry) First entry of P*v * (here v is the input vector). Either equal to ||v||_2 or * -||v||_2. * @param P (Output, host memory, 9 entries) Householder transform * matrix. Matrix dimensions are 3 x 3. */ template <typename IndexType_, typename ValueType_> static void findHouseholder3(ValueType_ * v, ValueType_ * Pv, ValueType_ * P) { // Compute norm of vector *Pv = std::sqrt(v[0]*v[0]+v[1]*v[1]+v[2]*v[2]); // Choose whether to reflect to e_1 or -e_1 // This choice avoids catastrophic cancellation if(v[0] >= 0) *Pv = -(*Pv); v[0] -= *Pv; // Normalize Householder vector ValueType_ normHouseholder = std::sqrt(v[0]*v[0]+v[1]*v[1]+v[2]*v[2]); if(normHouseholder != 0) { v[0] /= normHouseholder; v[1] /= normHouseholder; v[2] /= normHouseholder; } else { v[0] = 0; v[1] = 0; v[2] = 0; } // Construct Householder matrix IndexType_ i, j; for(j=0; j<3; ++j) for(i=0; i<3; ++i) P[IDX(i,j,3)] = -2*v[i]*v[j]; for(i=0; i<3; ++i) P[IDX(i,i,3)] += 1; } /// Apply 3-dimensional Householder transform to 4 x 4 matrix /** The Householder transform is pre-applied to the top three rows * of the matrix and post-applied to the left three columns. The * 4 x 4 matrix is intended to contain the bulge that is produced * in the Francis QR algorithm. * * @param v (Input, host memory, 3 entries) Householder vector. * @param A (Input/output, host memory, 16 entries) 4 x 4 matrix. */ template <typename IndexType_, typename ValueType_> static void applyHouseholder3(const ValueType_ * v, ValueType_ * A) { // Loop indices IndexType_ i, j; // Dot product between Householder vector and matrix row/column ValueType_ vDotA; // Pre-apply Householder transform for(j=0; j<4; ++j) { vDotA = 0; for(i=0; i<3; ++i) vDotA += v[i]*A[IDX(i,j,4)]; for(i=0; i<3; ++i) A[IDX(i,j,4)] -= 2*v[i]*vDotA; } // Post-apply Householder transform for(i=0; i<4; ++i) { vDotA = 0; for(j=0; j<3; ++j) vDotA += A[IDX(i,j,4)]*v[j]; for(j=0; j<3; ++j) A[IDX(i,j,4)] -= 2*vDotA*v[j]; } } /// Perform one step of Francis QR algorithm /** Equivalent to two steps of the classical QR algorithm on a * tridiagonal matrix. * * @param n Matrix dimension. * @param shift1 QR algorithm shift. * @param shift2 QR algorithm shift. * @param alpha (Input/output, host memory, n entries) Diagonal * entries of tridiagonal matrix. 
* @param beta (Input/output, host memory, n-1 entries) * Off-diagonal entries of tridiagonal matrix. * @param V (Input/output, host memory, n*n entries) Orthonormal * transforms from previous steps of QR algorithm. Matrix * dimensions are n x n. On exit, the orthonormal transform from * this Francis QR step is post-applied to the matrix. * @param work (Output, host memory, 3*n entries) Workspace. * @return Zero if successful. Otherwise non-zero. */ template <typename IndexType_, typename ValueType_> static int francisQRIteration(IndexType_ n, ValueType_ shift1, ValueType_ shift2, ValueType_ * alpha, ValueType_ * beta, ValueType_ * V, ValueType_ * work) { // ------------------------------------------------------- // Variable declaration // ------------------------------------------------------- // Temporary storage of 4x4 bulge and Householder vector ValueType_ bulge[16]; // Householder vector ValueType_ householder[3]; // Householder matrix ValueType_ householderMatrix[3*3]; // Shifts are roots of the polynomial p(x)=x^2+b*x+c ValueType_ b = -shift1 - shift2; ValueType_ c = shift1*shift2; // Loop indices IndexType_ i, j, pos; // Temporary variable ValueType_ temp; // ------------------------------------------------------- // Implementation // ------------------------------------------------------- // Compute initial Householder transform householder[0] = alpha[0]*alpha[0] + beta[0]*beta[0] + b*alpha[0] + c; householder[1] = beta[0]*(alpha[0]+alpha[1]+b); householder[2] = beta[0]*beta[1]; findHouseholder3<IndexType_,ValueType_>(householder, &temp, householderMatrix); // Apply initial Householder transform to create bulge memset(bulge, 0, 16*sizeof(ValueType_)); for(i=0; i<4; ++i) bulge[IDX(i,i,4)] = alpha[i]; for(i=0; i<3; ++i) { bulge[IDX(i+1,i,4)] = beta[i]; bulge[IDX(i,i+1,4)] = beta[i]; } applyHouseholder3<IndexType_,ValueType_>(householder, bulge); Lapack<ValueType_>::gemm(false, false, n, 3, 3, 1, V, n, householderMatrix, 3, 0, work, n); memcpy(V, work, 3*n*sizeof(ValueType_)); // Chase bulge to bottom-right of matrix with Householder transforms for(pos=0; pos<n-4; ++pos) { // Move to next position alpha[pos] = bulge[IDX(0,0,4)]; householder[0] = bulge[IDX(1,0,4)]; householder[1] = bulge[IDX(2,0,4)]; householder[2] = bulge[IDX(3,0,4)]; for(j=0; j<3; ++j) for(i=0; i<3; ++i) bulge[IDX(i,j,4)] = bulge[IDX(i+1,j+1,4)]; bulge[IDX(3,0,4)] = 0; bulge[IDX(3,1,4)] = 0; bulge[IDX(3,2,4)] = beta[pos+3]; bulge[IDX(0,3,4)] = 0; bulge[IDX(1,3,4)] = 0; bulge[IDX(2,3,4)] = beta[pos+3]; bulge[IDX(3,3,4)] = alpha[pos+4]; // Apply Householder transform findHouseholder3<IndexType_,ValueType_>(householder, beta+pos, householderMatrix); applyHouseholder3<IndexType_,ValueType_>(householder, bulge); Lapack<ValueType_>::gemm(false, false, n, 3, 3, 1, V+IDX(0,pos+1,n), n, householderMatrix, 3, 0, work, n); memcpy(V+IDX(0,pos+1,n), work, 3*n*sizeof(ValueType_)); } // Apply penultimate Householder transform // Values in the last row and column are zero alpha[n-4] = bulge[IDX(0,0,4)]; householder[0] = bulge[IDX(1,0,4)]; householder[1] = bulge[IDX(2,0,4)]; householder[2] = bulge[IDX(3,0,4)]; for(j=0; j<3; ++j) for(i=0; i<3; ++i) bulge[IDX(i,j,4)] = bulge[IDX(i+1,j+1,4)]; bulge[IDX(3,0,4)] = 0; bulge[IDX(3,1,4)] = 0; bulge[IDX(3,2,4)] = 0; bulge[IDX(0,3,4)] = 0; bulge[IDX(1,3,4)] = 0; bulge[IDX(2,3,4)] = 0; bulge[IDX(3,3,4)] = 0; findHouseholder3<IndexType_,ValueType_>(householder, beta+n-4, householderMatrix); applyHouseholder3<IndexType_,ValueType_>(householder, bulge); Lapack<ValueType_>::gemm(false, false, n, 
3, 3, 1, V+IDX(0,n-3,n), n, householderMatrix, 3, 0, work, n); memcpy(V+IDX(0,n-3,n), work, 3*n*sizeof(ValueType_)); // Apply final Householder transform // Values in the last two rows and columns are zero alpha[n-3] = bulge[IDX(0,0,4)]; householder[0] = bulge[IDX(1,0,4)]; householder[1] = bulge[IDX(2,0,4)]; householder[2] = 0; for(j=0; j<3; ++j) for(i=0; i<3; ++i) bulge[IDX(i,j,4)] = bulge[IDX(i+1,j+1,4)]; findHouseholder3<IndexType_,ValueType_>(householder, beta+n-3, householderMatrix); applyHouseholder3<IndexType_,ValueType_>(householder, bulge); Lapack<ValueType_>::gemm(false, false, n, 2, 2, 1, V+IDX(0,n-2,n), n, householderMatrix, 3, 0, work, n); memcpy(V+IDX(0,n-2,n), work, 2*n*sizeof(ValueType_)); // Bulge has been eliminated alpha[n-2] = bulge[IDX(0,0,4)]; alpha[n-1] = bulge[IDX(1,1,4)]; beta[n-2] = bulge[IDX(1,0,4)]; return 0; } /// Perform implicit restart of Lanczos algorithm /** Shifts are Chebyshev nodes of unwanted region of matrix spectrum. * * @param n Matrix dimension. * @param iter Current Lanczos iteration. * @param iter_new Lanczos iteration after restart. * @param shiftUpper Pointer to upper bound for unwanted * region. Value is ignored if less than *shiftLower. If a * stronger upper bound has been found, the value is updated on * exit. * @param shiftLower Pointer to lower bound for unwanted * region. Value is ignored if greater than *shiftUpper. If a * stronger lower bound has been found, the value is updated on * exit. * @param alpha_host (Input/output, host memory, iter entries) * Diagonal entries of Lanczos system. * @param beta_host (Input/output, host memory, iter entries) * Off-diagonal entries of Lanczos system. * @param V_host (Output, host memory, iter*iter entries) * Orthonormal transform used to obtain restarted system. Matrix * dimensions are iter x iter. * @param work_host (Output, host memory, 4*iter entries) * Workspace. * @param lanczosVecs_dev (Input/output, device memory, n*(iter+1) * entries) Lanczos vectors. Vectors are stored as columns of a * column-major matrix with dimensions n x (iter+1). * @param work_dev (Output, device memory, (n+iter)*iter entries) * Workspace. 
*/ template <typename IndexType_, typename ValueType_> static int lanczosRestart(IndexType_ n, IndexType_ iter, IndexType_ iter_new, ValueType_ * shiftUpper, ValueType_ * shiftLower, ValueType_ * __restrict__ alpha_host, ValueType_ * __restrict__ beta_host, ValueType_ * __restrict__ V_host, ValueType_ * __restrict__ work_host, ValueType_ * __restrict__ lanczosVecs_dev, ValueType_ * __restrict__ work_dev, bool smallest_eig) { // ------------------------------------------------------- // Variable declaration // ------------------------------------------------------- // Useful constants const ValueType_ zero = 0; const ValueType_ one = 1; // Loop index IndexType_ i; // Number of implicit restart steps // Assumed to be even since each call to Francis algorithm is // equivalent to two calls of QR algorithm IndexType_ restartSteps = iter - iter_new; // Ritz values from Lanczos method ValueType_ * ritzVals_host = work_host + 3*iter; // Shifts for implicit restart ValueType_ * shifts_host; // Orthonormal matrix for similarity transform ValueType_ * V_dev = work_dev + n*iter; // ------------------------------------------------------- // Implementation // ------------------------------------------------------- // Compute Ritz values memcpy(ritzVals_host, alpha_host, iter*sizeof(ValueType_)); memcpy(work_host, beta_host, (iter-1)*sizeof(ValueType_)); Lapack<ValueType_>::sterf(iter, ritzVals_host, work_host); // Debug: Print largest eigenvalues //for (int i = iter-iter_new; i < iter; ++i) // std::cout <<*(ritzVals_host+i)<< " "; //std::cout <<std::endl; // Initialize similarity transform with identity matrix memset(V_host, 0, iter*iter*sizeof(ValueType_)); for(i=0; i<iter; ++i) V_host[IDX(i,i,iter)] = 1; // Determine interval to suppress eigenvalues if (smallest_eig) { if(*shiftLower > *shiftUpper) { *shiftUpper = ritzVals_host[iter-1]; *shiftLower = ritzVals_host[iter_new]; } else { *shiftUpper = max(*shiftUpper, ritzVals_host[iter-1]); *shiftLower = min(*shiftLower, ritzVals_host[iter_new]); } } else { if(*shiftLower > *shiftUpper) { *shiftUpper = ritzVals_host[iter-iter_new-1]; *shiftLower = ritzVals_host[0]; } else { *shiftUpper = max(*shiftUpper, ritzVals_host[iter-iter_new-1]); *shiftLower = min(*shiftLower, ritzVals_host[0]); } } // Calculate Chebyshev nodes as shifts shifts_host = ritzVals_host; for(i=0; i<restartSteps; ++i) { shifts_host[i] = cos((i+0.5)*static_cast<ValueType_>(M_PI)/restartSteps); shifts_host[i] *= 0.5*((*shiftUpper)-(*shiftLower)); shifts_host[i] += 0.5*((*shiftUpper)+(*shiftLower)); } // Apply Francis QR algorithm to implicitly restart Lanczos for(i=0; i<restartSteps; i+=2) if(francisQRIteration(iter, shifts_host[i], shifts_host[i+1], alpha_host, beta_host, V_host, work_host)) WARNING("error in implicitly shifted QR algorithm"); // Obtain new residual CHECK_CUDA(cudaMemcpyAsync(V_dev, V_host, iter*iter*sizeof(ValueType_), cudaMemcpyHostToDevice)); beta_host[iter-1] = beta_host[iter-1]*V_host[IDX(iter-1,iter_new-1,iter)]; Cublas::gemv(false, n, iter, beta_host+iter_new-1, lanczosVecs_dev, n, V_dev+IDX(0,iter_new,iter), 1, beta_host+iter-1, lanczosVecs_dev+IDX(0,iter,n), 1); // Obtain new Lanczos vectors Cublas::gemm(false, false, n, iter_new, iter, &one, lanczosVecs_dev, n, V_dev, iter, &zero, work_dev, n); CHECK_CUDA(cudaMemcpyAsync(lanczosVecs_dev, work_dev, n*iter_new*sizeof(ValueType_), cudaMemcpyDeviceToDevice)); // Normalize residual to obtain new Lanczos vector CHECK_CUDA(cudaMemcpyAsync(lanczosVecs_dev+IDX(0,iter_new,n), lanczosVecs_dev+IDX(0,iter,n), 
n*sizeof(ValueType_), cudaMemcpyDeviceToDevice)); beta_host[iter_new-1] = Cublas::nrm2(n, lanczosVecs_dev+IDX(0,iter_new,n), 1); Cublas::scal(n, 1/beta_host[iter_new-1], lanczosVecs_dev+IDX(0,iter_new,n), 1); return 0; } } // ========================================================= // Eigensolver // ========================================================= /// Compute smallest eigenvectors of symmetric matrix /** Computes eigenvalues and eigenvectors that are least * positive. If matrix is positive definite or positive * semidefinite, the computed eigenvalues are smallest in * magnitude. * * The largest eigenvalue is estimated by performing several * Lanczos iterations. An implicitly restarted Lanczos method is * then applied to A+s*I, where s is negative the largest * eigenvalue. * * @param A Matrix. * @param nEigVecs Number of eigenvectors to compute. * @param maxIter Maximum number of Lanczos steps. Does not include * Lanczos steps used to estimate largest eigenvalue. * @param restartIter Maximum size of Lanczos system before * performing an implicit restart. Should be at least 4. * @param tol Convergence tolerance. Lanczos iteration will * terminate when the residual norm is less than tol*theta, where * theta is an estimate for the smallest unwanted eigenvalue * (i.e. the (nEigVecs+1)th smallest eigenvalue). * @param reorthogonalize Whether to reorthogonalize Lanczos * vectors. * @param effIter On exit, pointer to final size of Lanczos system. * @param totalIter On exit, pointer to total number of Lanczos * iterations performed. Does not include Lanczos steps used to * estimate largest eigenvalue. * @param shift On exit, pointer to matrix shift (estimate for * largest eigenvalue). * @param alpha_host (Output, host memory, restartIter entries) * Diagonal entries of Lanczos system. * @param beta_host (Output, host memory, restartIter entries) * Off-diagonal entries of Lanczos system. * @param lanczosVecs_dev (Output, device memory, n*(restartIter+1) * entries) Lanczos vectors. Vectors are stored as columns of a * column-major matrix with dimensions n x (restartIter+1). * @param work_dev (Output, device memory, * (n+restartIter)*restartIter entries) Workspace. * @param eigVals_dev (Output, device memory, nEigVecs entries) * Largest eigenvalues of matrix. * @param eigVecs_dev (Output, device memory, n*nEigVecs entries) * Eigenvectors corresponding to smallest eigenvalues of * matrix. Vectors are stored as columns of a column-major matrix * with dimensions n x nEigVecs. * @return NVGRAPH error flag. 
*/ template <typename IndexType_, typename ValueType_> NVGRAPH_ERROR computeSmallestEigenvectors(const Matrix<IndexType_,ValueType_> * A, IndexType_ nEigVecs, IndexType_ maxIter, IndexType_ restartIter, ValueType_ tol, bool reorthogonalize, IndexType_ * effIter, IndexType_ * totalIter, ValueType_ * shift, ValueType_ * __restrict__ alpha_host, ValueType_ * __restrict__ beta_host, ValueType_ * __restrict__ lanczosVecs_dev, ValueType_ * __restrict__ work_dev, ValueType_ * __restrict__ eigVals_dev, ValueType_ * __restrict__ eigVecs_dev) { // ------------------------------------------------------- // Variable declaration // ------------------------------------------------------- // Useful constants const ValueType_ one = 1; const ValueType_ zero = 0; // Matrix dimension IndexType_ n = A->n; // Shift for implicit restart ValueType_ shiftUpper; ValueType_ shiftLower; // Lanczos iteration counters IndexType_ maxIter_curr = restartIter; // Maximum size of Lanczos system // Status flags int status; // Loop index IndexType_ i; // Host memory ValueType_ * Z_host; // Eigenvectors in Lanczos basis ValueType_ * work_host; // Workspace // ------------------------------------------------------- // Check that LAPACK is enabled // ------------------------------------------------------- //Lapack<ValueType_>::check_lapack_enabled(); // ------------------------------------------------------- // Check that parameters are valid // ------------------------------------------------------- if(A->m != A->n) { WARNING("invalid parameter (matrix is not square)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(nEigVecs < 1) { WARNING("invalid parameter (nEigVecs<1)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(restartIter < 1) { WARNING("invalid parameter (restartIter<4)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(tol < 0) { WARNING("invalid parameter (tol<0)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(nEigVecs > n) { WARNING("invalid parameters (nEigVecs>n)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(maxIter < nEigVecs) { WARNING("invalid parameters (maxIter<nEigVecs)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(restartIter < nEigVecs) { WARNING("invalid parameters (restartIter<nEigVecs)"); return NVGRAPH_ERR_BAD_PARAMETERS; } // ------------------------------------------------------- // Variable initialization // ------------------------------------------------------- // Total number of Lanczos iterations *totalIter = 0; // Allocate host memory Z_host = (ValueType_*) malloc(restartIter*restartIter *sizeof(ValueType_)); if(Z_host==NULL) WARNING("could not allocate host memory"); work_host = (ValueType_*) malloc(4*restartIter*sizeof(ValueType_)); if(work_host==NULL) WARNING("could not allocate host memory"); // Initialize cuBLAS Cublas::set_pointer_mode_host(); // ------------------------------------------------------- // Compute largest eigenvalue to determine shift // ------------------------------------------------------- #ifdef USE_CURAND // Random number generator curandGenerator_t randGen; // Initialize random number generator CHECK_CURAND(curandCreateGenerator(&randGen, CURAND_RNG_PSEUDO_PHILOX4_32_10)); CHECK_CURAND(curandSetPseudoRandomGeneratorSeed(randGen, 123456/*time(NULL)*/)); // Initialize initial Lanczos vector CHECK_CURAND(curandGenerateNormalX(randGen, lanczosVecs_dev, n+n%2, zero, one)); ValueType_ normQ1 = Cublas::nrm2(n, lanczosVecs_dev, 1); Cublas::scal(n, 1/normQ1, lanczosVecs_dev, 1); #else fill_raw_vec (lanczosVecs_dev, n, (ValueType_)1.0/n); // doesn't work #endif // Estimate number of Lanczos iterations // 
See bounds in Kuczynski and Wozniakowski (1992). //const ValueType_ relError = 0.25; // Relative error //const ValueType_ failProb = 1e-4; // Probability of failure //maxIter_curr = log(n/pow(failProb,2))/(4*std::sqrt(relError)) + 1; //maxIter_curr = min(maxIter_curr, restartIter); // Obtain tridiagonal matrix with Lanczos *effIter = 0; *shift = 0; status = performLanczosIteration<IndexType_, ValueType_> (A, effIter, maxIter_curr, *shift, 0.0, reorthogonalize, alpha_host, beta_host, lanczosVecs_dev, work_dev); if(status) WARNING("error in Lanczos iteration"); // Determine largest eigenvalue Lapack<ValueType_>::sterf(*effIter, alpha_host, beta_host); *shift = -alpha_host[*effIter-1]; //std::cout << *shift <<std::endl; // ------------------------------------------------------- // Compute eigenvectors of shifted matrix // ------------------------------------------------------- // Obtain tridiagonal matrix with Lanczos *effIter = 0; //maxIter_curr = min(maxIter, restartIter); status = performLanczosIteration<IndexType_, ValueType_> (A, effIter, maxIter_curr, *shift, 0, reorthogonalize, alpha_host, beta_host, lanczosVecs_dev, work_dev); if(status) WARNING("error in Lanczos iteration"); *totalIter += *effIter; // Apply Lanczos method until convergence shiftLower = 1; shiftUpper = -1; while(*totalIter<maxIter && beta_host[*effIter-1]>tol*shiftLower) { // Determine number of restart steps // Number of steps must be even due to Francis algorithm IndexType_ iter_new = nEigVecs+1; if(restartIter-(maxIter-*totalIter) > nEigVecs+1) iter_new = restartIter-(maxIter-*totalIter); if((restartIter-iter_new) % 2) iter_new -= 1; if(iter_new==*effIter) break; // Implicit restart of Lanczos method status = lanczosRestart<IndexType_, ValueType_> (n, *effIter, iter_new, &shiftUpper, &shiftLower, alpha_host, beta_host, Z_host, work_host, lanczosVecs_dev, work_dev, true); if(status) WARNING("error in Lanczos implicit restart"); *effIter = iter_new; // Check for convergence if(beta_host[*effIter-1] <= tol*fabs(shiftLower)) break; // Proceed with Lanczos method //maxIter_curr = min(restartIter, maxIter-*totalIter+*effIter); status = performLanczosIteration<IndexType_, ValueType_> (A, effIter, maxIter_curr, *shift, tol*fabs(shiftLower), reorthogonalize, alpha_host, beta_host, lanczosVecs_dev, work_dev); if(status) WARNING("error in Lanczos iteration"); *totalIter += *effIter-iter_new; } // Warning if Lanczos has failed to converge if(beta_host[*effIter-1] > tol*fabs(shiftLower)) { WARNING("implicitly restarted Lanczos failed to converge"); } // Solve tridiagonal system memcpy(work_host+2*(*effIter), alpha_host, (*effIter)*sizeof(ValueType_)); memcpy(work_host+3*(*effIter), beta_host, (*effIter-1)*sizeof(ValueType_)); Lapack<ValueType_>::steqr('I', *effIter, work_host+2*(*effIter), work_host+3*(*effIter), Z_host, *effIter, work_host); // Obtain desired eigenvalues by applying shift for(i=0; i<*effIter; ++i) work_host[i+2*(*effIter)] -= *shift; for(i=*effIter; i<nEigVecs; ++i) work_host[i+2*(*effIter)] = 0; // Copy results to device memory CHECK_CUDA(cudaMemcpy(eigVals_dev, work_host+2*(*effIter), nEigVecs*sizeof(ValueType_), cudaMemcpyHostToDevice)); //for (int i = 0; i < nEigVecs; ++i) //{ // std::cout <<*(work_host+(2*(*effIter)+i))<< std::endl; //} CHECK_CUDA(cudaMemcpy(work_dev, Z_host, (*effIter)*nEigVecs*sizeof(ValueType_), cudaMemcpyHostToDevice)); // Convert eigenvectors from Lanczos basis to standard basis Cublas::gemm(false, false, n, nEigVecs, *effIter, &one, lanczosVecs_dev, n, work_dev, *effIter, &zero, 
eigVecs_dev, n); // Clean up and exit free(Z_host); free(work_host); #ifdef USE_CURAND CHECK_CURAND(curandDestroyGenerator(randGen)); #endif return NVGRAPH_OK; } /// Compute smallest eigenvectors of symmetric matrix /** Computes eigenvalues and eigenvectors that are least * positive. If matrix is positive definite or positive * semidefinite, the computed eigenvalues are smallest in * magnitude. * * The largest eigenvalue is estimated by performing several * Lanczos iterations. An implicitly restarted Lanczos method is * then applied to A+s*I, where s is negative the largest * eigenvalue. * * CNMEM must be initialized before calling this function. * * @param A Matrix. * @param nEigVecs Number of eigenvectors to compute. * @param maxIter Maximum number of Lanczos steps. Does not include * Lanczos steps used to estimate largest eigenvalue. * @param restartIter Maximum size of Lanczos system before * performing an implicit restart. Should be at least 4. * @param tol Convergence tolerance. Lanczos iteration will * terminate when the residual norm is less than tol*theta, where * theta is an estimate for the smallest unwanted eigenvalue * (i.e. the (nEigVecs+1)th smallest eigenvalue). * @param reorthogonalize Whether to reorthogonalize Lanczos * vectors. * @param iter On exit, pointer to total number of Lanczos * iterations performed. Does not include Lanczos steps used to * estimate largest eigenvalue. * @param eigVals_dev (Output, device memory, nEigVecs entries) * Smallest eigenvalues of matrix. * @param eigVecs_dev (Output, device memory, n*nEigVecs entries) * Eigenvectors corresponding to smallest eigenvalues of * matrix. Vectors are stored as columns of a column-major matrix * with dimensions n x nEigVecs. * @return NVGRAPH error flag. */ template <typename IndexType_, typename ValueType_> NVGRAPH_ERROR computeSmallestEigenvectors(const Matrix<IndexType_,ValueType_> & A, IndexType_ nEigVecs, IndexType_ maxIter, IndexType_ restartIter, ValueType_ tol, bool reorthogonalize, IndexType_ & iter, ValueType_ * __restrict__ eigVals_dev, ValueType_ * __restrict__ eigVecs_dev) { // CUDA stream // TODO: handle non-zero streams cudaStream_t stream = 0; // Matrix dimension IndexType_ n = A.n; // Check that parameters are valid if(A.m != A.n) { WARNING("invalid parameter (matrix is not square)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(nEigVecs < 1) { WARNING("invalid parameter (nEigVecs<1)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(restartIter < 1) { WARNING("invalid parameter (restartIter<4)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(tol < 0) { WARNING("invalid parameter (tol<0)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(nEigVecs > n) { WARNING("invalid parameters (nEigVecs>n)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(maxIter < nEigVecs) { WARNING("invalid parameters (maxIter<nEigVecs)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(restartIter < nEigVecs) { WARNING("invalid parameters (restartIter<nEigVecs)"); return NVGRAPH_ERR_BAD_PARAMETERS; } // Allocate memory ValueType_ * alpha_host = (ValueType_*) malloc(restartIter*sizeof(ValueType_)); ValueType_ * beta_host = (ValueType_*) malloc(restartIter*sizeof(ValueType_)); Vector<ValueType_> lanczosVecs_dev(n*(restartIter+1), stream); Vector<ValueType_> work_dev((n+restartIter)*restartIter, stream); // Perform Lanczos method IndexType_ effIter; ValueType_ shift; NVGRAPH_ERROR status = computeSmallestEigenvectors(&A, nEigVecs, maxIter, restartIter, tol, reorthogonalize, &effIter, &iter, &shift, alpha_host, beta_host, lanczosVecs_dev.raw(), work_dev.raw(), 
eigVals_dev, eigVecs_dev); // Clean up and return free(alpha_host); free(beta_host); return status; } // ========================================================= // Eigensolver // ========================================================= /// Compute largest eigenvectors of symmetric matrix /** Computes eigenvalues and eigenvectors that are least * positive. If matrix is positive definite or positive * semidefinite, the computed eigenvalues are largest in * magnitude. * * The largest eigenvalue is estimated by performing several * Lanczos iterations. An implicitly restarted Lanczos method is * then applied. * * @param A Matrix. * @param nEigVecs Number of eigenvectors to compute. * @param maxIter Maximum number of Lanczos steps. * @param restartIter Maximum size of Lanczos system before * performing an implicit restart. Should be at least 4. * @param tol Convergence tolerance. Lanczos iteration will * terminate when the residual norm is less than tol*theta, where * theta is an estimate for the largest unwanted eigenvalue * (i.e. the (nEigVecs+1)th largest eigenvalue). * @param reorthogonalize Whether to reorthogonalize Lanczos * vectors. * @param effIter On exit, pointer to final size of Lanczos system. * @param totalIter On exit, pointer to total number of Lanczos * iterations performed. * @param alpha_host (Output, host memory, restartIter entries) * Diagonal entries of Lanczos system. * @param beta_host (Output, host memory, restartIter entries) * Off-diagonal entries of Lanczos system. * @param lanczosVecs_dev (Output, device memory, n*(restartIter+1) * entries) Lanczos vectors. Vectors are stored as columns of a * column-major matrix with dimensions n x (restartIter+1). * @param work_dev (Output, device memory, * (n+restartIter)*restartIter entries) Workspace. * @param eigVals_dev (Output, device memory, nEigVecs entries) * Largest eigenvalues of matrix. * @param eigVecs_dev (Output, device memory, n*nEigVecs entries) * Eigenvectors corresponding to largest eigenvalues of * matrix. Vectors are stored as columns of a column-major matrix * with dimensions n x nEigVecs. * @return NVGRAPH error flag. 
*/ template <typename IndexType_, typename ValueType_> NVGRAPH_ERROR computeLargestEigenvectors(const Matrix<IndexType_,ValueType_> * A, IndexType_ nEigVecs, IndexType_ maxIter, IndexType_ restartIter, ValueType_ tol, bool reorthogonalize, IndexType_ * effIter, IndexType_ * totalIter, ValueType_ * __restrict__ alpha_host, ValueType_ * __restrict__ beta_host, ValueType_ * __restrict__ lanczosVecs_dev, ValueType_ * __restrict__ work_dev, ValueType_ * __restrict__ eigVals_dev, ValueType_ * __restrict__ eigVecs_dev) { // ------------------------------------------------------- // Variable declaration // ------------------------------------------------------- // Useful constants const ValueType_ one = 1; const ValueType_ zero = 0; // Matrix dimension IndexType_ n = A->n; // Lanczos iteration counters IndexType_ maxIter_curr = restartIter; // Maximum size of Lanczos system // Status flags int status; // Loop index IndexType_ i; // Host memory ValueType_ * Z_host; // Eigenvectors in Lanczos basis ValueType_ * work_host; // Workspace // ------------------------------------------------------- // Check that LAPACK is enabled // ------------------------------------------------------- //Lapack<ValueType_>::check_lapack_enabled(); // ------------------------------------------------------- // Check that parameters are valid // ------------------------------------------------------- if(A->m != A->n) { WARNING("invalid parameter (matrix is not square)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(nEigVecs < 1) { WARNING("invalid parameter (nEigVecs<1)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(restartIter < 1) { WARNING("invalid parameter (restartIter<4)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(tol < 0) { WARNING("invalid parameter (tol<0)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(nEigVecs > n) { WARNING("invalid parameters (nEigVecs>n)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(maxIter < nEigVecs) { WARNING("invalid parameters (maxIter<nEigVecs)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(restartIter <= nEigVecs) { WARNING("invalid parameters (restartIter<=nEigVecs)"); return NVGRAPH_ERR_BAD_PARAMETERS; } // ------------------------------------------------------- // Variable initialization // ------------------------------------------------------- // Total number of Lanczos iterations *totalIter = 0; // Allocate host memory Z_host = (ValueType_*) malloc(restartIter*restartIter *sizeof(ValueType_)); if(Z_host==NULL) WARNING("could not allocate host memory"); work_host = (ValueType_*) malloc(4*restartIter*sizeof(ValueType_)); if(work_host==NULL) WARNING("could not allocate host memory"); // Initialize cuBLAS Cublas::set_pointer_mode_host(); // ------------------------------------------------------- // Compute largest eigenvalue // ------------------------------------------------------- #ifdef USE_CURAND // Random number generator curandGenerator_t randGen; // Initialize random number generator CHECK_CURAND(curandCreateGenerator(&randGen, CURAND_RNG_PSEUDO_PHILOX4_32_10)); CHECK_CURAND(curandSetPseudoRandomGeneratorSeed(randGen, 123456)); // Initialize initial Lanczos vector CHECK_CURAND(curandGenerateNormalX(randGen, lanczosVecs_dev, n+n%2, zero, one)); ValueType_ normQ1 = Cublas::nrm2(n, lanczosVecs_dev, 1); Cublas::scal(n, 1/normQ1, lanczosVecs_dev, 1); #else fill_raw_vec (lanczosVecs_dev, n, (ValueType_)1.0/n); // doesn't work #endif // Estimate number of Lanczos iterations // See bounds in Kuczynski and Wozniakowski (1992). 
//const ValueType_ relError = 0.25; // Relative error //const ValueType_ failProb = 1e-4; // Probability of failure //maxIter_curr = log(n/pow(failProb,2))/(4*std::sqrt(relError)) + 1; //maxIter_curr = min(maxIter_curr, restartIter); // Obtain tridiagonal matrix with Lanczos *effIter = 0; ValueType_ shift_val=0.0; ValueType_ *shift = &shift_val; //maxIter_curr = min(maxIter, restartIter); status = performLanczosIteration<IndexType_, ValueType_> (A, effIter, maxIter_curr, *shift, 0, reorthogonalize, alpha_host, beta_host, lanczosVecs_dev, work_dev); if(status) WARNING("error in Lanczos iteration"); *totalIter += *effIter; // Apply Lanczos method until convergence ValueType_ shiftLower = 1; ValueType_ shiftUpper = -1; while(*totalIter<maxIter && beta_host[*effIter-1]>tol*shiftLower) { // Determine number of restart steps // Number of steps must be even due to Francis algorithm IndexType_ iter_new = nEigVecs+1; if(restartIter-(maxIter-*totalIter) > nEigVecs+1) iter_new = restartIter-(maxIter-*totalIter); if((restartIter-iter_new) % 2) iter_new -= 1; if(iter_new==*effIter) break; // Implicit restart of Lanczos method status = lanczosRestart<IndexType_, ValueType_> (n, *effIter, iter_new, &shiftUpper, &shiftLower, alpha_host, beta_host, Z_host, work_host, lanczosVecs_dev, work_dev, false); if(status) WARNING("error in Lanczos implicit restart"); *effIter = iter_new; // Check for convergence if(beta_host[*effIter-1] <= tol*fabs(shiftLower)) break; // Proceed with Lanczos method //maxIter_curr = min(restartIter, maxIter-*totalIter+*effIter); status = performLanczosIteration<IndexType_, ValueType_> (A, effIter, maxIter_curr, *shift, tol*fabs(shiftLower), reorthogonalize, alpha_host, beta_host, lanczosVecs_dev, work_dev); if(status) WARNING("error in Lanczos iteration"); *totalIter += *effIter-iter_new; } // Warning if Lanczos has failed to converge if(beta_host[*effIter-1] > tol*fabs(shiftLower)) { WARNING("implicitly restarted Lanczos failed to converge"); } for (int i = 0; i < restartIter; ++i) { for (int j = 0; j < restartIter; ++j) Z_host[i*restartIter+j] = 0; } // Solve tridiagonal system memcpy(work_host+2*(*effIter), alpha_host, (*effIter)*sizeof(ValueType_)); memcpy(work_host+3*(*effIter), beta_host, (*effIter-1)*sizeof(ValueType_)); Lapack<ValueType_>::steqr('I', *effIter, work_host+2*(*effIter), work_host+3*(*effIter), Z_host, *effIter, work_host); // note: We need to pick the top nEigVecs eigenvalues // but effItter can be larger than nEigVecs // hence we add an offset for that case, because we want to access top nEigVecs eigenpairs in the matrix of size effIter. 
// remember the array is sorted, so it is not needed for smallest eigenvalues case because the first ones are the smallest ones IndexType_ top_eigenparis_idx_offset = *effIter - nEigVecs; //Debug : print nEigVecs largest eigenvalues //for (int i = top_eigenparis_idx_offset; i < *effIter; ++i) // std::cout <<*(work_host+(2*(*effIter)+i))<< " "; //std::cout <<std::endl; //Debug : print nEigVecs largest eigenvectors //for (int i = top_eigenparis_idx_offset; i < *effIter; ++i) //{ // for (int j = 0; j < *effIter; ++j) // std::cout <<Z_host[i*(*effIter)+j]<< " "; // std::cout <<std::endl; //} // Obtain desired eigenvalues by applying shift for(i=0; i<*effIter; ++i) work_host[i+2*(*effIter)] -= *shift; for(i=0; i<top_eigenparis_idx_offset; ++i) work_host[i+2*(*effIter)] = 0; // Copy results to device memory // skip smallest eigenvalue if needed CHECK_CUDA(cudaMemcpy(eigVals_dev, work_host+2*(*effIter)+top_eigenparis_idx_offset, nEigVecs*sizeof(ValueType_), cudaMemcpyHostToDevice)); // skip smallest eigenvector if needed CHECK_CUDA(cudaMemcpy(work_dev, Z_host+(top_eigenparis_idx_offset*(*effIter)), (*effIter)*nEigVecs*sizeof(ValueType_), cudaMemcpyHostToDevice)); // Convert eigenvectors from Lanczos basis to standard basis Cublas::gemm(false, false, n, nEigVecs, *effIter, &one, lanczosVecs_dev, n, work_dev, *effIter, &zero, eigVecs_dev, n); // Clean up and exit free(Z_host); free(work_host); #ifdef USE_CURAND CHECK_CURAND(curandDestroyGenerator(randGen)); #endif return NVGRAPH_OK; } /// Compute largest eigenvectors of symmetric matrix /** Computes eigenvalues and eigenvectors that are least * positive. If matrix is positive definite or positive * semidefinite, the computed eigenvalues are largest in * magnitude. * * The largest eigenvalue is estimated by performing several * Lanczos iterations. An implicitly restarted Lanczos method is * then applied to A+s*I, where s is negative the largest * eigenvalue. * * CNMEM must be initialized before calling this function. * * @param A Matrix. * @param nEigVecs Number of eigenvectors to compute. * @param maxIter Maximum number of Lanczos steps. Does not include * Lanczos steps used to estimate largest eigenvalue. * @param restartIter Maximum size of Lanczos system before * performing an implicit restart. Should be at least 4. * @param tol Convergence tolerance. Lanczos iteration will * terminate when the residual norm is less than tol*theta, where * theta is an estimate for the largest unwanted eigenvalue * (i.e. the (nEigVecs+1)th largest eigenvalue). * @param reorthogonalize Whether to reorthogonalize Lanczos * vectors. * @param iter On exit, pointer to total number of Lanczos * iterations performed. Does not include Lanczos steps used to * estimate largest eigenvalue. * @param eigVals_dev (Output, device memory, nEigVecs entries) * Largest eigenvalues of matrix. * @param eigVecs_dev (Output, device memory, n*nEigVecs entries) * Eigenvectors corresponding to largest eigenvalues of * matrix. Vectors are stored as columns of a column-major matrix * with dimensions n x nEigVecs. * @return NVGRAPH error flag. 
*/ template <typename IndexType_, typename ValueType_> NVGRAPH_ERROR computeLargestEigenvectors(const Matrix<IndexType_,ValueType_> & A, IndexType_ nEigVecs, IndexType_ maxIter, IndexType_ restartIter, ValueType_ tol, bool reorthogonalize, IndexType_ & iter, ValueType_ * __restrict__ eigVals_dev, ValueType_ * __restrict__ eigVecs_dev) { // CUDA stream // TODO: handle non-zero streams cudaStream_t stream = 0; // Matrix dimension IndexType_ n = A.n; // Check that parameters are valid if(A.m != A.n) { WARNING("invalid parameter (matrix is not square)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(nEigVecs < 1) { WARNING("invalid parameter (nEigVecs<1)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(restartIter < 1) { WARNING("invalid parameter (restartIter<4)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(tol < 0) { WARNING("invalid parameter (tol<0)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(nEigVecs > n) { WARNING("invalid parameters (nEigVecs>n)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(maxIter < nEigVecs) { WARNING("invalid parameters (maxIter<nEigVecs)"); return NVGRAPH_ERR_BAD_PARAMETERS; } if(restartIter < nEigVecs) { WARNING("invalid parameters (restartIter<nEigVecs)"); return NVGRAPH_ERR_BAD_PARAMETERS; } // Allocate memory ValueType_ * alpha_host = (ValueType_*) malloc(restartIter*sizeof(ValueType_)); ValueType_ * beta_host = (ValueType_*) malloc(restartIter*sizeof(ValueType_)); Vector<ValueType_> lanczosVecs_dev(n*(restartIter+1), stream); Vector<ValueType_> work_dev((n+restartIter)*restartIter, stream); // Perform Lanczos method IndexType_ effIter; NVGRAPH_ERROR status = computeLargestEigenvectors(&A, nEigVecs, maxIter, restartIter, tol, reorthogonalize, &effIter, &iter, alpha_host, beta_host, lanczosVecs_dev.raw(), work_dev.raw(), eigVals_dev, eigVecs_dev); // Clean up and return free(alpha_host); free(beta_host); return status; } // ========================================================= // Explicit instantiation // ========================================================= template NVGRAPH_ERROR computeSmallestEigenvectors<int,float> (const Matrix<int,float> * A, int nEigVecs, int maxIter, int restartIter, float tol, bool reorthogonalize, int * iter, int * totalIter, float * shift, float * __restrict__ alpha_host, float * __restrict__ beta_host, float * __restrict__ lanczosVecs_dev, float * __restrict__ work_dev, float * __restrict__ eigVals_dev, float * __restrict__ eigVecs_dev); template NVGRAPH_ERROR computeSmallestEigenvectors<int,double> (const Matrix<int,double> * A, int nEigVecs, int maxIter, int restartIter, double tol, bool reorthogonalize, int * iter, int * totalIter, double * shift, double * __restrict__ alpha_host, double * __restrict__ beta_host, double * __restrict__ lanczosVecs_dev, double * __restrict__ work_dev, double * __restrict__ eigVals_dev, double * __restrict__ eigVecs_dev); template NVGRAPH_ERROR computeSmallestEigenvectors<int, float> (const Matrix<int,float> & A, int nEigVecs, int maxIter, int restartIter, float tol, bool reorthogonalize, int & iter, float * __restrict__ eigVals_dev, float * __restrict__ eigVecs_dev); template NVGRAPH_ERROR computeSmallestEigenvectors<int, double> (const Matrix<int,double> & A, int nEigVecs, int maxIter, int restartIter, double tol, bool reorthogonalize, int & iter, double * __restrict__ eigVals_dev, double * __restrict__ eigVecs_dev); template NVGRAPH_ERROR computeLargestEigenvectors<int,float> (const Matrix<int,float> * A, int nEigVecs, int maxIter, int restartIter, float tol, bool reorthogonalize, int * iter, int * totalIter, 
float * __restrict__ alpha_host, float * __restrict__ beta_host, float * __restrict__ lanczosVecs_dev, float * __restrict__ work_dev, float * __restrict__ eigVals_dev, float * __restrict__ eigVecs_dev); template NVGRAPH_ERROR computeLargestEigenvectors<int,double> (const Matrix<int,double> * A, int nEigVecs, int maxIter, int restartIter, double tol, bool reorthogonalize, int * iter, int * totalIter, double * __restrict__ alpha_host, double * __restrict__ beta_host, double * __restrict__ lanczosVecs_dev, double * __restrict__ work_dev, double * __restrict__ eigVals_dev, double * __restrict__ eigVecs_dev); template NVGRAPH_ERROR computeLargestEigenvectors<int, float> (const Matrix<int,float> & A, int nEigVecs, int maxIter, int restartIter, float tol, bool reorthogonalize, int & iter, float * __restrict__ eigVals_dev, float * __restrict__ eigVecs_dev); template NVGRAPH_ERROR computeLargestEigenvectors<int, double> (const Matrix<int,double> & A, int nEigVecs, int maxIter, int restartIter, double tol, bool reorthogonalize, int & iter, double * __restrict__ eigVals_dev, double * __restrict__ eigVecs_dev); } //#endif //NVGRAPH_PARTITION
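As a usage illustration for the eigensolver API documented above (not part of the original source), the following is a minimal calling sketch for the reference overload of computeSmallestEigenvectors. It assumes an nvgraph Matrix<int,float> named A has already been constructed (its setup is internal to the library and omitted here), and the parameter choices are placeholders picked to match the doxygen comments.

// Sketch only: A is an assumed, already-constructed nvgraph Matrix<int,float>.
int   nEigVecs    = 4;        // number of smallest eigenpairs requested
int   maxIter     = 4000;     // total Lanczos steps allowed
int   restartIter = 64;       // implicit-restart window (should be at least 4)
float tol         = 1e-6f;    // convergence tolerance
int   itersUsed   = 0;        // filled in by the solver

float* eigVals_dev = nullptr; // nEigVecs entries, device memory
float* eigVecs_dev = nullptr; // n x nEigVecs, column-major, device memory
cudaMalloc(&eigVals_dev, nEigVecs * sizeof(float));
cudaMalloc(&eigVecs_dev, static_cast<size_t>(A.n) * nEigVecs * sizeof(float));

NVGRAPH_ERROR status = computeSmallestEigenvectors(
    A, nEigVecs, maxIter, restartIter, tol,
    /*reorthogonalize=*/true, itersUsed, eigVals_dev, eigVecs_dev);
if (status != NVGRAPH_OK) { /* handle error */ }

cudaFree(eigVals_dev);
cudaFree(eigVecs_dev);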
50c3c669a498b56e848d4b7c86e25ceba679556a.hip
// !!! This is a file automatically generated by hipify!!! #include "kernel_hip.cuh" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #define err(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } template <typename FUNC> __global__ void f_kernel(float* a, float* b, int size, FUNC func) { for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < size; i += blockDim.x * gridDim.x) b[i] = func(a[i]); } template <typename FUNC> std::vector<float> f(std::vector<float> const& a, FUNC func) { std::vector<float> b(a.size()); const int bsize = a.size() * 4; float* da; float* db; err(hipMalloc(&da, bsize)); err(hipMalloc(&db, bsize)); err(hipMemcpy(da, a.data(), bsize, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( f_kernel), dim3(256), dim3(256), 0, 0, da, db, a.size(), func); err(hipDeviceSynchronize()); err(hipMemcpy(b.data(), db, bsize, hipMemcpyDeviceToHost)); err(hipFree(da)); err(hipFree(db)); return b; } // I have to explicit instantiate but don't know how to do this for lambdas
50c3c669a498b56e848d4b7c86e25ceba679556a.cu
#include "kernel.cuh" #include "cuda_runtime.h" #include "device_launch_parameters.h" #define err(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } template <typename FUNC> __global__ void f_kernel(float* a, float* b, int size, FUNC func) { for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < size; i += blockDim.x * gridDim.x) b[i] = func(a[i]); } template <typename FUNC> std::vector<float> f(std::vector<float> const& a, FUNC func) { std::vector<float> b(a.size()); const int bsize = a.size() * 4; float* da; float* db; err(cudaMalloc(&da, bsize)); err(cudaMalloc(&db, bsize)); err(cudaMemcpy(da, a.data(), bsize, cudaMemcpyHostToDevice)); f_kernel<<<256, 256>>>(da, db, a.size(), func); err(cudaDeviceSynchronize()); err(cudaMemcpy(b.data(), db, bsize, cudaMemcpyDeviceToHost)); err(cudaFree(da)); err(cudaFree(db)); return b; } // I have to explicit instantiate but don't know how to do this for lambdas
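The closing comment in the pair above points at a real limitation: a lambda has a distinct unnamed type, so there is nothing to name in an explicit instantiation of f. One common workaround, sketched here under the assumption that callers can switch from a lambda to a named device functor (Square is a hypothetical example, not from the original source), is to instantiate the template for that functor type; with nvcc's --extended-lambda a __device__ lambda can still be passed in, but it cannot be explicitly instantiated ahead of time.

// Hypothetical named functor standing in for the lambda; __device__ so f_kernel can invoke it.
struct Square {
    __device__ float operator()(float x) const { return x * x; }
};

// Explicit instantiation now works because the callable type has a name.
// This line belongs in the .cu file that defines f; Square must also be visible to callers (e.g. via the header).
template std::vector<float> f<Square>(std::vector<float> const& a, Square func);

// Host-side usage (illustrative):
// std::vector<float> squared = f(values, Square{});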
f8ec28d8de03d7d005572b32f0fe19e97d601dc6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include <opencv2/core/cuda.hpp> #include <opencv2/cudev/util/vec_traits.hpp> #include <opencv2/core/cuda_stream_accessor.hpp> #include <opencv2/core/cuda/utility.hpp> #include "Aquila/utilities/GPUSortingPriv.hpp" template<class T> void __global__ accum_kernel(const T* in, float* out) { *out += *in; } void __global__ divide_kernel(float* value, float count) { value[0] /= count; value[1] /= count; } template<class T, class U> struct Saturate { Saturate(const cv::cuda::PtrStepSz<float>& vals_, float out_dynamic_range): vals(vals_), out_dynamic_range(out_dynamic_range) {} typename cv::cudev::MakeVec<U, 3>::type operator()(const typename cv::cudev::MakeVec<T, 3>::type & vec_in) { typename cv::cudev::MakeVec<U, 3>::type vec; float beta = vals(0,0); float alpha = out_dynamic_range / (vals(0,1) - vals(0,0)); float val = alpha * (vec_in.x - beta); val = fmax(0.0f, val); val = fmin(out_dynamic_range, val); vec.x = val; beta = vals(1,0); alpha = out_dynamic_range / (vals(1,1) - vals(1,0)); val = alpha * (vec_in.y - beta); val = fmax(0.0f, val); val = fmin(out_dynamic_range, val); vec.y = val; beta = vals(2,0); alpha = out_dynamic_range / (vals(2,1) - vals(2,0)); val = alpha * (vec_in.z - beta); val = ::max(0.0f, val); val = ::min(out_dynamic_range, val); vec.z = val; return vec; } const cv::cuda::PtrStepSz<float> vals; const float out_dynamic_range; }; template<class T1, class T2> void __global__ transform_kernel(const cv::cuda::PtrStepSz<typename cv::cudev::MakeVec<T1, 3>::type> in, cv::cuda::PtrStepSz<typename cv::cudev::MakeVec<T2, 3>::type> out, cv::cuda::PtrStepSz<float> saturate, const float dynamic_range) { const int x = blockDim.x * blockIdx.x + threadIdx.x; const int y = blockDim.y * blockIdx.y + threadIdx.y; if(x < in.cols && y < in.rows) { float beta = saturate(0,0); float alpha = dynamic_range / (saturate(0,1) - saturate(0,0)); float val = alpha * (in(y,x).x - beta); val = fmax(0.0f, val); val = fmin(dynamic_range, val); out(y,x).x = val; beta = saturate(1,0); alpha = dynamic_range / (saturate(1,1) - saturate(0,0)); val = alpha * (in(y,x).y - beta); val = fmax(0.0f, val); val = fmin(dynamic_range, val); out(y,x).y = val; beta = saturate(2,0); alpha = dynamic_range / (saturate(2,1) - saturate(2,0)); val = alpha * (in(y,x).z - beta); val = fmax(0.0f, val); val = fmin(dynamic_range, val); out(y,x).z = val; } } template<class T1> void __global__ color_correct_kernel(cv::cuda::PtrStepSz<typename cv::cudev::MakeVec<T1, 3>::type> in, cv::cuda::PtrStepSz<float> mat) { const int x = blockDim.x * blockIdx.x + threadIdx.x; const int y = blockDim.y * blockIdx.y + threadIdx.y; typedef typename cv::cudev::MakeVec<T1, 3>::type WorkType; if(x < in.cols && y < in.rows) { WorkType& pix = in(y,x); WorkType tmp; tmp.x = mat(0,0) * pix.x + mat(0, 1) * pix.y + mat(0,2) * pix.z; tmp.y = mat(1,0) * pix.x + mat(1, 1) * pix.y + mat(1,2) * pix.z; tmp.z = mat(2,0) * pix.x + mat(2, 1) * pix.y + mat(2,2) * pix.z; in(y,x) = tmp; } } template<class T1, class T2> void transform(const cv::cuda::GpuMat& in, cv::cuda::GpuMat& out, const cv::cuda::PtrStepSzf& saturate, float dyn_range, hipStream_t stream) { dim3 block(32, 8); dim3 grid(cv::cuda::device::divUp(in.cols, block.x), cv::cuda::device::divUp(in.rows, block.y)); hipLaunchKernelGGL(( transform_kernel<T1,T2>), dim3(grid), dim3(block), 0, stream, in, out, saturate, dyn_range); } namespace aq { void applyWhiteBalance(const cv::cuda::GpuMat& input, 
cv::cuda::GpuMat& output, const cv::Scalar& lower, const cv::Scalar& upper, const std::vector<cv::Rect2f>& sample_regions, const std::vector<float>& sample_weights, int dtype, cv::cuda::Stream& stream_) { CV_Assert(input.channels() == 3); CV_Assert(input.depth() == CV_8U || input.depth() == CV_16U); for(int i = 0; i < 3; ++i) { CV_Assert(lower[i] >= 0 && lower[i] < 1.0f); CV_Assert(upper[i] >= 0 && upper[i] < 1.0f); } CV_Assert(sample_regions.size() == sample_weights.size()); std::vector<float> weights = sample_weights; float sum = 0; for(int i = 0; i < sample_weights.size(); ++i) { sum += sample_weights[i]; } sum /= sample_weights.size(); for(int i = 0; i < weights.size(); ++i) { weights[i] /= sum; } int width = input.cols; int height = input.rows; hipStream_t stream = cv::cuda::StreamAccessor::getStream(stream_); cv::cuda::GpuMat accumulation; cv::cuda::createContinuous(3,2, CV_32F, accumulation); accumulation.setTo(0.0, stream_); float* accum_ptr = accumulation.ptr<float>(); for(int i = 0; i < sample_regions.size(); ++i) { const cv::Rect2f& roif = sample_regions[i]; cv::Rect roi(roif.x*width, roif.y * height, roif.width * width, roif.height * height); cv::cuda::GpuMat flat; cv::cuda::createContinuous(roi.size(), input.type(), flat); input(roi).copyTo(flat, stream_); flat = flat.reshape(3, 1); for( int j = 0; j < 3; ++j) { int lower_offset = cvFloor(flat.cols * lower.val[j]) * 3 + j; int upper_offset = cvFloor(flat.cols * (1.0f - upper.val[j])) * 3 + j; if(input.depth() == CV_8U) { auto view = CreateView<uchar, 1>(flat, j); thrust::sort( thrust::system::cuda::par( cv::cuda::device::ThrustAllocator::getAllocator()).on(stream), view.begin(), view.end(), thrust::less<uchar>()); hipLaunchKernelGGL(( accum_kernel<uchar>), dim3(1),dim3(1),0, stream, flat.data + lower_offset, accum_ptr + 2*j); hipLaunchKernelGGL(( accum_kernel<uchar>), dim3(1),dim3(1),0, stream, flat.data + upper_offset, accum_ptr + 2*j + 1); }else if(input.depth() == CV_16U) { auto view = CreateView<ushort, 1>(flat, j); thrust::sort( thrust::system::cuda::par( cv::cuda::device::ThrustAllocator::getAllocator()).on(stream), view.begin(), view.end(), thrust::less<ushort>()); hipLaunchKernelGGL(( accum_kernel<ushort>), dim3(1),dim3(1),0, stream, reinterpret_cast<ushort*>(flat.data) + lower_offset, accumulation.ptr<float>(j)); hipLaunchKernelGGL(( accum_kernel<ushort>), dim3(1),dim3(1),0, stream, reinterpret_cast<ushort*>(flat.data) + upper_offset, accumulation.ptr<float>(j) + 1); } } } if(sample_regions.size() != 1) { for(int i = 0; i < 3; ++i) { hipLaunchKernelGGL(( divide_kernel), dim3(1),dim3(1),0, stream, accumulation.ptr<float>(i), sample_regions.size()); } } output.create(input.size(), CV_MAKE_TYPE(dtype, 3)); typedef void(*func_t)(const cv::cuda::GpuMat& in, cv::cuda::GpuMat& out, const cv::cuda::PtrStepSzf& saturate, float dyn_range, hipStream_t stream); static const func_t funcs[3][3] = { {transform<uchar, uchar>, 0, transform<ushort, uchar>}, {0, 0, 0}, {transform<uchar, ushort>, 0, transform<ushort, ushort>} }; CV_Assert(funcs[dtype][input.depth()]); funcs[dtype][input.depth()](input, output, accumulation, 255.0, stream); } void colorCorrect(cv::cuda::GpuMat& in_out, const cv::cuda::GpuMat& color_matrix, cv::cuda::Stream& stream_) { CV_Assert(color_matrix.rows == 3 && color_matrix.cols == 3); CV_Assert(in_out.channels() == 3); CV_Assert(in_out.depth() == CV_8U || in_out.depth() == CV_16U || in_out.depth() == CV_32F); dim3 block(32, 8); dim3 grid(cv::cuda::device::divUp(in_out.cols, block.x), 
cv::cuda::device::divUp(in_out.rows, block.y)); hipStream_t stream = cv::cuda::StreamAccessor::getStream(stream_); switch(in_out.depth()) { case CV_8U: hipLaunchKernelGGL(( color_correct_kernel<uchar>), dim3(grid), dim3(block), 0, stream, in_out, color_matrix); return; case CV_16U: hipLaunchKernelGGL(( color_correct_kernel<ushort>), dim3(grid), dim3(block), 0, stream, in_out, color_matrix); return; case CV_32F: hipLaunchKernelGGL(( color_correct_kernel<float>), dim3(grid), dim3(block), 0, stream, in_out, color_matrix); return; default: return; } } }
f8ec28d8de03d7d005572b32f0fe19e97d601dc6.cu
#include <vector> #include <opencv2/core/cuda.hpp> #include <opencv2/cudev/util/vec_traits.hpp> #include <opencv2/core/cuda_stream_accessor.hpp> #include <opencv2/core/cuda/utility.hpp> #include "Aquila/utilities/GPUSortingPriv.hpp" template<class T> void __global__ accum_kernel(const T* in, float* out) { *out += *in; } void __global__ divide_kernel(float* value, float count) { value[0] /= count; value[1] /= count; } template<class T, class U> struct Saturate { Saturate(const cv::cuda::PtrStepSz<float>& vals_, float out_dynamic_range): vals(vals_), out_dynamic_range(out_dynamic_range) {} typename cv::cudev::MakeVec<U, 3>::type operator()(const typename cv::cudev::MakeVec<T, 3>::type & vec_in) { typename cv::cudev::MakeVec<U, 3>::type vec; float beta = vals(0,0); float alpha = out_dynamic_range / (vals(0,1) - vals(0,0)); float val = alpha * (vec_in.x - beta); val = fmax(0.0f, val); val = fmin(out_dynamic_range, val); vec.x = val; beta = vals(1,0); alpha = out_dynamic_range / (vals(1,1) - vals(1,0)); val = alpha * (vec_in.y - beta); val = fmax(0.0f, val); val = fmin(out_dynamic_range, val); vec.y = val; beta = vals(2,0); alpha = out_dynamic_range / (vals(2,1) - vals(2,0)); val = alpha * (vec_in.z - beta); val = std::max(0.0f, val); val = std::min(out_dynamic_range, val); vec.z = val; return vec; } const cv::cuda::PtrStepSz<float> vals; const float out_dynamic_range; }; template<class T1, class T2> void __global__ transform_kernel(const cv::cuda::PtrStepSz<typename cv::cudev::MakeVec<T1, 3>::type> in, cv::cuda::PtrStepSz<typename cv::cudev::MakeVec<T2, 3>::type> out, cv::cuda::PtrStepSz<float> saturate, const float dynamic_range) { const int x = blockDim.x * blockIdx.x + threadIdx.x; const int y = blockDim.y * blockIdx.y + threadIdx.y; if(x < in.cols && y < in.rows) { float beta = saturate(0,0); float alpha = dynamic_range / (saturate(0,1) - saturate(0,0)); float val = alpha * (in(y,x).x - beta); val = fmax(0.0f, val); val = fmin(dynamic_range, val); out(y,x).x = val; beta = saturate(1,0); alpha = dynamic_range / (saturate(1,1) - saturate(0,0)); val = alpha * (in(y,x).y - beta); val = fmax(0.0f, val); val = fmin(dynamic_range, val); out(y,x).y = val; beta = saturate(2,0); alpha = dynamic_range / (saturate(2,1) - saturate(2,0)); val = alpha * (in(y,x).z - beta); val = fmax(0.0f, val); val = fmin(dynamic_range, val); out(y,x).z = val; } } template<class T1> void __global__ color_correct_kernel(cv::cuda::PtrStepSz<typename cv::cudev::MakeVec<T1, 3>::type> in, cv::cuda::PtrStepSz<float> mat) { const int x = blockDim.x * blockIdx.x + threadIdx.x; const int y = blockDim.y * blockIdx.y + threadIdx.y; typedef typename cv::cudev::MakeVec<T1, 3>::type WorkType; if(x < in.cols && y < in.rows) { WorkType& pix = in(y,x); WorkType tmp; tmp.x = mat(0,0) * pix.x + mat(0, 1) * pix.y + mat(0,2) * pix.z; tmp.y = mat(1,0) * pix.x + mat(1, 1) * pix.y + mat(1,2) * pix.z; tmp.z = mat(2,0) * pix.x + mat(2, 1) * pix.y + mat(2,2) * pix.z; in(y,x) = tmp; } } template<class T1, class T2> void transform(const cv::cuda::GpuMat& in, cv::cuda::GpuMat& out, const cv::cuda::PtrStepSzf& saturate, float dyn_range, cudaStream_t stream) { dim3 block(32, 8); dim3 grid(cv::cuda::device::divUp(in.cols, block.x), cv::cuda::device::divUp(in.rows, block.y)); transform_kernel<T1,T2><<<grid, block, 0, stream>>>(in, out, saturate, dyn_range); } namespace aq { void applyWhiteBalance(const cv::cuda::GpuMat& input, cv::cuda::GpuMat& output, const cv::Scalar& lower, const cv::Scalar& upper, const std::vector<cv::Rect2f>& sample_regions, const 
std::vector<float>& sample_weights, int dtype, cv::cuda::Stream& stream_) { CV_Assert(input.channels() == 3); CV_Assert(input.depth() == CV_8U || input.depth() == CV_16U); for(int i = 0; i < 3; ++i) { CV_Assert(lower[i] >= 0 && lower[i] < 1.0f); CV_Assert(upper[i] >= 0 && upper[i] < 1.0f); } CV_Assert(sample_regions.size() == sample_weights.size()); std::vector<float> weights = sample_weights; float sum = 0; for(int i = 0; i < sample_weights.size(); ++i) { sum += sample_weights[i]; } sum /= sample_weights.size(); for(int i = 0; i < weights.size(); ++i) { weights[i] /= sum; } int width = input.cols; int height = input.rows; cudaStream_t stream = cv::cuda::StreamAccessor::getStream(stream_); cv::cuda::GpuMat accumulation; cv::cuda::createContinuous(3,2, CV_32F, accumulation); accumulation.setTo(0.0, stream_); float* accum_ptr = accumulation.ptr<float>(); for(int i = 0; i < sample_regions.size(); ++i) { const cv::Rect2f& roif = sample_regions[i]; cv::Rect roi(roif.x*width, roif.y * height, roif.width * width, roif.height * height); cv::cuda::GpuMat flat; cv::cuda::createContinuous(roi.size(), input.type(), flat); input(roi).copyTo(flat, stream_); flat = flat.reshape(3, 1); for( int j = 0; j < 3; ++j) { int lower_offset = cvFloor(flat.cols * lower.val[j]) * 3 + j; int upper_offset = cvFloor(flat.cols * (1.0f - upper.val[j])) * 3 + j; if(input.depth() == CV_8U) { auto view = CreateView<uchar, 1>(flat, j); thrust::sort( thrust::system::cuda::par( cv::cuda::device::ThrustAllocator::getAllocator()).on(stream), view.begin(), view.end(), thrust::less<uchar>()); accum_kernel<uchar><<<1,1,0, stream>>>( flat.data + lower_offset, accum_ptr + 2*j); accum_kernel<uchar><<<1,1,0, stream>>>( flat.data + upper_offset, accum_ptr + 2*j + 1); }else if(input.depth() == CV_16U) { auto view = CreateView<ushort, 1>(flat, j); thrust::sort( thrust::system::cuda::par( cv::cuda::device::ThrustAllocator::getAllocator()).on(stream), view.begin(), view.end(), thrust::less<ushort>()); accum_kernel<ushort><<<1,1,0, stream>>>( reinterpret_cast<ushort*>(flat.data) + lower_offset, accumulation.ptr<float>(j)); accum_kernel<ushort><<<1,1,0, stream>>>( reinterpret_cast<ushort*>(flat.data) + upper_offset, accumulation.ptr<float>(j) + 1); } } } if(sample_regions.size() != 1) { for(int i = 0; i < 3; ++i) { divide_kernel<<<1,1,0, stream>>>(accumulation.ptr<float>(i), sample_regions.size()); } } output.create(input.size(), CV_MAKE_TYPE(dtype, 3)); typedef void(*func_t)(const cv::cuda::GpuMat& in, cv::cuda::GpuMat& out, const cv::cuda::PtrStepSzf& saturate, float dyn_range, cudaStream_t stream); static const func_t funcs[3][3] = { {transform<uchar, uchar>, 0, transform<ushort, uchar>}, {0, 0, 0}, {transform<uchar, ushort>, 0, transform<ushort, ushort>} }; CV_Assert(funcs[dtype][input.depth()]); funcs[dtype][input.depth()](input, output, accumulation, 255.0, stream); } void colorCorrect(cv::cuda::GpuMat& in_out, const cv::cuda::GpuMat& color_matrix, cv::cuda::Stream& stream_) { CV_Assert(color_matrix.rows == 3 && color_matrix.cols == 3); CV_Assert(in_out.channels() == 3); CV_Assert(in_out.depth() == CV_8U || in_out.depth() == CV_16U || in_out.depth() == CV_32F); dim3 block(32, 8); dim3 grid(cv::cuda::device::divUp(in_out.cols, block.x), cv::cuda::device::divUp(in_out.rows, block.y)); cudaStream_t stream = cv::cuda::StreamAccessor::getStream(stream_); switch(in_out.depth()) { case CV_8U: return color_correct_kernel<uchar><<<grid, block, 0, stream>>>(in_out, color_matrix); case CV_16U: return color_correct_kernel<ushort><<<grid, block, 0, 
stream>>>(in_out, color_matrix); case CV_32F: return color_correct_kernel<float><<<grid, block, 0, stream>>>(in_out, color_matrix); default: return; } } }
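As a usage illustration for the colorCorrect entry point defined above (not part of the original source), a host-side driver might look like the sketch below; the 3x3 matrix values are invented and img is assumed to already hold a 3-channel CV_8U, CV_16U or CV_32F frame on the device.

// Illustrative only: the correction-matrix values are made up.
cv::cuda::Stream stream;
cv::Mat ccm_host = (cv::Mat_<float>(3, 3) <<
     1.20f, -0.10f, -0.10f,
    -0.05f,  1.10f, -0.05f,
    -0.10f, -0.10f,  1.20f);
cv::cuda::GpuMat ccm(ccm_host);   // uploads the 3x3 CV_32F matrix to the device
cv::cuda::GpuMat img;             // assumed: 3-channel device image filled elsewhere
aq::colorCorrect(img, ccm, stream);
stream.waitForCompletion();       // the in-place correction is complete after this point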
3c7a5ac986d078db87bfb6cfe4fbdc9761289017.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2009-2017 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. #include "ComputeFreeVolumeGPU.cuh" #include "IntegratorHPMCMonoGPU.cuh" #include "IntegratorHPMCMonoImplicitGPU.cuh" #include "IntegratorHPMCMonoImplicitNewGPU.cuh" #include "ShapeConvexPolygon.h" namespace hpmc { namespace detail { //! HPMC kernels for ShapeConvexPolygon template hipError_t gpu_hpmc_free_volume<ShapeConvexPolygon>(const hpmc_free_volume_args_t &args, const typename ShapeConvexPolygon::param_type *d_params); template hipError_t gpu_hpmc_update<ShapeConvexPolygon>(const hpmc_args_t& args, const typename ShapeConvexPolygon::param_type *d_params); template hipError_t gpu_hpmc_implicit_count_overlaps<ShapeConvexPolygon>(const hpmc_implicit_args_t& args, const typename ShapeConvexPolygon::param_type *d_params); template hipError_t gpu_hpmc_implicit_accept_reject<ShapeConvexPolygon>(const hpmc_implicit_args_t& args, const typename ShapeConvexPolygon::param_type *d_params); template hipError_t gpu_hpmc_insert_depletants_queue<ShapeConvexPolygon>(const hpmc_implicit_args_new_t& args, const typename ShapeConvexPolygon::param_type *d_params); template hipError_t gpu_hpmc_implicit_accept_reject_new<ShapeConvexPolygon>(const hpmc_implicit_args_new_t& args, const typename ShapeConvexPolygon::param_type *d_params); }; // end namespace detail } // end namespace hpmc
3c7a5ac986d078db87bfb6cfe4fbdc9761289017.cu
// Copyright (c) 2009-2017 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. #include "ComputeFreeVolumeGPU.cuh" #include "IntegratorHPMCMonoGPU.cuh" #include "IntegratorHPMCMonoImplicitGPU.cuh" #include "IntegratorHPMCMonoImplicitNewGPU.cuh" #include "ShapeConvexPolygon.h" namespace hpmc { namespace detail { //! HPMC kernels for ShapeConvexPolygon template cudaError_t gpu_hpmc_free_volume<ShapeConvexPolygon>(const hpmc_free_volume_args_t &args, const typename ShapeConvexPolygon::param_type *d_params); template cudaError_t gpu_hpmc_update<ShapeConvexPolygon>(const hpmc_args_t& args, const typename ShapeConvexPolygon::param_type *d_params); template cudaError_t gpu_hpmc_implicit_count_overlaps<ShapeConvexPolygon>(const hpmc_implicit_args_t& args, const typename ShapeConvexPolygon::param_type *d_params); template cudaError_t gpu_hpmc_implicit_accept_reject<ShapeConvexPolygon>(const hpmc_implicit_args_t& args, const typename ShapeConvexPolygon::param_type *d_params); template cudaError_t gpu_hpmc_insert_depletants_queue<ShapeConvexPolygon>(const hpmc_implicit_args_new_t& args, const typename ShapeConvexPolygon::param_type *d_params); template cudaError_t gpu_hpmc_implicit_accept_reject_new<ShapeConvexPolygon>(const hpmc_implicit_args_new_t& args, const typename ShapeConvexPolygon::param_type *d_params); }; // end namespace detail } // end namespace hpmc
d004d0e8077e40114b77ae8e5ce109d5d9a56b57.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef CUDART_VERSION #error CUDART_VERSION Undefined! #elif (CUDART_VERSION >= 11050) #include <hipcub/hipcub.hpp> #else #include "3rdparty/hipcub/hipcub.hpp" #endif #include "src/fastertransformer/kernels/decoder_masked_multihead_attention.h" #include "src/fastertransformer/kernels/decoder_masked_multihead_attention_utils.h" #include "src/fastertransformer/kernels/reduce_kernel_utils.cuh" #include "src/fastertransformer/layers/attention_layers/DecoderCrossAttentionLayer.h" #include "src/fastertransformer/utils/cuda_type_utils.cuh" namespace fastertransformer { const int WARP_SIZE = 32; const bool ATTENION_OPT = true; const int ATTENTION_BLOCK_SIZE = 256; /////////////////////////////////////////////////////////////////////////////////////////////////// template<int HALF_ELEMENTS_PER_WARP_LOAD> using Copy_half_t = typename std::conditional< HALF_ELEMENTS_PER_WARP_LOAD == 32, half, typename std::conditional<HALF_ELEMENTS_PER_WARP_LOAD == 64, int, typename std::conditional<HALF_ELEMENTS_PER_WARP_LOAD == 128, int2, int4>::type>::type>:: type; template<typename T, int ELEMENTS_PER_WARP_LOAD> using Copy_t = Copy_half_t<sizeof(T) / sizeof(half) * ELEMENTS_PER_WARP_LOAD>; /////////////////////////////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void cross_attention_kernel(T* query_buf, const T* Q_bias, T* key_cache, const T* K_bias, T* value_cache, const T* V_bias, const int* length_per_sample, T* context_buf, const bool* finished, int batch_size, int head_num, int size_per_head, int step, const int seq_len, const T scalar, const int* ia3_tasks, const T* ia3_key_weights, const T* ia3_value_weights) { if (finished != nullptr && finished[blockIdx.x / head_num] == true) { return; } int tid = threadIdx.x; int bid = blockIdx.x / head_num; int head_id = blockIdx.x % head_num; const bool do_ia3 = step == 1 && ia3_tasks != nullptr; const int ia3_task = do_ia3 ? ia3_tasks[bid] : 0; extern __shared__ __align__(sizeof(float)) unsigned s_buf[]; // align on largest type T* sq = reinterpret_cast<T*>(s_buf); T* logits = reinterpret_cast<T*>(&sq[size_per_head]); int length = __ldg(&length_per_sample[bid]); int qkv_id = bid * head_num * size_per_head + head_id * size_per_head + tid; int qkv_bias_id = head_id * size_per_head + tid; if (tid < size_per_head) { sq[tid] = add(query_buf[qkv_id], Q_bias[qkv_bias_id]); } __syncthreads(); for (int ite = 0; ite < length; ++ite) { int key_id = bid * (seq_len * head_num * size_per_head) + ite * (head_num * size_per_head) + head_id * size_per_head + tid; T key = tid < size_per_head ? key_cache[key_id] : (T)(0.0f); // For the first step, we should add bias to key memory cache. // The KV memory cache only need to be updated at the first step. 
if (step == 1 && tid < size_per_head) { key = add(key, K_bias[head_id * size_per_head + tid]); if (do_ia3) { key = mmha::mul<T, T, T>(key, ia3_key_weights[(ia3_task * head_num + head_id) * size_per_head + tid]); } key_cache[key_id] = key; } T val = (tid < size_per_head) ? mul(key, sq[tid], scalar) : (T)(0.0f); T qk = blockReduceSum(val); if (threadIdx.x == 0) { logits[ite] = qk; } __syncthreads(); // try to remove } __syncthreads(); __shared__ float s_max_val, s_sum; float local_i = tid < length ? (float)logits[tid] : -1e20f; float max_val = blockReduceMax(local_i); if (tid == 0) { s_max_val = max_val; } __syncthreads(); local_i -= s_max_val; float local_o = tid < length ? __expf(local_i) : 0.0f; float val = blockReduceSum(local_o); if (tid == 0) { s_sum = val + 1e-6; } __syncthreads(); if (tid < length) { logits[tid] = local_o / s_sum; } __syncthreads(); if (tid < size_per_head) { T sum = (T)0.0f; for (int ite = 0; ite < length; ++ite) { int value_id = bid * seq_len * head_num * size_per_head + ite * head_num * size_per_head + head_id * size_per_head + tid; T value = value_cache[value_id]; // for the first step, we should add bias to key memory cache if (step == 1) { value = add(value, V_bias[head_id * size_per_head + tid]); if (do_ia3) { value = mmha::mul<T, T, T>( value, ia3_value_weights[(ia3_task * head_num + head_id) * size_per_head + tid]); } value_cache[value_id] = value; } sum = fma(value, logits[ite], sum); } context_buf[bid * head_num * size_per_head + head_id * size_per_head + tid] = sum; } } template<typename T, int size_per_head, int block_sz> __global__ void cross_attention_kernel_opt(T* __restrict query_buf, const T* __restrict Q_bias, T* __restrict key_cache, const T* __restrict K_bias, T* __restrict value_cache, const T* __restrict V_bias, const int* length_per_sample, T* __restrict context_buf, const bool* finished, int batch_size, int head_num, const int step, const int seq_len, const float scalar, const int* ia3_tasks, const T* ia3_key_weights, const T* ia3_value_weights) { if (finished != nullptr && finished[blockIdx.x / head_num] == true) { return; } typedef Copy_t<T, size_per_head> copy_t; const int elems_per_thread = size_per_head / WARP_SIZE; union Access_t { copy_t v; T x[elems_per_thread]; // supported size 1,2,4 }; typedef struct Float_n_t { float x[elems_per_thread]; // supported size 1,2,4 } float_n_t; __shared__ float_n_t sq[block_sz]; extern __shared__ float logits[]; // use to store the logits from [0~step] const int warp_id = threadIdx.x / WARP_SIZE; const int warp_num = block_sz / WARP_SIZE; typedef hipcub::BlockReduce<float, block_sz> MaxValBlockReduce; typedef hipcub::BlockReduce<float, block_sz> BlockReduce; __shared__ typename MaxValBlockReduce::TempStorage max_val_block_temp_storage; __shared__ typename BlockReduce::TempStorage block_temp_storage; __shared__ typename hipcub::WarpReduce<float>::TempStorage temp_storage[warp_num]; const int tid = threadIdx.x; const int bid = blockIdx.x / head_num; const int head_id = blockIdx.x % head_num; int length = __ldg(&length_per_sample[bid]); const int lane_id = tid % WARP_SIZE; int qkv_id = bid * head_num * size_per_head + head_id * size_per_head; int qkv_bias_id = head_id * size_per_head; int key_value_id = bid * (seq_len * head_num * size_per_head) + +head_id * size_per_head; const bool do_ia3 = step == 1 && ia3_tasks != nullptr; const int ia3_task = do_ia3 ? 
ia3_tasks[bid] : 0; query_buf = &query_buf[qkv_id]; K_bias = &K_bias[qkv_bias_id]; key_cache = &key_cache[key_value_id]; Q_bias = &Q_bias[qkv_bias_id]; V_bias = &V_bias[qkv_bias_id]; value_cache = &value_cache[key_value_id]; context_buf = &context_buf[qkv_id]; Access_t bias_r, key_val_r, query_buf_r; // each warp will have its own copy of sq query_buf_r.v = *((copy_t*)query_buf + lane_id); bias_r.v = *((copy_t*)Q_bias + lane_id); float qb_r[elems_per_thread]; for (int i = 0; i < elems_per_thread; ++i) { qb_r[i] = (float)query_buf_r.x[i] + (float)bias_r.x[i]; } // offset for each step int offset = head_num * size_per_head; bias_r.v = *((copy_t*)K_bias + lane_id); for (int ite = warp_id; ite < length; ite += warp_num) { key_val_r.v = *((copy_t*)&key_cache[ite * offset] + lane_id); // For the first step, we should add bias to key memory cache. // The KV memory cache only need to be updated at the first step. if (step == 1) { for (int i = 0; i < elems_per_thread; i++) { key_val_r.x[i] = (float)key_val_r.x[i] + (float)bias_r.x[i]; if (do_ia3) { key_val_r.x[i] = (float)key_val_r.x[i] * (float)(ia3_key_weights[(ia3_task * head_num + head_id) * size_per_head + tid]); } } *((copy_t*)&key_cache[ite * offset] + lane_id) = key_val_r.v; } float val = 0.f; for (int i = 0; i < elems_per_thread; i++) { val = val + (float)key_val_r.x[i] * qb_r[i] * scalar; } float qk = hipcub::WarpReduce<float>(temp_storage[warp_id]).Sum(val); if (lane_id == 0) { logits[ite] = qk; } } __syncthreads(); __shared__ float s_max_val, s_sum; float local_i = -1e20f; for (int i = tid; i < length; i += blockDim.x) { local_i = max(local_i, logits[i]); } float max_val = MaxValBlockReduce(max_val_block_temp_storage).Reduce(local_i, hipcub::Max()); if (tid == 0) { s_max_val = max_val; } __syncthreads(); float local_o = 0.0f; for (int i = tid; i < length; i += blockDim.x) { logits[i] = __expf(logits[i] - s_max_val); local_o += logits[i]; } float val = BlockReduce(block_temp_storage).Sum(local_o); if (tid == 0) { s_sum = val + 1e-6; } __syncthreads(); float s_sum_inverse = __fdividef(1.0f, s_sum); for (int i = tid; i < length; i += blockDim.x) { logits[i] = logits[i] * s_sum_inverse; } __syncthreads(); // This optimization introduces discrepancy because of different order in FP32 summation float sum_r[elems_per_thread] = {0.f}; bias_r.v = *((copy_t*)V_bias + lane_id); for (int ite = warp_id; ite < length; ite += warp_num) { key_val_r.v = *((copy_t*)&value_cache[ite * offset] + lane_id); // For the first step, we should add bias to key memory cache. 
if (step == 1) { for (int i = 0; i < elems_per_thread; i++) { key_val_r.x[i] = (float)key_val_r.x[i] + (float)bias_r.x[i]; if (do_ia3) { key_val_r.x[i] = (float)key_val_r.x[i] * (float)(ia3_value_weights[(ia3_task * head_num + head_id) * size_per_head + tid]); } } *((copy_t*)&value_cache[ite * offset] + lane_id) = key_val_r.v; } for (int i = 0; i < elems_per_thread; ++i) { sum_r[i] += (float)key_val_r.x[i] * logits[ite]; } } for (int i = 0; i < elems_per_thread; i++) { sq[warp_id * WARP_SIZE + lane_id].x[i] = sum_r[i]; } __syncthreads(); if (threadIdx.x < WARP_SIZE) { #pragma unroll for (int j = 1; j < warp_num; j++) { for (int i = 0; i < elems_per_thread; ++i) { sum_r[i] = sum_r[i] + (float)sq[j * WARP_SIZE + threadIdx.x].x[i]; } } } __syncthreads(); #pragma unroll for (int i = 0; i < elems_per_thread; i++) { key_val_r.x[i] = sum_r[i]; } if (threadIdx.x < WARP_SIZE) { *((copy_t*)context_buf + lane_id) = key_val_r.v; } } template<typename T> struct CATypeConverter { using Type = T; }; template<> struct CATypeConverter<half> { using Type = uint16_t; }; template<typename T> void cross_attention_dispatch(T* query_buf, const T* Q_bias, T* key_cache, const T* K_bias, T* value_cache, const T* V_bias, const int* length, T* context_buf, const bool* finished, const int max_batch_size, const int inference_batch_size, const int head_num, const int size_per_head, const int step, const int memory_max_len, const bool batch_major_cache, const float q_scaling, outputCrossAttentionParam<float> output_cross_attention_params, const int* ia3_tasks, const T* ia3_key_weights, const T* ia3_value_weights, hipStream_t stream) { if (!batch_major_cache) { const int block_sz = ATTENTION_BLOCK_SIZE; float scalar = 1.f / (sqrtf(size_per_head * 1.0f) * q_scaling); dim3 grid(inference_batch_size * head_num); int cond = size_per_head * ((ATTENION_OPT) ? 
1 : 0); switch (cond) { case 32: hipLaunchKernelGGL(( cross_attention_kernel_opt<T, 32, block_sz>) , dim3(grid), dim3(block_sz), sizeof(float) * memory_max_len, stream, query_buf, Q_bias, key_cache, K_bias, value_cache, V_bias, length, context_buf, finished, max_batch_size, head_num, step, memory_max_len, scalar, ia3_tasks, ia3_key_weights, ia3_value_weights); break; case 64: hipLaunchKernelGGL(( cross_attention_kernel_opt<T, 64, block_sz>) , dim3(grid), dim3(block_sz), sizeof(float) * memory_max_len, stream, query_buf, Q_bias, key_cache, K_bias, value_cache, V_bias, length, context_buf, finished, max_batch_size, head_num, step, memory_max_len, scalar, ia3_tasks, ia3_key_weights, ia3_value_weights); break; case 128: hipLaunchKernelGGL(( cross_attention_kernel_opt<T, 128, block_sz>) , dim3(grid), dim3(block_sz), sizeof(float) * memory_max_len, stream, query_buf, Q_bias, key_cache, K_bias, value_cache, V_bias, length, context_buf, finished, max_batch_size, head_num, step, memory_max_len, scalar, ia3_tasks, ia3_key_weights, ia3_value_weights); break; default: // default path int block_size = 128; if (memory_max_len <= 64) { block_size = 64; } else if (memory_max_len <= 128 && memory_max_len > size_per_head) { block_size = 128; } else if (memory_max_len > 128 && memory_max_len <= 256) { block_size = 256; } else if (memory_max_len > 256 && memory_max_len <= 512) { block_size = 512; } else { block_size = 1024; } if (block_size < size_per_head) { block_size = size_per_head; } assert(block_size <= 1024); dim3 block(block_size); int shared_size = sizeof(T) * (size_per_head + memory_max_len); hipLaunchKernelGGL(( cross_attention_kernel<T>), dim3(grid), dim3(block), shared_size, stream, query_buf, Q_bias, key_cache, K_bias, value_cache, V_bias, length, context_buf, finished, max_batch_size, head_num, size_per_head, step, memory_max_len, scalar, ia3_tasks, ia3_key_weights, ia3_value_weights); } } else { assert(step > 0); // assert(size_per_head == 32 || size_per_head == 64 || size_per_head == 128); // using DataType = typename std::conditional<sizeof(T) == 4, float, uint16_t>::type; using DataType = typename CATypeConverter<T>::Type; // Prepare the parameters. Cross_multihead_attention_params<DataType> params; params.q_bias = reinterpret_cast<const DataType*>(Q_bias); params.k_bias = reinterpret_cast<const DataType*>(K_bias); params.v_bias = reinterpret_cast<const DataType*>(V_bias); // Set the output buffer. params.out = reinterpret_cast<DataType*>(context_buf); // Set the input buffers. params.q = reinterpret_cast<const DataType*>(query_buf); params.k = nullptr; params.v = nullptr; params.stride = 0; params.finished = const_cast<bool*>(finished); params.memory_length_per_sample = const_cast<int*>(length); params.k_cache = reinterpret_cast<DataType*>(key_cache); params.v_cache = reinterpret_cast<DataType*>(value_cache); params.batch_size = inference_batch_size; // TODO(bhsueh) We can use batch but not batch * beam_width in k/v cache in cross attention // because they are same for all beams. params.beam_width = 1; // We don't care the beam_width in cross attention, set to 1 is enough. 
params.memory_max_len = memory_max_len; params.timestep = step - 1; params.num_heads = head_num; params.hidden_size_per_head = size_per_head; params.inv_sqrt_dh = 1.F / (sqrtf((float)params.hidden_size_per_head) * q_scaling); // output cross attentions params.max_decoder_seq_len = output_cross_attention_params.max_decoder_seq_len; params.cross_attention_out = output_cross_attention_params.cross_attention_out; params.is_return_cross_attentions = output_cross_attention_params.is_return_cross_attentions; params.ia3_tasks = ia3_tasks; params.ia3_key_weights = reinterpret_cast<const DataType*>(ia3_key_weights); params.ia3_value_weights = reinterpret_cast<const DataType*>(ia3_value_weights); cross_multihead_attention(params, stream); } } template void cross_attention_dispatch(float* query_buf, const float* Q_bias, float* key_cache, const float* K_bias, float* value_cache, const float* V_bias, const int* length, float* context_buf, const bool* finished, const int max_batch_size, const int inference_batch_size, const int head_num, const int size_per_head, const int step, const int memory_max_len, const bool batch_major_cache, const float q_scaling, outputCrossAttentionParam<float> output_cross_attention_params, const int* ia3_tasks, const float* ia3_key_weights, const float* ia3_value_weights, hipStream_t stream); template void cross_attention_dispatch(half* query_buf, const half* Q_bias, half* key_cache, const half* K_bias, half* value_cache, const half* V_bias, const int* length, half* context_buf, const bool* finished, const int max_batch_size, const int inference_batch_size, const int head_num, const int size_per_head, const int step, const int memory_max_len, const bool batch_major_cache, const float q_scaling, outputCrossAttentionParam<float> output_cross_attention_params, const int* ia3_tasks, const half* ia3_key_weights, const half* ia3_value_weights, hipStream_t stream); #ifdef ENABLE_BF16 template void cross_attention_dispatch(__nv_bfloat16* query_buf, const __nv_bfloat16* Q_bias, __nv_bfloat16* key_cache, const __nv_bfloat16* K_bias, __nv_bfloat16* value_cache, const __nv_bfloat16* V_bias, const int* length, __nv_bfloat16* context_buf, const bool* finished, const int max_batch_size, const int inference_batch_size, const int head_num, const int size_per_head, const int step, const int memory_max_len, const bool batch_major_cache, const float q_scaling, outputCrossAttentionParam<float> output_cross_attention_params, const int* ia3_tasks, const __nv_bfloat16* ia3_key_weights, const __nv_bfloat16* ia3_value_weights, hipStream_t stream); #endif // Currently need to transpose at the first step in Cross attention template<typename T> __global__ void transpose_4d_batch_major_mem_k_cache( T* k_dst, const T* k_src, const int head_num, const int size_per_head, const int max_seq_len) { // B, L, H, Dh -> B, H, Dh/x, L, x const int batch_id = blockIdx.y; const int head_id = blockIdx.z; constexpr int X_ELEMS = (sizeof(T) == 4) ? 
4 : 8; auto key_src = reinterpret_cast<const uint4*>(k_src + batch_id * head_num * size_per_head * max_seq_len + head_id * size_per_head); auto key_dst = reinterpret_cast<uint4*>(k_dst + batch_id * head_num * size_per_head * max_seq_len + head_id * size_per_head * max_seq_len); const int out_idx = blockIdx.x * blockDim.x + threadIdx.x; int size_per_head_div_x = size_per_head / X_ELEMS; if (out_idx >= size_per_head_div_x * max_seq_len) { return; } int idx = out_idx; const int k_seq_len_id = idx % max_seq_len; idx = (idx - k_seq_len_id) / max_seq_len; const int k_head_size_id = idx % size_per_head_div_x; key_dst[out_idx] = key_src[k_seq_len_id * head_num * size_per_head_div_x + k_head_size_id]; } template<typename T> __global__ void transpose_4d_batch_major_mem_v_cache( T* v_dst, const T* v_src, const int head_num, const int size_per_head, const int max_seq_len) { // B, L, H, Dh -> B, H, L, Dh const int batch_id = blockIdx.y; const int head_id = blockIdx.z; // 16 byte loads will handle "x" dimension auto val_src = reinterpret_cast<const uint4*>(v_src + batch_id * head_num * size_per_head * max_seq_len + head_id * size_per_head); auto val_dst = reinterpret_cast<uint4*>(v_dst + batch_id * head_num * size_per_head * max_seq_len + head_id * size_per_head * max_seq_len); // idx is over output dimension L * size_per_head / x for values const int out_idx = blockIdx.x * blockDim.x + threadIdx.x; constexpr int X_ELEMS = (sizeof(T) == 4) ? 4 : 8; const int size_per_head_div_x = size_per_head / X_ELEMS; if (out_idx >= size_per_head_div_x * max_seq_len) { return; } int idx = out_idx; const int v_head_size_id = idx % size_per_head_div_x; idx = (idx - v_head_size_id) / size_per_head_div_x; const int v_seq_len_id = idx % max_seq_len; val_dst[out_idx] = val_src[v_seq_len_id * head_num * size_per_head_div_x + v_head_size_id]; } template<typename T> void transpose_4d_batch_major_memory_kernelLauncher(T* dst, const T* src, const int local_batch_size, const int max_seq_len, const int size_per_head, const int local_head_num, const bool k_cache, hipStream_t stream) { constexpr int block_sz = 128; constexpr int x = (sizeof(T) == 4) ? 
4 : 8; int size = max_seq_len * size_per_head / x; dim3 grid((size + block_sz - 1) / block_sz, local_batch_size, local_head_num); if (k_cache) { hipLaunchKernelGGL(( transpose_4d_batch_major_mem_k_cache), dim3(grid), dim3(block_sz), 0, stream, dst, src, local_head_num, size_per_head, max_seq_len); } else { hipLaunchKernelGGL(( transpose_4d_batch_major_mem_v_cache), dim3(grid), dim3(block_sz), 0, stream, dst, src, local_head_num, size_per_head, max_seq_len); } } template void transpose_4d_batch_major_memory_kernelLauncher(float* dst, const float* src, const int local_batch_size, const int max_seq_len, const int size_per_head, const int local_head_num, const bool k_cache, hipStream_t stream); template void transpose_4d_batch_major_memory_kernelLauncher(half* dst, const half* src, const int local_batch_size, const int max_seq_len, const int size_per_head, const int local_head_num, const bool k_cache, hipStream_t stream); #ifdef ENABLE_BF16 template void transpose_4d_batch_major_memory_kernelLauncher(__nv_bfloat16* dst, const __nv_bfloat16* src, const int local_batch_size, const int max_seq_len, const int size_per_head, const int local_head_num, const bool k_cache, hipStream_t stream); #endif template<typename T> void DecoderCrossAttentionLayer<T>::allocateBuffer() { FT_CHECK(false); if (is_allocate_buffer_ == false) { q_buf_ = reinterpret_cast<T*>(allocator_->reMalloc(q_buf_, sizeof(T) * max_batch_size_ * hidden_units_, false)); context_buf_ = reinterpret_cast<T*>( allocator_->reMalloc(context_buf_, sizeof(T) * max_batch_size_ * hidden_units_, false)); if (is_batch_major_cache_) { mem_cache_buf_ = reinterpret_cast<T*>(allocator_->reMalloc( mem_cache_buf_, sizeof(T) * max_batch_size_ * max_mem_seq_len_ * hidden_units_, false)); } is_allocate_buffer_ = true; } } template<typename T> void DecoderCrossAttentionLayer<T>::allocateBuffer(size_t batch_size, size_t max_mem_seq_len) { FT_LOG_DEBUG(__PRETTY_FUNCTION__); q_buf_ = reinterpret_cast<T*>(allocator_->reMalloc(q_buf_, sizeof(T) * batch_size * hidden_units_, false)); context_buf_ = reinterpret_cast<T*>(allocator_->reMalloc(context_buf_, sizeof(T) * batch_size * hidden_units_, false)); if (is_batch_major_cache_) { mem_cache_buf_ = reinterpret_cast<T*>( allocator_->reMalloc(mem_cache_buf_, sizeof(T) * batch_size * max_mem_seq_len * hidden_units_, false)); } is_allocate_buffer_ = true; } template<typename T> void DecoderCrossAttentionLayer<T>::freeBuffer() { if (is_allocate_buffer_) { allocator_->free((void**)(&q_buf_)); allocator_->free((void**)(&context_buf_)); if (is_batch_major_cache_) { allocator_->free((void**)(&mem_cache_buf_)); } is_allocate_buffer_ = false; } } template<typename T> DecoderCrossAttentionLayer<T>::DecoderCrossAttentionLayer(size_t max_batch_size, size_t head_num, size_t size_per_head, size_t d_model, const float q_scaling, hipStream_t stream, cublasMMWrapper* cublas_wrapper, IAllocator* allocator, bool is_free_buffer_after_forward): BaseAttentionLayer<T>(stream, cublas_wrapper, allocator, is_free_buffer_after_forward), max_batch_size_(max_batch_size), head_num_(head_num), size_per_head_(size_per_head), hidden_units_(head_num_ * size_per_head_), d_model_(d_model), q_scaling_(q_scaling) { FT_CHECK(size_per_head_ == 32 || size_per_head_ == 48 || size_per_head_ == 64 || size_per_head_ == 80 || size_per_head_ == 96 || size_per_head_ == 112 || size_per_head_ == 128 || size_per_head_ == 144 || size_per_head_ == 160 || size_per_head_ == 192 || size_per_head_ == 224 || size_per_head_ == 256); } template<typename T> 
DecoderCrossAttentionLayer<T>::DecoderCrossAttentionLayer(size_t max_batch_size, size_t head_num, size_t size_per_head, hipStream_t stream, cublasMMWrapper* cublas_wrapper, IAllocator* allocator, bool is_free_buffer_after_forward): DecoderCrossAttentionLayer<T>(max_batch_size, head_num, size_per_head, head_num * size_per_head, 1.0f, stream, cublas_wrapper, allocator, is_free_buffer_after_forward) { } template<typename T> DecoderCrossAttentionLayer<T>::DecoderCrossAttentionLayer(size_t max_batch_size, size_t head_num, size_t size_per_head, const float q_scaling, hipStream_t stream, cublasMMWrapper* cublas_wrapper, IAllocator* allocator, bool is_free_buffer_after_forward): DecoderCrossAttentionLayer<T>(max_batch_size, head_num, size_per_head, head_num * size_per_head, q_scaling, stream, cublas_wrapper, allocator, is_free_buffer_after_forward) { } template<typename T> DecoderCrossAttentionLayer<T>::DecoderCrossAttentionLayer(DecoderCrossAttentionLayer<T> const& attention_layer): DecoderCrossAttentionLayer(attention_layer.max_batch_size_, attention_layer.head_num_, attention_layer.size_per_head_, attention_layer.d_model_, attention_layer.q_scaling_, attention_layer.stream_, attention_layer.cublas_wrapper_, attention_layer.allocator_, attention_layer.is_free_buffer_after_forward_) { } template<typename T> DecoderCrossAttentionLayer<T>::~DecoderCrossAttentionLayer() { cublas_wrapper_ = nullptr; freeBuffer(); } template<typename T> void DecoderCrossAttentionLayer<T>::forward(TensorMap* output_tensors, TensorMap* input_tensors, const AttentionWeight<T>* attention_weights) { // input tensors: // attention_input [batch_size, d_model], // encoder_output [batch_size, mem_max_seq_len, memory_d_model], // encoder_sequence_length [batch_size], // step [1] on cpu // finished [batch_size] (optional) // ia3_tasks [batch_size] (optional) // output tensors: // decoder_layer_output [batch_size, d_model], // key_mem_cache [batch_size, head_num, size_per_head // x, mem_max_seq_len, x], where x = 16 / sizeof(T) // value_mem_cache [batch_size, head_num, mem_max_seq_len, size_per_head] // cross_attentions [batch_size, head_num, max_decoder_seq_len, mem_max_seq_len] optional float* FT_LOG_DEBUG("%s", __PRETTY_FUNCTION__); allocateBuffer(input_tensors->at("input_query").shape[0], input_tensors->at("encoder_output").shape[1]); const T* attention_input = input_tensors->getPtr<T>("input_query"); Tensor encoder_output_tensor = input_tensors->at("encoder_output"); const int* memory_sequence_length = input_tensors->getPtr<int>("encoder_sequence_length"); const int step = input_tensors->getVal<int>("step"); const bool* finished = input_tensors->getPtr<bool>("finished", nullptr); const bool has_ia3 = input_tensors->isExist("ia3_tasks"); T* attention_out = output_tensors->getPtr<T>("hidden_features"); T* key_mem_cache = output_tensors->getPtr<T>("key_cache"); T* value_mem_cache = output_tensors->getPtr<T>("value_cache"); const bool output_cross_attentions = output_tensors->isExist("cross_attentions"); const int max_decoder_seq_len = output_cross_attentions ? 
output_tensors->at("cross_attentions").shape[2] : 0; const int batch_size = input_tensors->at("input_query").shape[0]; const int mem_max_seq_len = encoder_output_tensor.shape[1]; cublas_wrapper_->Gemm(HIPBLAS_OP_N, HIPBLAS_OP_N, hidden_units_, // n batch_size, d_model_, // k attention_weights->query_weight.kernel, hidden_units_, // n attention_input, d_model_, // k q_buf_, hidden_units_ /* n */); if (step == 1) { if (is_batch_major_cache_) { cublas_wrapper_->Gemm(HIPBLAS_OP_N, HIPBLAS_OP_N, hidden_units_, batch_size * mem_max_seq_len, encoder_output_tensor.shape[2], attention_weights->key_weight.kernel, hidden_units_, encoder_output_tensor.getPtr<T>(), encoder_output_tensor.shape[2], mem_cache_buf_, hidden_units_); transpose_4d_batch_major_memory_kernelLauncher<T>( key_mem_cache, mem_cache_buf_, batch_size, mem_max_seq_len, size_per_head_, head_num_, true, stream_); sync_check_cuda_error(); cublas_wrapper_->Gemm(HIPBLAS_OP_N, HIPBLAS_OP_N, hidden_units_, batch_size * mem_max_seq_len, encoder_output_tensor.shape[2], attention_weights->value_weight.kernel, hidden_units_, encoder_output_tensor.getPtr<T>(), encoder_output_tensor.shape[2], mem_cache_buf_, hidden_units_); transpose_4d_batch_major_memory_kernelLauncher<T>(value_mem_cache, mem_cache_buf_, batch_size, mem_max_seq_len, size_per_head_, head_num_, false, stream_); sync_check_cuda_error(); } else { cublas_wrapper_->Gemm(HIPBLAS_OP_N, HIPBLAS_OP_N, hidden_units_, batch_size * mem_max_seq_len, encoder_output_tensor.shape[2], attention_weights->key_weight.kernel, hidden_units_, encoder_output_tensor.getPtr<T>(), encoder_output_tensor.shape[2], key_mem_cache, hidden_units_); cublas_wrapper_->Gemm(HIPBLAS_OP_N, HIPBLAS_OP_N, hidden_units_, batch_size * mem_max_seq_len, encoder_output_tensor.shape[2], attention_weights->value_weight.kernel, hidden_units_, encoder_output_tensor.getPtr<T>(), encoder_output_tensor.shape[2], value_mem_cache, hidden_units_); } } sync_check_cuda_error(); outputCrossAttentionParam<float> output_attention_param{}; // output cross attentions if (output_cross_attentions) { output_attention_param.max_decoder_seq_len = max_decoder_seq_len; output_attention_param.cross_attention_out = output_tensors->at("cross_attentions").getPtr<float>(); output_attention_param.is_return_cross_attentions = true; } cross_attention_dispatch<T>(q_buf_, attention_weights->query_weight.bias, key_mem_cache, attention_weights->key_weight.bias, value_mem_cache, attention_weights->value_weight.bias, memory_sequence_length, context_buf_, finished, batch_size, batch_size, head_num_, size_per_head_, step, mem_max_seq_len, is_batch_major_cache_, q_scaling_, output_attention_param, has_ia3 ? input_tensors->at("ia3_tasks").getPtr<const int>() : nullptr, has_ia3 ? attention_weights->ia3_key_weight.kernel : nullptr, has_ia3 ? attention_weights->ia3_value_weight.kernel : nullptr, stream_); sync_check_cuda_error(); cublas_wrapper_->Gemm(HIPBLAS_OP_N, HIPBLAS_OP_N, d_model_, // n batch_size, hidden_units_, // k attention_weights->attention_output_weight.kernel, d_model_, // n context_buf_, hidden_units_, // k attention_out, d_model_ /* n */); if (is_free_buffer_after_forward_ == true) { freeBuffer(); } } template class DecoderCrossAttentionLayer<float>; template class DecoderCrossAttentionLayer<half>; #ifdef ENABLE_BF16 template class DecoderCrossAttentionLayer<__nv_bfloat16>; #endif } // namespace fastertransformer
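/*
 * A minimal host-side sketch of the block-size selection used by the default
 * (non-ATTENION_OPT) path of cross_attention_dispatch above, pulled out so it can be
 * checked without a GPU. The helper name pick_cross_attention_block_size is an
 * assumption for illustration only and does not exist in the library.
 */
#include <cassert>

static int pick_cross_attention_block_size(int memory_max_len, int size_per_head)
{
    int block_size = 128;
    if (memory_max_len <= 64) {
        block_size = 64;
    }
    else if (memory_max_len <= 128 && memory_max_len > size_per_head) {
        block_size = 128;
    }
    else if (memory_max_len > 128 && memory_max_len <= 256) {
        block_size = 256;
    }
    else if (memory_max_len > 256 && memory_max_len <= 512) {
        block_size = 512;
    }
    else {
        block_size = 1024;
    }
    // cross_attention_kernel indexes shared memory and per-thread work by
    // size_per_head, so the block must span at least that many threads.
    if (block_size < size_per_head) {
        block_size = size_per_head;
    }
    assert(block_size <= 1024);
    return block_size;
}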
d004d0e8077e40114b77ae8e5ce109d5d9a56b57.cu
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef CUDART_VERSION #error CUDART_VERSION Undefined! #elif (CUDART_VERSION >= 11050) #include <cub/cub.cuh> #else #include "3rdparty/cub/cub.cuh" #endif #include "src/fastertransformer/kernels/decoder_masked_multihead_attention.h" #include "src/fastertransformer/kernels/decoder_masked_multihead_attention_utils.h" #include "src/fastertransformer/kernels/reduce_kernel_utils.cuh" #include "src/fastertransformer/layers/attention_layers/DecoderCrossAttentionLayer.h" #include "src/fastertransformer/utils/cuda_type_utils.cuh" namespace fastertransformer { const int WARP_SIZE = 32; const bool ATTENION_OPT = true; const int ATTENTION_BLOCK_SIZE = 256; /////////////////////////////////////////////////////////////////////////////////////////////////// template<int HALF_ELEMENTS_PER_WARP_LOAD> using Copy_half_t = typename std::conditional< HALF_ELEMENTS_PER_WARP_LOAD == 32, half, typename std::conditional<HALF_ELEMENTS_PER_WARP_LOAD == 64, int, typename std::conditional<HALF_ELEMENTS_PER_WARP_LOAD == 128, int2, int4>::type>::type>:: type; template<typename T, int ELEMENTS_PER_WARP_LOAD> using Copy_t = Copy_half_t<sizeof(T) / sizeof(half) * ELEMENTS_PER_WARP_LOAD>; /////////////////////////////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void cross_attention_kernel(T* query_buf, const T* Q_bias, T* key_cache, const T* K_bias, T* value_cache, const T* V_bias, const int* length_per_sample, T* context_buf, const bool* finished, int batch_size, int head_num, int size_per_head, int step, const int seq_len, const T scalar, const int* ia3_tasks, const T* ia3_key_weights, const T* ia3_value_weights) { if (finished != nullptr && finished[blockIdx.x / head_num] == true) { return; } int tid = threadIdx.x; int bid = blockIdx.x / head_num; int head_id = blockIdx.x % head_num; const bool do_ia3 = step == 1 && ia3_tasks != nullptr; const int ia3_task = do_ia3 ? ia3_tasks[bid] : 0; extern __shared__ __align__(sizeof(float)) unsigned s_buf[]; // align on largest type T* sq = reinterpret_cast<T*>(s_buf); T* logits = reinterpret_cast<T*>(&sq[size_per_head]); int length = __ldg(&length_per_sample[bid]); int qkv_id = bid * head_num * size_per_head + head_id * size_per_head + tid; int qkv_bias_id = head_id * size_per_head + tid; if (tid < size_per_head) { sq[tid] = add(query_buf[qkv_id], Q_bias[qkv_bias_id]); } __syncthreads(); for (int ite = 0; ite < length; ++ite) { int key_id = bid * (seq_len * head_num * size_per_head) + ite * (head_num * size_per_head) + head_id * size_per_head + tid; T key = tid < size_per_head ? key_cache[key_id] : (T)(0.0f); // For the first step, we should add bias to key memory cache. // The KV memory cache only need to be updated at the first step. 
if (step == 1 && tid < size_per_head) { key = add(key, K_bias[head_id * size_per_head + tid]); if (do_ia3) { key = mmha::mul<T, T, T>(key, ia3_key_weights[(ia3_task * head_num + head_id) * size_per_head + tid]); } key_cache[key_id] = key; } T val = (tid < size_per_head) ? mul(key, sq[tid], scalar) : (T)(0.0f); T qk = blockReduceSum(val); if (threadIdx.x == 0) { logits[ite] = qk; } __syncthreads(); // try to remove } __syncthreads(); __shared__ float s_max_val, s_sum; float local_i = tid < length ? (float)logits[tid] : -1e20f; float max_val = blockReduceMax(local_i); if (tid == 0) { s_max_val = max_val; } __syncthreads(); local_i -= s_max_val; float local_o = tid < length ? __expf(local_i) : 0.0f; float val = blockReduceSum(local_o); if (tid == 0) { s_sum = val + 1e-6; } __syncthreads(); if (tid < length) { logits[tid] = local_o / s_sum; } __syncthreads(); if (tid < size_per_head) { T sum = (T)0.0f; for (int ite = 0; ite < length; ++ite) { int value_id = bid * seq_len * head_num * size_per_head + ite * head_num * size_per_head + head_id * size_per_head + tid; T value = value_cache[value_id]; // for the first step, we should add bias to key memory cache if (step == 1) { value = add(value, V_bias[head_id * size_per_head + tid]); if (do_ia3) { value = mmha::mul<T, T, T>( value, ia3_value_weights[(ia3_task * head_num + head_id) * size_per_head + tid]); } value_cache[value_id] = value; } sum = fma(value, logits[ite], sum); } context_buf[bid * head_num * size_per_head + head_id * size_per_head + tid] = sum; } } template<typename T, int size_per_head, int block_sz> __global__ void cross_attention_kernel_opt(T* __restrict query_buf, const T* __restrict Q_bias, T* __restrict key_cache, const T* __restrict K_bias, T* __restrict value_cache, const T* __restrict V_bias, const int* length_per_sample, T* __restrict context_buf, const bool* finished, int batch_size, int head_num, const int step, const int seq_len, const float scalar, const int* ia3_tasks, const T* ia3_key_weights, const T* ia3_value_weights) { if (finished != nullptr && finished[blockIdx.x / head_num] == true) { return; } typedef Copy_t<T, size_per_head> copy_t; const int elems_per_thread = size_per_head / WARP_SIZE; union Access_t { copy_t v; T x[elems_per_thread]; // supported size 1,2,4 }; typedef struct Float_n_t { float x[elems_per_thread]; // supported size 1,2,4 } float_n_t; __shared__ float_n_t sq[block_sz]; extern __shared__ float logits[]; // use to store the logits from [0~step] const int warp_id = threadIdx.x / WARP_SIZE; const int warp_num = block_sz / WARP_SIZE; typedef cub::BlockReduce<float, block_sz> MaxValBlockReduce; typedef cub::BlockReduce<float, block_sz> BlockReduce; __shared__ typename MaxValBlockReduce::TempStorage max_val_block_temp_storage; __shared__ typename BlockReduce::TempStorage block_temp_storage; __shared__ typename cub::WarpReduce<float>::TempStorage temp_storage[warp_num]; const int tid = threadIdx.x; const int bid = blockIdx.x / head_num; const int head_id = blockIdx.x % head_num; int length = __ldg(&length_per_sample[bid]); const int lane_id = tid % WARP_SIZE; int qkv_id = bid * head_num * size_per_head + head_id * size_per_head; int qkv_bias_id = head_id * size_per_head; int key_value_id = bid * (seq_len * head_num * size_per_head) + +head_id * size_per_head; const bool do_ia3 = step == 1 && ia3_tasks != nullptr; const int ia3_task = do_ia3 ? 
ia3_tasks[bid] : 0; query_buf = &query_buf[qkv_id]; K_bias = &K_bias[qkv_bias_id]; key_cache = &key_cache[key_value_id]; Q_bias = &Q_bias[qkv_bias_id]; V_bias = &V_bias[qkv_bias_id]; value_cache = &value_cache[key_value_id]; context_buf = &context_buf[qkv_id]; Access_t bias_r, key_val_r, query_buf_r; // each warp will have its own copy of sq query_buf_r.v = *((copy_t*)query_buf + lane_id); bias_r.v = *((copy_t*)Q_bias + lane_id); float qb_r[elems_per_thread]; for (int i = 0; i < elems_per_thread; ++i) { qb_r[i] = (float)query_buf_r.x[i] + (float)bias_r.x[i]; } // offset for each step int offset = head_num * size_per_head; bias_r.v = *((copy_t*)K_bias + lane_id); for (int ite = warp_id; ite < length; ite += warp_num) { key_val_r.v = *((copy_t*)&key_cache[ite * offset] + lane_id); // For the first step, we should add bias to key memory cache. // The KV memory cache only need to be updated at the first step. if (step == 1) { for (int i = 0; i < elems_per_thread; i++) { key_val_r.x[i] = (float)key_val_r.x[i] + (float)bias_r.x[i]; if (do_ia3) { key_val_r.x[i] = (float)key_val_r.x[i] * (float)(ia3_key_weights[(ia3_task * head_num + head_id) * size_per_head + tid]); } } *((copy_t*)&key_cache[ite * offset] + lane_id) = key_val_r.v; } float val = 0.f; for (int i = 0; i < elems_per_thread; i++) { val = val + (float)key_val_r.x[i] * qb_r[i] * scalar; } float qk = cub::WarpReduce<float>(temp_storage[warp_id]).Sum(val); if (lane_id == 0) { logits[ite] = qk; } } __syncthreads(); __shared__ float s_max_val, s_sum; float local_i = -1e20f; for (int i = tid; i < length; i += blockDim.x) { local_i = max(local_i, logits[i]); } float max_val = MaxValBlockReduce(max_val_block_temp_storage).Reduce(local_i, cub::Max()); if (tid == 0) { s_max_val = max_val; } __syncthreads(); float local_o = 0.0f; for (int i = tid; i < length; i += blockDim.x) { logits[i] = __expf(logits[i] - s_max_val); local_o += logits[i]; } float val = BlockReduce(block_temp_storage).Sum(local_o); if (tid == 0) { s_sum = val + 1e-6; } __syncthreads(); float s_sum_inverse = __fdividef(1.0f, s_sum); for (int i = tid; i < length; i += blockDim.x) { logits[i] = logits[i] * s_sum_inverse; } __syncthreads(); // This optimization introduces discrepancy because of different order in FP32 summation float sum_r[elems_per_thread] = {0.f}; bias_r.v = *((copy_t*)V_bias + lane_id); for (int ite = warp_id; ite < length; ite += warp_num) { key_val_r.v = *((copy_t*)&value_cache[ite * offset] + lane_id); // For the first step, we should add bias to key memory cache. 
if (step == 1) { for (int i = 0; i < elems_per_thread; i++) { key_val_r.x[i] = (float)key_val_r.x[i] + (float)bias_r.x[i]; if (do_ia3) { key_val_r.x[i] = (float)key_val_r.x[i] * (float)(ia3_value_weights[(ia3_task * head_num + head_id) * size_per_head + tid]); } } *((copy_t*)&value_cache[ite * offset] + lane_id) = key_val_r.v; } for (int i = 0; i < elems_per_thread; ++i) { sum_r[i] += (float)key_val_r.x[i] * logits[ite]; } } for (int i = 0; i < elems_per_thread; i++) { sq[warp_id * WARP_SIZE + lane_id].x[i] = sum_r[i]; } __syncthreads(); if (threadIdx.x < WARP_SIZE) { #pragma unroll for (int j = 1; j < warp_num; j++) { for (int i = 0; i < elems_per_thread; ++i) { sum_r[i] = sum_r[i] + (float)sq[j * WARP_SIZE + threadIdx.x].x[i]; } } } __syncthreads(); #pragma unroll for (int i = 0; i < elems_per_thread; i++) { key_val_r.x[i] = sum_r[i]; } if (threadIdx.x < WARP_SIZE) { *((copy_t*)context_buf + lane_id) = key_val_r.v; } } template<typename T> struct CATypeConverter { using Type = T; }; template<> struct CATypeConverter<half> { using Type = uint16_t; }; template<typename T> void cross_attention_dispatch(T* query_buf, const T* Q_bias, T* key_cache, const T* K_bias, T* value_cache, const T* V_bias, const int* length, T* context_buf, const bool* finished, const int max_batch_size, const int inference_batch_size, const int head_num, const int size_per_head, const int step, const int memory_max_len, const bool batch_major_cache, const float q_scaling, outputCrossAttentionParam<float> output_cross_attention_params, const int* ia3_tasks, const T* ia3_key_weights, const T* ia3_value_weights, cudaStream_t stream) { if (!batch_major_cache) { const int block_sz = ATTENTION_BLOCK_SIZE; float scalar = 1.f / (sqrtf(size_per_head * 1.0f) * q_scaling); dim3 grid(inference_batch_size * head_num); int cond = size_per_head * ((ATTENION_OPT) ? 
1 : 0); switch (cond) { case 32: cross_attention_kernel_opt<T, 32, block_sz> <<<grid, block_sz, sizeof(float) * memory_max_len, stream>>>(query_buf, Q_bias, key_cache, K_bias, value_cache, V_bias, length, context_buf, finished, max_batch_size, head_num, step, memory_max_len, scalar, ia3_tasks, ia3_key_weights, ia3_value_weights); break; case 64: cross_attention_kernel_opt<T, 64, block_sz> <<<grid, block_sz, sizeof(float) * memory_max_len, stream>>>(query_buf, Q_bias, key_cache, K_bias, value_cache, V_bias, length, context_buf, finished, max_batch_size, head_num, step, memory_max_len, scalar, ia3_tasks, ia3_key_weights, ia3_value_weights); break; case 128: cross_attention_kernel_opt<T, 128, block_sz> <<<grid, block_sz, sizeof(float) * memory_max_len, stream>>>(query_buf, Q_bias, key_cache, K_bias, value_cache, V_bias, length, context_buf, finished, max_batch_size, head_num, step, memory_max_len, scalar, ia3_tasks, ia3_key_weights, ia3_value_weights); break; default: // default path int block_size = 128; if (memory_max_len <= 64) { block_size = 64; } else if (memory_max_len <= 128 && memory_max_len > size_per_head) { block_size = 128; } else if (memory_max_len > 128 && memory_max_len <= 256) { block_size = 256; } else if (memory_max_len > 256 && memory_max_len <= 512) { block_size = 512; } else { block_size = 1024; } if (block_size < size_per_head) { block_size = size_per_head; } assert(block_size <= 1024); dim3 block(block_size); int shared_size = sizeof(T) * (size_per_head + memory_max_len); cross_attention_kernel<T><<<grid, block, shared_size, stream>>>(query_buf, Q_bias, key_cache, K_bias, value_cache, V_bias, length, context_buf, finished, max_batch_size, head_num, size_per_head, step, memory_max_len, scalar, ia3_tasks, ia3_key_weights, ia3_value_weights); } } else { assert(step > 0); // assert(size_per_head == 32 || size_per_head == 64 || size_per_head == 128); // using DataType = typename std::conditional<sizeof(T) == 4, float, uint16_t>::type; using DataType = typename CATypeConverter<T>::Type; // Prepare the parameters. Cross_multihead_attention_params<DataType> params; params.q_bias = reinterpret_cast<const DataType*>(Q_bias); params.k_bias = reinterpret_cast<const DataType*>(K_bias); params.v_bias = reinterpret_cast<const DataType*>(V_bias); // Set the output buffer. params.out = reinterpret_cast<DataType*>(context_buf); // Set the input buffers. params.q = reinterpret_cast<const DataType*>(query_buf); params.k = nullptr; params.v = nullptr; params.stride = 0; params.finished = const_cast<bool*>(finished); params.memory_length_per_sample = const_cast<int*>(length); params.k_cache = reinterpret_cast<DataType*>(key_cache); params.v_cache = reinterpret_cast<DataType*>(value_cache); params.batch_size = inference_batch_size; // TODO(bhsueh) We can use batch but not batch * beam_width in k/v cache in cross attention // because they are same for all beams. params.beam_width = 1; // We don't care the beam_width in cross attention, set to 1 is enough. 
params.memory_max_len = memory_max_len; params.timestep = step - 1; params.num_heads = head_num; params.hidden_size_per_head = size_per_head; params.inv_sqrt_dh = 1.F / (sqrtf((float)params.hidden_size_per_head) * q_scaling); // output cross attentions params.max_decoder_seq_len = output_cross_attention_params.max_decoder_seq_len; params.cross_attention_out = output_cross_attention_params.cross_attention_out; params.is_return_cross_attentions = output_cross_attention_params.is_return_cross_attentions; params.ia3_tasks = ia3_tasks; params.ia3_key_weights = reinterpret_cast<const DataType*>(ia3_key_weights); params.ia3_value_weights = reinterpret_cast<const DataType*>(ia3_value_weights); cross_multihead_attention(params, stream); } } template void cross_attention_dispatch(float* query_buf, const float* Q_bias, float* key_cache, const float* K_bias, float* value_cache, const float* V_bias, const int* length, float* context_buf, const bool* finished, const int max_batch_size, const int inference_batch_size, const int head_num, const int size_per_head, const int step, const int memory_max_len, const bool batch_major_cache, const float q_scaling, outputCrossAttentionParam<float> output_cross_attention_params, const int* ia3_tasks, const float* ia3_key_weights, const float* ia3_value_weights, cudaStream_t stream); template void cross_attention_dispatch(half* query_buf, const half* Q_bias, half* key_cache, const half* K_bias, half* value_cache, const half* V_bias, const int* length, half* context_buf, const bool* finished, const int max_batch_size, const int inference_batch_size, const int head_num, const int size_per_head, const int step, const int memory_max_len, const bool batch_major_cache, const float q_scaling, outputCrossAttentionParam<float> output_cross_attention_params, const int* ia3_tasks, const half* ia3_key_weights, const half* ia3_value_weights, cudaStream_t stream); #ifdef ENABLE_BF16 template void cross_attention_dispatch(__nv_bfloat16* query_buf, const __nv_bfloat16* Q_bias, __nv_bfloat16* key_cache, const __nv_bfloat16* K_bias, __nv_bfloat16* value_cache, const __nv_bfloat16* V_bias, const int* length, __nv_bfloat16* context_buf, const bool* finished, const int max_batch_size, const int inference_batch_size, const int head_num, const int size_per_head, const int step, const int memory_max_len, const bool batch_major_cache, const float q_scaling, outputCrossAttentionParam<float> output_cross_attention_params, const int* ia3_tasks, const __nv_bfloat16* ia3_key_weights, const __nv_bfloat16* ia3_value_weights, cudaStream_t stream); #endif // Currently need to transpose at the first step in Cross attention template<typename T> __global__ void transpose_4d_batch_major_mem_k_cache( T* k_dst, const T* k_src, const int head_num, const int size_per_head, const int max_seq_len) { // B, L, H, Dh -> B, H, Dh/x, L, x const int batch_id = blockIdx.y; const int head_id = blockIdx.z; constexpr int X_ELEMS = (sizeof(T) == 4) ? 
4 : 8; auto key_src = reinterpret_cast<const uint4*>(k_src + batch_id * head_num * size_per_head * max_seq_len + head_id * size_per_head); auto key_dst = reinterpret_cast<uint4*>(k_dst + batch_id * head_num * size_per_head * max_seq_len + head_id * size_per_head * max_seq_len); const int out_idx = blockIdx.x * blockDim.x + threadIdx.x; int size_per_head_div_x = size_per_head / X_ELEMS; if (out_idx >= size_per_head_div_x * max_seq_len) { return; } int idx = out_idx; const int k_seq_len_id = idx % max_seq_len; idx = (idx - k_seq_len_id) / max_seq_len; const int k_head_size_id = idx % size_per_head_div_x; key_dst[out_idx] = key_src[k_seq_len_id * head_num * size_per_head_div_x + k_head_size_id]; } template<typename T> __global__ void transpose_4d_batch_major_mem_v_cache( T* v_dst, const T* v_src, const int head_num, const int size_per_head, const int max_seq_len) { // B, L, H, Dh -> B, H, L, Dh const int batch_id = blockIdx.y; const int head_id = blockIdx.z; // 16 byte loads will handle "x" dimension auto val_src = reinterpret_cast<const uint4*>(v_src + batch_id * head_num * size_per_head * max_seq_len + head_id * size_per_head); auto val_dst = reinterpret_cast<uint4*>(v_dst + batch_id * head_num * size_per_head * max_seq_len + head_id * size_per_head * max_seq_len); // idx is over output dimension L * size_per_head / x for values const int out_idx = blockIdx.x * blockDim.x + threadIdx.x; constexpr int X_ELEMS = (sizeof(T) == 4) ? 4 : 8; const int size_per_head_div_x = size_per_head / X_ELEMS; if (out_idx >= size_per_head_div_x * max_seq_len) { return; } int idx = out_idx; const int v_head_size_id = idx % size_per_head_div_x; idx = (idx - v_head_size_id) / size_per_head_div_x; const int v_seq_len_id = idx % max_seq_len; val_dst[out_idx] = val_src[v_seq_len_id * head_num * size_per_head_div_x + v_head_size_id]; } template<typename T> void transpose_4d_batch_major_memory_kernelLauncher(T* dst, const T* src, const int local_batch_size, const int max_seq_len, const int size_per_head, const int local_head_num, const bool k_cache, cudaStream_t stream) { constexpr int block_sz = 128; constexpr int x = (sizeof(T) == 4) ? 
4 : 8; int size = max_seq_len * size_per_head / x; dim3 grid((size + block_sz - 1) / block_sz, local_batch_size, local_head_num); if (k_cache) { transpose_4d_batch_major_mem_k_cache<<<grid, block_sz, 0, stream>>>( dst, src, local_head_num, size_per_head, max_seq_len); } else { transpose_4d_batch_major_mem_v_cache<<<grid, block_sz, 0, stream>>>( dst, src, local_head_num, size_per_head, max_seq_len); } } template void transpose_4d_batch_major_memory_kernelLauncher(float* dst, const float* src, const int local_batch_size, const int max_seq_len, const int size_per_head, const int local_head_num, const bool k_cache, cudaStream_t stream); template void transpose_4d_batch_major_memory_kernelLauncher(half* dst, const half* src, const int local_batch_size, const int max_seq_len, const int size_per_head, const int local_head_num, const bool k_cache, cudaStream_t stream); #ifdef ENABLE_BF16 template void transpose_4d_batch_major_memory_kernelLauncher(__nv_bfloat16* dst, const __nv_bfloat16* src, const int local_batch_size, const int max_seq_len, const int size_per_head, const int local_head_num, const bool k_cache, cudaStream_t stream); #endif template<typename T> void DecoderCrossAttentionLayer<T>::allocateBuffer() { FT_CHECK(false); if (is_allocate_buffer_ == false) { q_buf_ = reinterpret_cast<T*>(allocator_->reMalloc(q_buf_, sizeof(T) * max_batch_size_ * hidden_units_, false)); context_buf_ = reinterpret_cast<T*>( allocator_->reMalloc(context_buf_, sizeof(T) * max_batch_size_ * hidden_units_, false)); if (is_batch_major_cache_) { mem_cache_buf_ = reinterpret_cast<T*>(allocator_->reMalloc( mem_cache_buf_, sizeof(T) * max_batch_size_ * max_mem_seq_len_ * hidden_units_, false)); } is_allocate_buffer_ = true; } } template<typename T> void DecoderCrossAttentionLayer<T>::allocateBuffer(size_t batch_size, size_t max_mem_seq_len) { FT_LOG_DEBUG(__PRETTY_FUNCTION__); q_buf_ = reinterpret_cast<T*>(allocator_->reMalloc(q_buf_, sizeof(T) * batch_size * hidden_units_, false)); context_buf_ = reinterpret_cast<T*>(allocator_->reMalloc(context_buf_, sizeof(T) * batch_size * hidden_units_, false)); if (is_batch_major_cache_) { mem_cache_buf_ = reinterpret_cast<T*>( allocator_->reMalloc(mem_cache_buf_, sizeof(T) * batch_size * max_mem_seq_len * hidden_units_, false)); } is_allocate_buffer_ = true; } template<typename T> void DecoderCrossAttentionLayer<T>::freeBuffer() { if (is_allocate_buffer_) { allocator_->free((void**)(&q_buf_)); allocator_->free((void**)(&context_buf_)); if (is_batch_major_cache_) { allocator_->free((void**)(&mem_cache_buf_)); } is_allocate_buffer_ = false; } } template<typename T> DecoderCrossAttentionLayer<T>::DecoderCrossAttentionLayer(size_t max_batch_size, size_t head_num, size_t size_per_head, size_t d_model, const float q_scaling, cudaStream_t stream, cublasMMWrapper* cublas_wrapper, IAllocator* allocator, bool is_free_buffer_after_forward): BaseAttentionLayer<T>(stream, cublas_wrapper, allocator, is_free_buffer_after_forward), max_batch_size_(max_batch_size), head_num_(head_num), size_per_head_(size_per_head), hidden_units_(head_num_ * size_per_head_), d_model_(d_model), q_scaling_(q_scaling) { FT_CHECK(size_per_head_ == 32 || size_per_head_ == 48 || size_per_head_ == 64 || size_per_head_ == 80 || size_per_head_ == 96 || size_per_head_ == 112 || size_per_head_ == 128 || size_per_head_ == 144 || size_per_head_ == 160 || size_per_head_ == 192 || size_per_head_ == 224 || size_per_head_ == 256); } template<typename T> DecoderCrossAttentionLayer<T>::DecoderCrossAttentionLayer(size_t 
max_batch_size, size_t head_num, size_t size_per_head, cudaStream_t stream, cublasMMWrapper* cublas_wrapper, IAllocator* allocator, bool is_free_buffer_after_forward): DecoderCrossAttentionLayer<T>(max_batch_size, head_num, size_per_head, head_num * size_per_head, 1.0f, stream, cublas_wrapper, allocator, is_free_buffer_after_forward) { } template<typename T> DecoderCrossAttentionLayer<T>::DecoderCrossAttentionLayer(size_t max_batch_size, size_t head_num, size_t size_per_head, const float q_scaling, cudaStream_t stream, cublasMMWrapper* cublas_wrapper, IAllocator* allocator, bool is_free_buffer_after_forward): DecoderCrossAttentionLayer<T>(max_batch_size, head_num, size_per_head, head_num * size_per_head, q_scaling, stream, cublas_wrapper, allocator, is_free_buffer_after_forward) { } template<typename T> DecoderCrossAttentionLayer<T>::DecoderCrossAttentionLayer(DecoderCrossAttentionLayer<T> const& attention_layer): DecoderCrossAttentionLayer(attention_layer.max_batch_size_, attention_layer.head_num_, attention_layer.size_per_head_, attention_layer.d_model_, attention_layer.q_scaling_, attention_layer.stream_, attention_layer.cublas_wrapper_, attention_layer.allocator_, attention_layer.is_free_buffer_after_forward_) { } template<typename T> DecoderCrossAttentionLayer<T>::~DecoderCrossAttentionLayer() { cublas_wrapper_ = nullptr; freeBuffer(); } template<typename T> void DecoderCrossAttentionLayer<T>::forward(TensorMap* output_tensors, TensorMap* input_tensors, const AttentionWeight<T>* attention_weights) { // input tensors: // attention_input [batch_size, d_model], // encoder_output [batch_size, mem_max_seq_len, memory_d_model], // encoder_sequence_length [batch_size], // step [1] on cpu // finished [batch_size] (optional) // ia3_tasks [batch_size] (optional) // output tensors: // decoder_layer_output [batch_size, d_model], // key_mem_cache [batch_size, head_num, size_per_head // x, mem_max_seq_len, x], where x = 16 / sizeof(T) // value_mem_cache [batch_size, head_num, mem_max_seq_len, size_per_head] // cross_attentions [batch_size, head_num, max_decoder_seq_len, mem_max_seq_len] optional float* FT_LOG_DEBUG("%s", __PRETTY_FUNCTION__); allocateBuffer(input_tensors->at("input_query").shape[0], input_tensors->at("encoder_output").shape[1]); const T* attention_input = input_tensors->getPtr<T>("input_query"); Tensor encoder_output_tensor = input_tensors->at("encoder_output"); const int* memory_sequence_length = input_tensors->getPtr<int>("encoder_sequence_length"); const int step = input_tensors->getVal<int>("step"); const bool* finished = input_tensors->getPtr<bool>("finished", nullptr); const bool has_ia3 = input_tensors->isExist("ia3_tasks"); T* attention_out = output_tensors->getPtr<T>("hidden_features"); T* key_mem_cache = output_tensors->getPtr<T>("key_cache"); T* value_mem_cache = output_tensors->getPtr<T>("value_cache"); const bool output_cross_attentions = output_tensors->isExist("cross_attentions"); const int max_decoder_seq_len = output_cross_attentions ? 
output_tensors->at("cross_attentions").shape[2] : 0; const int batch_size = input_tensors->at("input_query").shape[0]; const int mem_max_seq_len = encoder_output_tensor.shape[1]; cublas_wrapper_->Gemm(CUBLAS_OP_N, CUBLAS_OP_N, hidden_units_, // n batch_size, d_model_, // k attention_weights->query_weight.kernel, hidden_units_, // n attention_input, d_model_, // k q_buf_, hidden_units_ /* n */); if (step == 1) { if (is_batch_major_cache_) { cublas_wrapper_->Gemm(CUBLAS_OP_N, CUBLAS_OP_N, hidden_units_, batch_size * mem_max_seq_len, encoder_output_tensor.shape[2], attention_weights->key_weight.kernel, hidden_units_, encoder_output_tensor.getPtr<T>(), encoder_output_tensor.shape[2], mem_cache_buf_, hidden_units_); transpose_4d_batch_major_memory_kernelLauncher<T>( key_mem_cache, mem_cache_buf_, batch_size, mem_max_seq_len, size_per_head_, head_num_, true, stream_); sync_check_cuda_error(); cublas_wrapper_->Gemm(CUBLAS_OP_N, CUBLAS_OP_N, hidden_units_, batch_size * mem_max_seq_len, encoder_output_tensor.shape[2], attention_weights->value_weight.kernel, hidden_units_, encoder_output_tensor.getPtr<T>(), encoder_output_tensor.shape[2], mem_cache_buf_, hidden_units_); transpose_4d_batch_major_memory_kernelLauncher<T>(value_mem_cache, mem_cache_buf_, batch_size, mem_max_seq_len, size_per_head_, head_num_, false, stream_); sync_check_cuda_error(); } else { cublas_wrapper_->Gemm(CUBLAS_OP_N, CUBLAS_OP_N, hidden_units_, batch_size * mem_max_seq_len, encoder_output_tensor.shape[2], attention_weights->key_weight.kernel, hidden_units_, encoder_output_tensor.getPtr<T>(), encoder_output_tensor.shape[2], key_mem_cache, hidden_units_); cublas_wrapper_->Gemm(CUBLAS_OP_N, CUBLAS_OP_N, hidden_units_, batch_size * mem_max_seq_len, encoder_output_tensor.shape[2], attention_weights->value_weight.kernel, hidden_units_, encoder_output_tensor.getPtr<T>(), encoder_output_tensor.shape[2], value_mem_cache, hidden_units_); } } sync_check_cuda_error(); outputCrossAttentionParam<float> output_attention_param{}; // output cross attentions if (output_cross_attentions) { output_attention_param.max_decoder_seq_len = max_decoder_seq_len; output_attention_param.cross_attention_out = output_tensors->at("cross_attentions").getPtr<float>(); output_attention_param.is_return_cross_attentions = true; } cross_attention_dispatch<T>(q_buf_, attention_weights->query_weight.bias, key_mem_cache, attention_weights->key_weight.bias, value_mem_cache, attention_weights->value_weight.bias, memory_sequence_length, context_buf_, finished, batch_size, batch_size, head_num_, size_per_head_, step, mem_max_seq_len, is_batch_major_cache_, q_scaling_, output_attention_param, has_ia3 ? input_tensors->at("ia3_tasks").getPtr<const int>() : nullptr, has_ia3 ? attention_weights->ia3_key_weight.kernel : nullptr, has_ia3 ? attention_weights->ia3_value_weight.kernel : nullptr, stream_); sync_check_cuda_error(); cublas_wrapper_->Gemm(CUBLAS_OP_N, CUBLAS_OP_N, d_model_, // n batch_size, hidden_units_, // k attention_weights->attention_output_weight.kernel, d_model_, // n context_buf_, hidden_units_, // k attention_out, d_model_ /* n */); if (is_free_buffer_after_forward_ == true) { freeBuffer(); } } template class DecoderCrossAttentionLayer<float>; template class DecoderCrossAttentionLayer<half>; #ifdef ENABLE_BF16 template class DecoderCrossAttentionLayer<__nv_bfloat16>; #endif } // namespace fastertransformer
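/*
 * A minimal host-side reference (illustrative sketch, assuming T = float so that
 * x = 16 / sizeof(T) = 4) for the K-cache relayout performed on the GPU by
 * transpose_4d_batch_major_mem_k_cache above: src [B, L, H, Dh] -> dst
 * [B, H, Dh/x, L, x], with Dh assumed divisible by x. The function name
 * reference_k_cache_transpose is an assumption, not FasterTransformer API.
 */
#include <cstddef>
#include <vector>

static void reference_k_cache_transpose(std::vector<float>&       dst,
                                        const std::vector<float>& src,
                                        size_t B, size_t L, size_t H, size_t Dh)
{
    const size_t x = 4;  // elements moved per 16-byte (uint4) vector for float
    dst.assign(B * H * Dh * L, 0.0f);
    for (size_t b = 0; b < B; ++b)
        for (size_t l = 0; l < L; ++l)
            for (size_t h = 0; h < H; ++h)
                for (size_t d = 0; d < Dh; ++d) {
                    // source layout: [B][L][H][Dh]
                    const size_t src_idx = ((b * L + l) * H + h) * Dh + d;
                    // destination layout: [B][H][Dh/x][L][x]
                    const size_t dst_idx =
                        (((b * H + h) * (Dh / x) + d / x) * L + l) * x + d % x;
                    dst[dst_idx] = src[src_idx];
                }
}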
fd15a016bce0afe75f6ff4225e19d54fd2824e78.hip
// !!! This is a file automatically generated by hipify!!! #include "tricount.h" #include <algorithm> #include <thrust/sort.h> #include <thrust/device_vector.h> #include <thrust/device_ptr.h> #include <thrust/copy.h> #include <thrust/remove.h> #include <thrust/unique.h> #include <thrust/scan.h> #include <device_launch_parameters.h> #include <hip/hip_runtime.h> #include "util.h" #define CUDA_TRY(call) \ do { \ hipError_t const status = (call); \ if (hipSuccess != status) { \ log_error("%s %s %d\n", hipGetErrorString(status), __FILE__, __LINE__); \ } \ } while (0) const int numBlocks = 1048576; const int BLOCKSIZE = 512;//1024; uint64_t gpu_mem; uint64_t init_gpu() { hipDeviceProp_t deviceProp{}; CUDA_TRY(hipGetDeviceProperties(&deviceProp, 0)); gpu_mem = deviceProp.totalGlobalMem; log_info("numBlocks: %d BLOCKSIZE: %d", numBlocks, BLOCKSIZE); return gpu_mem; } __global__ void all_degree_kernel(const uint64_t *edges, uint64_t edge_count, uint32_t *deg) { uint32_t blockSize = blockDim.x * gridDim.x; uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x; for (uint64_t i = tid; i < edge_count; i += blockSize) { uint64_t edge = edges[i]; auto first = FIRST(edge); auto second = SECOND(edge); atomicAdd(deg + first, 1); atomicAdd(deg + second, 1); } } void cal_degree(const uint64_t *edges, uint64_t edge_count, uint32_t *deg, uint32_t node_count) { uint64_t use_mem = node_count * sizeof(uint32_t) + 1024 * 1024 * 256; uint64_t edge_block = (gpu_mem - use_mem) / sizeof(uint64_t); uint64_t split_num = edge_count / edge_block + 1; edge_block = edge_count / split_num; uint64_t *dev_edges; uint32_t *dev_deg; CUDA_TRY(hipMalloc((void **) &dev_edges, edge_block * sizeof(uint64_t))); CUDA_TRY(hipMalloc((void **) &dev_deg, node_count * sizeof(uint32_t))); for (uint64_t i = 0; i < edge_count; i += edge_block) { uint64_t copy_size = min(edge_count - i, edge_block); CUDA_TRY(hipMemcpy(dev_edges, edges + i, copy_size * sizeof(uint64_t), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( all_degree_kernel), dim3(numBlocks), dim3(BLOCKSIZE), 0, 0, dev_edges, copy_size, dev_deg); CUDA_TRY(hipDeviceSynchronize()); } CUDA_TRY(hipMemcpy(deg, dev_deg, node_count * sizeof(uint32_t), hipMemcpyDeviceToHost)); CUDA_TRY(hipFree(dev_edges)); CUDA_TRY(hipFree(dev_deg)); } __global__ void redirect_edges_kernel(uint64_t *edges, uint64_t edge_count, const uint32_t *deg) { uint32_t blockSize = blockDim.x * gridDim.x; uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x; for (uint64_t i = tid; i < edge_count; i += blockSize) { uint64_t edge = edges[i]; auto first = FIRST(edge); auto second = SECOND(edge); if (deg[first] > deg[second] || (deg[first] == deg[second] && first > second)) { edges[i] = MAKEEDGE(second, first); } } } void redirect_edges(uint64_t *edges, uint64_t edge_count, const uint32_t *deg, uint32_t node_count) { uint64_t use_mem = node_count * sizeof(uint32_t) + 1024 * 1024 * 256; uint64_t edge_block = (gpu_mem - use_mem) / sizeof(uint64_t); uint64_t split_num = edge_count / edge_block + 1; edge_block = edge_count / split_num; uint64_t *dev_edges; uint32_t *dev_deg; CUDA_TRY(hipMalloc((void **) &dev_edges, edge_block * sizeof(uint64_t))); CUDA_TRY(hipMalloc((void **) &dev_deg, node_count * sizeof(uint32_t))); CUDA_TRY(hipMemcpy(dev_deg, deg, node_count * sizeof(uint32_t), hipMemcpyHostToDevice)); for (uint64_t i = 0; i < edge_count; i += edge_block) { uint64_t copy_size = min(edge_count - i, edge_block); CUDA_TRY(hipMemcpy(dev_edges, edges + i, copy_size * sizeof(uint64_t), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( 
redirect_edges_kernel), dim3(numBlocks), dim3(BLOCKSIZE), 0, 0, dev_edges, copy_size, dev_deg); CUDA_TRY(hipDeviceSynchronize()); CUDA_TRY(hipMemcpy(edges + i, dev_edges, copy_size * sizeof(uint64_t), hipMemcpyDeviceToHost)); } CUDA_TRY(hipFree(dev_edges)); CUDA_TRY(hipFree(dev_deg)); } uint32_t cal_part_num(const uint64_t *edges, uint64_t edge_count, uint32_t node_count) { uint64_t part_num = 1; while (true) { uint64_t part_edge_count = edge_count / part_num + 1; uint64_t part_node_count = node_count / part_num + 1; uint64_t tri_use_mem = part_edge_count * sizeof(uint64_t) * 2 * 115 / 100 + (part_node_count + 1) * sizeof(uint32_t) * 2 + numBlocks * BLOCKSIZE * sizeof(uint64_t); if (tri_use_mem < gpu_mem) { break; } ++part_num; } return part_num; } __global__ void unzip_edges_kernel(const uint64_t *edges, uint64_t edge_count, uint32_t *edges_first, uint32_t *edges_second) { auto from = blockDim.x * blockIdx.x + threadIdx.x; auto step = gridDim.x * blockDim.x; for (uint64_t i = from; i < edge_count; i += step) { uint64_t tmp = edges[i]; edges_first[i] = FIRST(tmp); edges_second[i] = SECOND(tmp); } } __global__ void node_index_construct_kernel(const uint32_t *edges_first, uint64_t edge_count, uint32_t *node_index, uint32_t node_count, uint32_t start_node) { auto from = blockDim.x * blockIdx.x + threadIdx.x; auto step = gridDim.x * blockDim.x; for (uint64_t i = from; i <= edge_count; i += step) { int64_t prev = i > 0 ? (int64_t) (edges_first[i - 1] - start_node) : -1; int64_t next = i < edge_count ? (int64_t) (edges_first[i] - start_node) : node_count; for (int64_t j = prev + 1; j <= next; ++j) { node_index[j] = i; } } } struct is_self_loop : public thrust::unary_function<uint64_t, bool> { __host__ __device__ bool operator()(uint64_t x) { return FIRST(x) == SECOND(x); } }; void split(uint64_t *edges, uint64_t edge_count, uint64_t part_num, uint32_t **node_index, const uint32_t *node_split, uint64_t *adj_count) { uint64_t begin = 0; uint64_t end; uint64_t adj_count_all = 0; auto *adj = reinterpret_cast<uint32_t *>(edges); uint64_t *dev_edges; uint32_t *dev_edges_first; uint32_t *dev_edges_second; uint32_t *dev_node_index; for (uint64_t i = 0; i < part_num; i++) { log_info("split i: %lu start", i); uint32_t stop_node = node_split[i + 1]; if (i == part_num - 1) { end = edge_count; } else { end = swap_if(edges + begin, edges + edge_count, [&](const uint64_t edge) { return FIRST(edge) < stop_node; }) - edges; } log_info("swap_if: %d start: %lu end: %lu", i, begin, end); adj_count[i] = end - begin; uint64_t copy_size = adj_count[i] * sizeof(uint64_t); CUDA_TRY(hipMalloc((void **) &dev_edges, copy_size)); CUDA_TRY(hipMemcpy(dev_edges, edges + begin, copy_size, hipMemcpyHostToDevice)); thrust::device_ptr<uint64_t> dev_ptr(dev_edges); thrust::sort(dev_ptr, dev_ptr + adj_count[i]); adj_count[i] = thrust::remove_if(dev_ptr, dev_ptr + adj_count[i], is_self_loop()) - dev_ptr; adj_count[i] = thrust::unique(dev_ptr, dev_ptr + adj_count[i]) - dev_ptr; CUDA_TRY(hipMalloc((void **) &dev_edges_first, adj_count[i] * sizeof(uint32_t))); CUDA_TRY(hipMalloc((void **) &dev_edges_second, adj_count[i] * sizeof(uint32_t))); hipLaunchKernelGGL(( unzip_edges_kernel), dim3(numBlocks), dim3(BLOCKSIZE), 0, 0, dev_edges, adj_count[i], dev_edges_first, dev_edges_second); CUDA_TRY(hipPeekAtLastError()); CUDA_TRY(hipDeviceSynchronize()); CUDA_TRY(hipMemcpy(adj + adj_count_all, dev_edges_second, adj_count[i] * sizeof(uint32_t), hipMemcpyDeviceToHost)); uint32_t node_count = node_split[i + 1] - node_split[i] + 1; uint32_t 
start_node = node_split[i]; CUDA_TRY(hipMalloc((void **) &dev_node_index, (node_count + 1) * sizeof(uint32_t))); hipLaunchKernelGGL(( node_index_construct_kernel), dim3(numBlocks), dim3(BLOCKSIZE), 0, 0, dev_edges_first, adj_count[i], dev_node_index, node_count, start_node); CUDA_TRY(hipPeekAtLastError()); CUDA_TRY(hipDeviceSynchronize()); node_index[i] = (uint32_t *) malloc((node_count + 1) * sizeof(uint32_t)); CUDA_TRY(hipMemcpy(node_index[i], dev_node_index, (node_count + 1) * sizeof(uint32_t), hipMemcpyDeviceToHost)); adj_count_all += adj_count[i]; CUDA_TRY(hipFree(dev_edges)); CUDA_TRY(hipFree(dev_edges_first)); CUDA_TRY(hipFree(dev_edges_second)); CUDA_TRY(hipFree(dev_node_index)); begin = end; } vector<uint64_t> adj_count_vec(adj_count, adj_count + part_num); adj_count[0] = 0; for (uint64_t i = 1; i <= part_num; i++) { adj_count[i] = adj_count[i - 1] + adj_count_vec[i - 1]; } } __global__ void node_index_reconstruct_kernel(uint32_t *edges_first, const uint32_t *node_index, uint32_t node_count) { auto from = blockDim.x * blockIdx.x + threadIdx.x; auto step = gridDim.x * blockDim.x; for (uint64_t i = from; i < node_count; i += step) { for (uint64_t j = node_index[i]; j < node_index[i + 1]; ++j) { edges_first[j] = i; } } } __global__ void warp_binary_kernel(const uint32_t* __restrict__ edge_m, const uint32_t* __restrict__ node_index_m, uint32_t edge_m_count, uint32_t* __restrict__ adj_m, uint32_t start_node_n, const uint32_t* __restrict__ node_index_n, uint32_t node_index_n_count, uint32_t* __restrict__ adj_n, uint64_t *results) { //phase 1, partition uint64_t count = 0; __shared__ uint32_t local[BLOCKSIZE]; uint32_t i = threadIdx.x % 32; uint32_t p = threadIdx.x / 32; for (uint32_t tid = (threadIdx.x + blockIdx.x * blockDim.x) / 32; tid < edge_m_count; tid += blockDim.x * gridDim.x / 32) { uint32_t node_m = edge_m[tid]; uint32_t node_n = adj_m[tid]; if (node_n < start_node_n || node_n >= start_node_n + node_index_n_count) { continue; } uint32_t degree_m = node_index_m[node_m + 1] - node_index_m[node_m]; uint32_t degree_n = node_index_n[node_n + 1 - start_node_n] - node_index_n[node_n - start_node_n]; uint32_t* a = adj_m + node_index_m[node_m]; uint32_t* b = adj_n + node_index_n[node_n - start_node_n]; if(degree_m < degree_n){ uint32_t temp = degree_m; degree_m = degree_n; degree_n = temp; uint32_t *aa = a; a = b; b = aa; } //initial cache local[p * 32 + i] = a[i * degree_m / 32]; __syncthreads(); //search uint32_t j = i; while(j < degree_n){ uint32_t X = b[j]; uint32_t Y; //phase 1: cache int32_t bot = 0; int32_t top = 32; int32_t r; while(top > bot + 1){ r = (top + bot) / 2; Y = local[p * 32 + r]; if(X == Y){ count++; bot = top + 32; } if(X < Y){ top = r; } if(X > Y){ bot = r; } } //phase 2 bot = bot * degree_m / 32; top = top * degree_m / 32 - 1; while(top >= bot){ r = (top + bot) / 2; Y = a[r]; if(X == Y){ count++; } if(X <= Y){ top = r - 1; } if(X >= Y){ bot = r + 1; } } j += 32; } __syncthreads(); } results[blockDim.x * blockIdx.x + threadIdx.x] = count; } __global__ void warp_intersection_kernel(const uint32_t* __restrict__ edge_m, const uint32_t* __restrict__ node_index_m, uint32_t edge_m_count, uint32_t* __restrict__ adj_m, uint32_t start_node_n, const uint32_t* __restrict__ node_index_n, uint32_t node_index_n_count, uint32_t* __restrict__ adj_n, uint64_t *results) { //phase 1, partition uint64_t count = 0; //__shared__ uint32_t local[BLOCKSIZE]; //uint32_t i = threadIdx.x % 32; //uint32_t p = threadIdx.x / 32; for (uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x; tid < 
edge_m_count; tid += blockDim.x * gridDim.x) { uint32_t node_m = edge_m[tid]; uint32_t node_n = adj_m[tid]; if (node_n < start_node_n || node_n >= start_node_n + node_index_n_count) { continue; } uint32_t degree_m = node_index_m[node_m + 1] - node_index_m[node_m]; uint32_t degree_n = node_index_n[node_n + 1 - start_node_n] - node_index_n[node_n - start_node_n]; uint32_t* a = adj_m + node_index_m[node_m]; uint32_t* b = adj_n + node_index_n[node_n - start_node_n]; //initial cache int i = 0, j = 0; while (i < degree_m && j < degree_n) { if (a[i] == b[j]) { count ++; i ++; j ++; } else if (a[i] < b[j]) { i ++; } else { j ++; } } //search } results[blockDim.x * blockIdx.x + threadIdx.x] = count; } uint64_t tricount_gpu(uint64_t part_num, uint32_t *adj, const uint64_t *adj_count, const uint32_t *node_split, uint32_t **node_index) { uint32_t n_result = numBlocks * BLOCKSIZE; uint64_t all_sum = 0; uint32_t *node_index_m_dev; uint32_t *adj_m_dev; uint32_t *edge_m_dev; uint32_t *node_index_n_dev; uint32_t *adj_n_dev; uint32_t *edge_n_dev; uint64_t *dev_results; for (uint64_t m = 0; m < part_num; m++) { uint32_t start_node_m = node_split[m]; uint32_t *node_index_m = node_index[m]; uint32_t node_index_m_count = node_split[m + 1] - node_split[m]; if (node_index_m_count == 0) { continue; } uint64_t start_adj_m = adj_count[m]; uint32_t *adj_m = adj + start_adj_m; uint32_t adj_count_m = adj_count[m + 1] - adj_count[m]; CUDA_TRY(hipMalloc((void **) &node_index_m_dev, (node_index_m_count + 1) * sizeof(uint32_t))); CUDA_TRY(hipMemcpy(node_index_m_dev, node_index_m, (node_index_m_count + 1) * sizeof(uint32_t), hipMemcpyHostToDevice)); CUDA_TRY(hipMalloc((void **) &adj_m_dev, adj_count_m * sizeof(uint32_t))); CUDA_TRY(hipMemcpy(adj_m_dev, adj_m, adj_count_m * sizeof(uint32_t), hipMemcpyHostToDevice)); CUDA_TRY(hipMalloc((void **) &edge_m_dev, adj_count_m * sizeof(uint32_t))); hipLaunchKernelGGL(( node_index_reconstruct_kernel), dim3(numBlocks), dim3(BLOCKSIZE), 0, 0, edge_m_dev, node_index_m_dev, node_index_m_count); CUDA_TRY(hipDeviceSynchronize()); for (uint64_t n = m; n < part_num; n++) { uint32_t start_node_n = node_split[n]; uint32_t *node_index_n = node_index[n]; uint32_t node_index_n_count = node_split[n + 1] - node_split[n]; uint64_t start_adj_n = adj_count[n]; uint32_t *adj_n = adj + start_adj_n; uint32_t adj_count_n = adj_count[n + 1] - adj_count[n]; CUDA_TRY(hipMalloc((void **) &node_index_n_dev, (node_index_n_count + 1) * sizeof(uint32_t))); CUDA_TRY(hipMemcpy(node_index_n_dev, node_index_n, (node_index_n_count + 1) * sizeof(uint32_t), hipMemcpyHostToDevice)); CUDA_TRY(hipMalloc((void **) &adj_n_dev, adj_count_n * sizeof(uint32_t))); CUDA_TRY(hipMemcpy(adj_n_dev, adj_n, adj_count_n * sizeof(uint32_t), hipMemcpyHostToDevice)); CUDA_TRY(hipMalloc((void **) &dev_results, n_result * sizeof(uint64_t))); // log_info("tricount_gpu_edge_kernel start"); hipLaunchKernelGGL(( warp_binary_kernel), dim3(numBlocks), dim3(BLOCKSIZE), 0, 0, edge_m_dev, node_index_m_dev, adj_count_m, adj_m_dev, start_node_n, node_index_n_dev, node_index_n_count, adj_n_dev, dev_results); CUDA_TRY(hipDeviceSynchronize()); // log_info("tricount_gpu_edge_kernel end"); thrust::device_ptr<uint64_t> ptr(dev_results); uint64_t sum = thrust::reduce(ptr, ptr + n_result); log_info("m: %d n: %d sum: %lu", m, n, sum); all_sum += sum; if (m != n) { CUDA_TRY(hipMalloc((void **) &edge_n_dev, adj_count_n * sizeof(uint32_t))); hipLaunchKernelGGL(( node_index_reconstruct_kernel), dim3(numBlocks), dim3(BLOCKSIZE), 0, 0, edge_n_dev, node_index_n_dev, 
node_index_n_count); CUDA_TRY(hipDeviceSynchronize()); // log_info("tricount_gpu_edge_kernel start"); hipLaunchKernelGGL(( warp_binary_kernel), dim3(numBlocks), dim3(BLOCKSIZE), 0, 0, edge_n_dev, node_index_n_dev, adj_count_n, adj_n_dev, start_node_m, node_index_m_dev, node_index_m_count, adj_m_dev, dev_results); CUDA_TRY(hipDeviceSynchronize()); // log_info("tricount_gpu_edge_kernel end"); thrust::device_ptr<uint64_t> ptr_n(dev_results); sum = thrust::reduce(ptr_n, ptr_n + n_result); log_info("m: %d n: %d sum: %lu", n, m, sum); all_sum += sum; CUDA_TRY(hipFree(edge_n_dev)); } CUDA_TRY(hipFree(node_index_n_dev)); CUDA_TRY(hipFree(adj_n_dev)); CUDA_TRY(hipFree(dev_results)); } CUDA_TRY(hipFree(node_index_m_dev)); CUDA_TRY(hipFree(adj_m_dev)); CUDA_TRY(hipFree(edge_m_dev)); } return all_sum; }
fd15a016bce0afe75f6ff4225e19d54fd2824e78.cu
#include "tricount.h" #include <algorithm> #include <thrust/sort.h> #include <thrust/device_vector.h> #include <thrust/device_ptr.h> #include <thrust/copy.h> #include <thrust/remove.h> #include <thrust/unique.h> #include <thrust/scan.h> #include <device_launch_parameters.h> #include <cuda_runtime.h> #include "util.h" #define CUDA_TRY(call) \ do { \ cudaError_t const status = (call); \ if (cudaSuccess != status) { \ log_error("%s %s %d\n", cudaGetErrorString(status), __FILE__, __LINE__); \ } \ } while (0) const int numBlocks = 1048576; const int BLOCKSIZE = 512;//1024; uint64_t gpu_mem; uint64_t init_gpu() { cudaDeviceProp deviceProp{}; CUDA_TRY(cudaGetDeviceProperties(&deviceProp, 0)); gpu_mem = deviceProp.totalGlobalMem; log_info("numBlocks: %d BLOCKSIZE: %d", numBlocks, BLOCKSIZE); return gpu_mem; } __global__ void all_degree_kernel(const uint64_t *edges, uint64_t edge_count, uint32_t *deg) { uint32_t blockSize = blockDim.x * gridDim.x; uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x; for (uint64_t i = tid; i < edge_count; i += blockSize) { uint64_t edge = edges[i]; auto first = FIRST(edge); auto second = SECOND(edge); atomicAdd(deg + first, 1); atomicAdd(deg + second, 1); } } void cal_degree(const uint64_t *edges, uint64_t edge_count, uint32_t *deg, uint32_t node_count) { uint64_t use_mem = node_count * sizeof(uint32_t) + 1024 * 1024 * 256; uint64_t edge_block = (gpu_mem - use_mem) / sizeof(uint64_t); uint64_t split_num = edge_count / edge_block + 1; edge_block = edge_count / split_num; uint64_t *dev_edges; uint32_t *dev_deg; CUDA_TRY(cudaMalloc((void **) &dev_edges, edge_block * sizeof(uint64_t))); CUDA_TRY(cudaMalloc((void **) &dev_deg, node_count * sizeof(uint32_t))); for (uint64_t i = 0; i < edge_count; i += edge_block) { uint64_t copy_size = min(edge_count - i, edge_block); CUDA_TRY(cudaMemcpy(dev_edges, edges + i, copy_size * sizeof(uint64_t), cudaMemcpyHostToDevice)); all_degree_kernel<<<numBlocks, BLOCKSIZE>>> (dev_edges, copy_size, dev_deg); CUDA_TRY(cudaDeviceSynchronize()); } CUDA_TRY(cudaMemcpy(deg, dev_deg, node_count * sizeof(uint32_t), cudaMemcpyDeviceToHost)); CUDA_TRY(cudaFree(dev_edges)); CUDA_TRY(cudaFree(dev_deg)); } __global__ void redirect_edges_kernel(uint64_t *edges, uint64_t edge_count, const uint32_t *deg) { uint32_t blockSize = blockDim.x * gridDim.x; uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x; for (uint64_t i = tid; i < edge_count; i += blockSize) { uint64_t edge = edges[i]; auto first = FIRST(edge); auto second = SECOND(edge); if (deg[first] > deg[second] || (deg[first] == deg[second] && first > second)) { edges[i] = MAKEEDGE(second, first); } } } void redirect_edges(uint64_t *edges, uint64_t edge_count, const uint32_t *deg, uint32_t node_count) { uint64_t use_mem = node_count * sizeof(uint32_t) + 1024 * 1024 * 256; uint64_t edge_block = (gpu_mem - use_mem) / sizeof(uint64_t); uint64_t split_num = edge_count / edge_block + 1; edge_block = edge_count / split_num; uint64_t *dev_edges; uint32_t *dev_deg; CUDA_TRY(cudaMalloc((void **) &dev_edges, edge_block * sizeof(uint64_t))); CUDA_TRY(cudaMalloc((void **) &dev_deg, node_count * sizeof(uint32_t))); CUDA_TRY(cudaMemcpy(dev_deg, deg, node_count * sizeof(uint32_t), cudaMemcpyHostToDevice)); for (uint64_t i = 0; i < edge_count; i += edge_block) { uint64_t copy_size = min(edge_count - i, edge_block); CUDA_TRY(cudaMemcpy(dev_edges, edges + i, copy_size * sizeof(uint64_t), cudaMemcpyHostToDevice)); redirect_edges_kernel<<< numBlocks, BLOCKSIZE>>> (dev_edges, copy_size, dev_deg); 
CUDA_TRY(cudaDeviceSynchronize()); CUDA_TRY(cudaMemcpy(edges + i, dev_edges, copy_size * sizeof(uint64_t), cudaMemcpyDeviceToHost)); } CUDA_TRY(cudaFree(dev_edges)); CUDA_TRY(cudaFree(dev_deg)); } uint32_t cal_part_num(const uint64_t *edges, uint64_t edge_count, uint32_t node_count) { uint64_t part_num = 1; while (true) { uint64_t part_edge_count = edge_count / part_num + 1; uint64_t part_node_count = node_count / part_num + 1; uint64_t tri_use_mem = part_edge_count * sizeof(uint64_t) * 2 * 115 / 100 + (part_node_count + 1) * sizeof(uint32_t) * 2 + numBlocks * BLOCKSIZE * sizeof(uint64_t); if (tri_use_mem < gpu_mem) { break; } ++part_num; } return part_num; } __global__ void unzip_edges_kernel(const uint64_t *edges, uint64_t edge_count, uint32_t *edges_first, uint32_t *edges_second) { auto from = blockDim.x * blockIdx.x + threadIdx.x; auto step = gridDim.x * blockDim.x; for (uint64_t i = from; i < edge_count; i += step) { uint64_t tmp = edges[i]; edges_first[i] = FIRST(tmp); edges_second[i] = SECOND(tmp); } } __global__ void node_index_construct_kernel(const uint32_t *edges_first, uint64_t edge_count, uint32_t *node_index, uint32_t node_count, uint32_t start_node) { auto from = blockDim.x * blockIdx.x + threadIdx.x; auto step = gridDim.x * blockDim.x; for (uint64_t i = from; i <= edge_count; i += step) { int64_t prev = i > 0 ? (int64_t) (edges_first[i - 1] - start_node) : -1; int64_t next = i < edge_count ? (int64_t) (edges_first[i] - start_node) : node_count; for (int64_t j = prev + 1; j <= next; ++j) { node_index[j] = i; } } } struct is_self_loop : public thrust::unary_function<uint64_t, bool> { __host__ __device__ bool operator()(uint64_t x) { return FIRST(x) == SECOND(x); } }; void split(uint64_t *edges, uint64_t edge_count, uint64_t part_num, uint32_t **node_index, const uint32_t *node_split, uint64_t *adj_count) { uint64_t begin = 0; uint64_t end; uint64_t adj_count_all = 0; auto *adj = reinterpret_cast<uint32_t *>(edges); uint64_t *dev_edges; uint32_t *dev_edges_first; uint32_t *dev_edges_second; uint32_t *dev_node_index; for (uint64_t i = 0; i < part_num; i++) { log_info("split i: %lu start", i); uint32_t stop_node = node_split[i + 1]; if (i == part_num - 1) { end = edge_count; } else { end = swap_if(edges + begin, edges + edge_count, [&](const uint64_t edge) { return FIRST(edge) < stop_node; }) - edges; } log_info("swap_if: %d start: %lu end: %lu", i, begin, end); adj_count[i] = end - begin; uint64_t copy_size = adj_count[i] * sizeof(uint64_t); CUDA_TRY(cudaMalloc((void **) &dev_edges, copy_size)); CUDA_TRY(cudaMemcpy(dev_edges, edges + begin, copy_size, cudaMemcpyHostToDevice)); thrust::device_ptr<uint64_t> dev_ptr(dev_edges); thrust::sort(dev_ptr, dev_ptr + adj_count[i]); adj_count[i] = thrust::remove_if(dev_ptr, dev_ptr + adj_count[i], is_self_loop()) - dev_ptr; adj_count[i] = thrust::unique(dev_ptr, dev_ptr + adj_count[i]) - dev_ptr; CUDA_TRY(cudaMalloc((void **) &dev_edges_first, adj_count[i] * sizeof(uint32_t))); CUDA_TRY(cudaMalloc((void **) &dev_edges_second, adj_count[i] * sizeof(uint32_t))); unzip_edges_kernel<<<numBlocks, BLOCKSIZE>>>(dev_edges, adj_count[i], dev_edges_first, dev_edges_second); CUDA_TRY(cudaPeekAtLastError()); CUDA_TRY(cudaDeviceSynchronize()); CUDA_TRY(cudaMemcpy(adj + adj_count_all, dev_edges_second, adj_count[i] * sizeof(uint32_t), cudaMemcpyDeviceToHost)); uint32_t node_count = node_split[i + 1] - node_split[i] + 1; uint32_t start_node = node_split[i]; CUDA_TRY(cudaMalloc((void **) &dev_node_index, (node_count + 1) * sizeof(uint32_t))); 
node_index_construct_kernel<<<numBlocks, BLOCKSIZE>>>(dev_edges_first, adj_count[i], dev_node_index, node_count, start_node); CUDA_TRY(cudaPeekAtLastError()); CUDA_TRY(cudaDeviceSynchronize()); node_index[i] = (uint32_t *) malloc((node_count + 1) * sizeof(uint32_t)); CUDA_TRY(cudaMemcpy(node_index[i], dev_node_index, (node_count + 1) * sizeof(uint32_t), cudaMemcpyDeviceToHost)); adj_count_all += adj_count[i]; CUDA_TRY(cudaFree(dev_edges)); CUDA_TRY(cudaFree(dev_edges_first)); CUDA_TRY(cudaFree(dev_edges_second)); CUDA_TRY(cudaFree(dev_node_index)); begin = end; } vector<uint64_t> adj_count_vec(adj_count, adj_count + part_num); adj_count[0] = 0; for (uint64_t i = 1; i <= part_num; i++) { adj_count[i] = adj_count[i - 1] + adj_count_vec[i - 1]; } } __global__ void node_index_reconstruct_kernel(uint32_t *edges_first, const uint32_t *node_index, uint32_t node_count) { auto from = blockDim.x * blockIdx.x + threadIdx.x; auto step = gridDim.x * blockDim.x; for (uint64_t i = from; i < node_count; i += step) { for (uint64_t j = node_index[i]; j < node_index[i + 1]; ++j) { edges_first[j] = i; } } } __global__ void warp_binary_kernel(const uint32_t* __restrict__ edge_m, const uint32_t* __restrict__ node_index_m, uint32_t edge_m_count, uint32_t* __restrict__ adj_m, uint32_t start_node_n, const uint32_t* __restrict__ node_index_n, uint32_t node_index_n_count, uint32_t* __restrict__ adj_n, uint64_t *results) { //phase 1, partition uint64_t count = 0; __shared__ uint32_t local[BLOCKSIZE]; uint32_t i = threadIdx.x % 32; uint32_t p = threadIdx.x / 32; for (uint32_t tid = (threadIdx.x + blockIdx.x * blockDim.x) / 32; tid < edge_m_count; tid += blockDim.x * gridDim.x / 32) { uint32_t node_m = edge_m[tid]; uint32_t node_n = adj_m[tid]; if (node_n < start_node_n || node_n >= start_node_n + node_index_n_count) { continue; } uint32_t degree_m = node_index_m[node_m + 1] - node_index_m[node_m]; uint32_t degree_n = node_index_n[node_n + 1 - start_node_n] - node_index_n[node_n - start_node_n]; uint32_t* a = adj_m + node_index_m[node_m]; uint32_t* b = adj_n + node_index_n[node_n - start_node_n]; if(degree_m < degree_n){ uint32_t temp = degree_m; degree_m = degree_n; degree_n = temp; uint32_t *aa = a; a = b; b = aa; } //initial cache local[p * 32 + i] = a[i * degree_m / 32]; __syncthreads(); //search uint32_t j = i; while(j < degree_n){ uint32_t X = b[j]; uint32_t Y; //phase 1: cache int32_t bot = 0; int32_t top = 32; int32_t r; while(top > bot + 1){ r = (top + bot) / 2; Y = local[p * 32 + r]; if(X == Y){ count++; bot = top + 32; } if(X < Y){ top = r; } if(X > Y){ bot = r; } } //phase 2 bot = bot * degree_m / 32; top = top * degree_m / 32 - 1; while(top >= bot){ r = (top + bot) / 2; Y = a[r]; if(X == Y){ count++; } if(X <= Y){ top = r - 1; } if(X >= Y){ bot = r + 1; } } j += 32; } __syncthreads(); } results[blockDim.x * blockIdx.x + threadIdx.x] = count; } __global__ void warp_intersection_kernel(const uint32_t* __restrict__ edge_m, const uint32_t* __restrict__ node_index_m, uint32_t edge_m_count, uint32_t* __restrict__ adj_m, uint32_t start_node_n, const uint32_t* __restrict__ node_index_n, uint32_t node_index_n_count, uint32_t* __restrict__ adj_n, uint64_t *results) { //phase 1, partition uint64_t count = 0; //__shared__ uint32_t local[BLOCKSIZE]; //uint32_t i = threadIdx.x % 32; //uint32_t p = threadIdx.x / 32; for (uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x; tid < edge_m_count; tid += blockDim.x * gridDim.x) { uint32_t node_m = edge_m[tid]; uint32_t node_n = adj_m[tid]; if (node_n < start_node_n || node_n 
>= start_node_n + node_index_n_count) { continue; } uint32_t degree_m = node_index_m[node_m + 1] - node_index_m[node_m]; uint32_t degree_n = node_index_n[node_n + 1 - start_node_n] - node_index_n[node_n - start_node_n]; uint32_t* a = adj_m + node_index_m[node_m]; uint32_t* b = adj_n + node_index_n[node_n - start_node_n]; //initial cache int i = 0, j = 0; while (i < degree_m && j < degree_n) { if (a[i] == b[j]) { count ++; i ++; j ++; } else if (a[i] < b[j]) { i ++; } else { j ++; } } //search } results[blockDim.x * blockIdx.x + threadIdx.x] = count; } uint64_t tricount_gpu(uint64_t part_num, uint32_t *adj, const uint64_t *adj_count, const uint32_t *node_split, uint32_t **node_index) { uint32_t n_result = numBlocks * BLOCKSIZE; uint64_t all_sum = 0; uint32_t *node_index_m_dev; uint32_t *adj_m_dev; uint32_t *edge_m_dev; uint32_t *node_index_n_dev; uint32_t *adj_n_dev; uint32_t *edge_n_dev; uint64_t *dev_results; for (uint64_t m = 0; m < part_num; m++) { uint32_t start_node_m = node_split[m]; uint32_t *node_index_m = node_index[m]; uint32_t node_index_m_count = node_split[m + 1] - node_split[m]; if (node_index_m_count == 0) { continue; } uint64_t start_adj_m = adj_count[m]; uint32_t *adj_m = adj + start_adj_m; uint32_t adj_count_m = adj_count[m + 1] - adj_count[m]; CUDA_TRY(cudaMalloc((void **) &node_index_m_dev, (node_index_m_count + 1) * sizeof(uint32_t))); CUDA_TRY(cudaMemcpy(node_index_m_dev, node_index_m, (node_index_m_count + 1) * sizeof(uint32_t), cudaMemcpyHostToDevice)); CUDA_TRY(cudaMalloc((void **) &adj_m_dev, adj_count_m * sizeof(uint32_t))); CUDA_TRY(cudaMemcpy(adj_m_dev, adj_m, adj_count_m * sizeof(uint32_t), cudaMemcpyHostToDevice)); CUDA_TRY(cudaMalloc((void **) &edge_m_dev, adj_count_m * sizeof(uint32_t))); node_index_reconstruct_kernel<<<numBlocks, BLOCKSIZE>>>(edge_m_dev, node_index_m_dev, node_index_m_count); CUDA_TRY(cudaDeviceSynchronize()); for (uint64_t n = m; n < part_num; n++) { uint32_t start_node_n = node_split[n]; uint32_t *node_index_n = node_index[n]; uint32_t node_index_n_count = node_split[n + 1] - node_split[n]; uint64_t start_adj_n = adj_count[n]; uint32_t *adj_n = adj + start_adj_n; uint32_t adj_count_n = adj_count[n + 1] - adj_count[n]; CUDA_TRY(cudaMalloc((void **) &node_index_n_dev, (node_index_n_count + 1) * sizeof(uint32_t))); CUDA_TRY(cudaMemcpy(node_index_n_dev, node_index_n, (node_index_n_count + 1) * sizeof(uint32_t), cudaMemcpyHostToDevice)); CUDA_TRY(cudaMalloc((void **) &adj_n_dev, adj_count_n * sizeof(uint32_t))); CUDA_TRY(cudaMemcpy(adj_n_dev, adj_n, adj_count_n * sizeof(uint32_t), cudaMemcpyHostToDevice)); CUDA_TRY(cudaMalloc((void **) &dev_results, n_result * sizeof(uint64_t))); // log_info("tricount_gpu_edge_kernel start"); warp_binary_kernel<<<numBlocks, BLOCKSIZE>>>(edge_m_dev, node_index_m_dev, adj_count_m, adj_m_dev, start_node_n, node_index_n_dev, node_index_n_count, adj_n_dev, dev_results); CUDA_TRY(cudaDeviceSynchronize()); // log_info("tricount_gpu_edge_kernel end"); thrust::device_ptr<uint64_t> ptr(dev_results); uint64_t sum = thrust::reduce(ptr, ptr + n_result); log_info("m: %d n: %d sum: %lu", m, n, sum); all_sum += sum; if (m != n) { CUDA_TRY(cudaMalloc((void **) &edge_n_dev, adj_count_n * sizeof(uint32_t))); node_index_reconstruct_kernel<<<numBlocks, BLOCKSIZE>>>(edge_n_dev, node_index_n_dev, node_index_n_count); CUDA_TRY(cudaDeviceSynchronize()); // log_info("tricount_gpu_edge_kernel start"); warp_binary_kernel<<<numBlocks, BLOCKSIZE>>>(edge_n_dev, node_index_n_dev, adj_count_n, adj_n_dev, start_node_m, node_index_m_dev, 
node_index_m_count, adj_m_dev, dev_results); CUDA_TRY(cudaDeviceSynchronize()); // log_info("tricount_gpu_edge_kernel end"); thrust::device_ptr<uint64_t> ptr_n(dev_results); sum = thrust::reduce(ptr_n, ptr_n + n_result); log_info("m: %d n: %d sum: %lu", n, m, sum); all_sum += sum; CUDA_TRY(cudaFree(edge_n_dev)); } CUDA_TRY(cudaFree(node_index_n_dev)); CUDA_TRY(cudaFree(adj_n_dev)); CUDA_TRY(cudaFree(dev_results)); } CUDA_TRY(cudaFree(node_index_m_dev)); CUDA_TRY(cudaFree(adj_m_dev)); CUDA_TRY(cudaFree(edge_m_dev)); } return all_sum; }
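The tricount pair above counts triangles by intersecting sorted adjacency lists once per directed edge (the inner loop of warp_intersection_kernel). Below is a minimal, self-contained CUDA sketch of that merge-style intersection on two small sorted lists; all names and sizes are illustrative and are not taken from tricount.h.

#include <cuda_runtime.h>
#include <cstdio>
#include <cstdint>

// One thread scans two sorted neighbor lists and counts common entries,
// the same merge scan warp_intersection_kernel runs once per edge.
__global__ void intersect_count(const uint32_t* a, uint32_t na,
                                const uint32_t* b, uint32_t nb,
                                uint64_t* out) {
    if (threadIdx.x == 0 && blockIdx.x == 0) {
        uint64_t count = 0;
        uint32_t i = 0, j = 0;
        while (i < na && j < nb) {
            if (a[i] == b[j])     { ++count; ++i; ++j; }
            else if (a[i] < b[j]) { ++i; }
            else                  { ++j; }
        }
        *out = count;
    }
}

int main() {
    const uint32_t ha[] = {1, 3, 5, 7, 9};
    const uint32_t hb[] = {2, 3, 4, 7, 8};
    uint32_t *da, *db;
    uint64_t *dout, hout = 0;
    cudaMalloc(&da, sizeof(ha));
    cudaMalloc(&db, sizeof(hb));
    cudaMalloc(&dout, sizeof(uint64_t));
    cudaMemcpy(da, ha, sizeof(ha), cudaMemcpyHostToDevice);
    cudaMemcpy(db, hb, sizeof(hb), cudaMemcpyHostToDevice);
    intersect_count<<<1, 32>>>(da, 5, db, 5, dout);
    cudaMemcpy(&hout, dout, sizeof(uint64_t), cudaMemcpyDeviceToHost);
    printf("common neighbors: %llu\n", (unsigned long long)hout); // expect 2
    cudaFree(da); cudaFree(db); cudaFree(dout);
    return 0;
}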
bb86c8e1fa1789f057ab47f33b0e134ce1dd5421.hip
// !!! This is a file automatically generated by hipify!!! //****************************************************************************** // Copyright 2019 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // #include "memory_api.h" // Ensure printing of CUDA runtime errors to console #define CUB_STDERR #include <hipcub/hipcub.hpp> #include <hipcub/hipcub.hpp> using namespace cub; #include <stdio.h> //--------------------------------------------------------------------- // Globals, constants and typedefs //--------------------------------------------------------------------- bool g_verbose = false; // Whether to display input/output to console CachingDeviceAllocator g_allocator(true); // Caching allocator for device memory hipError_t srtDeviceAllocate(void **devPtr, size_t size, hipStream_t stream) { // printf("srtDeviceAllocate: %ld\n", size); return g_allocator.DeviceAllocate(devPtr, size, stream); } hipError_t srtDeviceFree(void *devPtr) { return g_allocator.DeviceFree(devPtr); }
bb86c8e1fa1789f057ab47f33b0e134ce1dd5421.cu
//****************************************************************************** // Copyright 2019 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // #include "memory_api.h" // Ensure printing of CUDA runtime errors to console #define CUB_STDERR #include <cub/util_allocator.cuh> #include <cub/device/device_reduce.cuh> using namespace cub; #include <stdio.h> //--------------------------------------------------------------------- // Globals, constants and typedefs //--------------------------------------------------------------------- bool g_verbose = false; // Whether to display input/output to console CachingDeviceAllocator g_allocator(true); // Caching allocator for device memory cudaError_t srtDeviceAllocate(void **devPtr, size_t size, cudaStream_t stream) { // printf("srtDeviceAllocate: %ld\n", size); return g_allocator.DeviceAllocate(devPtr, size, stream); } cudaError_t srtDeviceFree(void *devPtr) { return g_allocator.DeviceFree(devPtr); }
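A usage sketch for the caching-allocator wrapper above, assuming memory_api.h declares srtDeviceAllocate and srtDeviceFree exactly as defined in this file and that the sketch is linked against that translation unit; the buffer size and the default stream are illustrative.

#include <cuda_runtime.h>

// Declarations matching the wrapper above (normally pulled in via memory_api.h).
cudaError_t srtDeviceAllocate(void **devPtr, size_t size, cudaStream_t stream);
cudaError_t srtDeviceFree(void *devPtr);

int main() {
    void* buf = nullptr;
    // Served from the cub::CachingDeviceAllocator pool and associated with
    // the given stream (default stream here).
    if (srtDeviceAllocate(&buf, 1 << 20, 0) != cudaSuccess) return 1;
    // ... launch kernels that use buf on that stream ...
    // Freeing returns the block to the pool rather than calling cudaFree,
    // so repeated allocate/free cycles avoid expensive driver calls.
    return srtDeviceFree(buf) == cudaSuccess ? 0 : 1;
}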
4a9d4a4c83641b7a7128ee8a6d089e35ff2fd78a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" /* This file is copied from https://github.com/jzbonter/mc-cnn */ extern "C" { } #define TB 128 #define DISP_MAX 256 __global__ void rho(float *x, int size, float lambda) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { x[id] = 1 - exp(-x[id] / lambda); } }
4a9d4a4c83641b7a7128ee8a6d089e35ff2fd78a.cu
#include "includes.h" /* This file is copied from https://github.com/jzbonter/mc-cnn */ extern "C" { } #define TB 128 #define DISP_MAX 256 __global__ void rho(float *x, int size, float lambda) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { x[id] = 1 - exp(-x[id] / lambda); } }
604397d89538df0fa06eb30ca851769c07d3ae62.hip
// !!! This is a file automatically generated by hipify!!! // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2016 Benoit Steiner <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #define EIGEN_TEST_NO_LONGDOUBLE #define EIGEN_TEST_NO_COMPLEX #define EIGEN_TEST_FUNC cxx11_tensor_cast_float16_cuda #define EIGEN_DEFAULT_DENSE_INDEX_TYPE int #define EIGEN_USE_GPU #if defined __CUDACC_VER__ && __CUDACC_VER__ >= 70500 #include <hip/hip_fp16.h> #endif #include "4dface-ulsTracker.h" #include <unsupported/Eigen/CXX11/Tensor> using Eigen::Tensor; void test_cuda_conversion() { Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); int num_elem = 101; Tensor<float, 1> floats(num_elem); floats.setRandom(); float* d_float = (float*)gpu_device.allocate(num_elem * sizeof(float)); Eigen::half* d_half = (Eigen::half*)gpu_device.allocate(num_elem * sizeof(Eigen::half)); float* d_conv = (float*)gpu_device.allocate(num_elem * sizeof(float)); Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_float( d_float, num_elem); Eigen::TensorMap<Eigen::Tensor<Eigen::half, 1>, Eigen::Aligned> gpu_half( d_half, num_elem); Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_conv( d_conv, num_elem); gpu_device.memcpyHostToDevice(d_float, floats.data(), num_elem*sizeof(float)); gpu_half.device(gpu_device) = gpu_float.cast<Eigen::half>(); gpu_conv.device(gpu_device) = gpu_half.cast<float>(); Tensor<float, 1> initial(num_elem); Tensor<float, 1> final(num_elem); gpu_device.memcpyDeviceToHost(initial.data(), d_float, num_elem*sizeof(float)); gpu_device.memcpyDeviceToHost(final.data(), d_conv, num_elem*sizeof(float)); gpu_device.synchronize(); for (int i = 0; i < num_elem; ++i) { VERIFY_IS_APPROX(initial(i), final(i)); } gpu_device.deallocate(d_float); gpu_device.deallocate(d_half); gpu_device.deallocate(d_conv); } void test_fallback_conversion() { int num_elem = 101; Tensor<float, 1> floats(num_elem); floats.setRandom(); Eigen::Tensor<Eigen::half, 1> halfs = floats.cast<Eigen::half>(); Eigen::Tensor<float, 1> conv = halfs.cast<float>(); for (int i = 0; i < num_elem; ++i) { VERIFY_IS_APPROX(floats(i), conv(i)); } } void test_cxx11_tensor_cast_float16_cuda() { CALL_SUBTEST(test_cuda_conversion()); CALL_SUBTEST(test_fallback_conversion()); }
604397d89538df0fa06eb30ca851769c07d3ae62.cu
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2016 Benoit Steiner <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #define EIGEN_TEST_NO_LONGDOUBLE #define EIGEN_TEST_NO_COMPLEX #define EIGEN_TEST_FUNC cxx11_tensor_cast_float16_cuda #define EIGEN_DEFAULT_DENSE_INDEX_TYPE int #define EIGEN_USE_GPU #if defined __CUDACC_VER__ && __CUDACC_VER__ >= 70500 #include <cuda_fp16.h> #endif #include "4dface-ulsTracker.h" #include <unsupported/Eigen/CXX11/Tensor> using Eigen::Tensor; void test_cuda_conversion() { Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); int num_elem = 101; Tensor<float, 1> floats(num_elem); floats.setRandom(); float* d_float = (float*)gpu_device.allocate(num_elem * sizeof(float)); Eigen::half* d_half = (Eigen::half*)gpu_device.allocate(num_elem * sizeof(Eigen::half)); float* d_conv = (float*)gpu_device.allocate(num_elem * sizeof(float)); Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_float( d_float, num_elem); Eigen::TensorMap<Eigen::Tensor<Eigen::half, 1>, Eigen::Aligned> gpu_half( d_half, num_elem); Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_conv( d_conv, num_elem); gpu_device.memcpyHostToDevice(d_float, floats.data(), num_elem*sizeof(float)); gpu_half.device(gpu_device) = gpu_float.cast<Eigen::half>(); gpu_conv.device(gpu_device) = gpu_half.cast<float>(); Tensor<float, 1> initial(num_elem); Tensor<float, 1> final(num_elem); gpu_device.memcpyDeviceToHost(initial.data(), d_float, num_elem*sizeof(float)); gpu_device.memcpyDeviceToHost(final.data(), d_conv, num_elem*sizeof(float)); gpu_device.synchronize(); for (int i = 0; i < num_elem; ++i) { VERIFY_IS_APPROX(initial(i), final(i)); } gpu_device.deallocate(d_float); gpu_device.deallocate(d_half); gpu_device.deallocate(d_conv); } void test_fallback_conversion() { int num_elem = 101; Tensor<float, 1> floats(num_elem); floats.setRandom(); Eigen::Tensor<Eigen::half, 1> halfs = floats.cast<Eigen::half>(); Eigen::Tensor<float, 1> conv = halfs.cast<float>(); for (int i = 0; i < num_elem; ++i) { VERIFY_IS_APPROX(floats(i), conv(i)); } } void test_cxx11_tensor_cast_float16_cuda() { CALL_SUBTEST(test_cuda_conversion()); CALL_SUBTEST(test_fallback_conversion()); }
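The Eigen test above exercises the float -> half -> float cast chain through Tensor::cast<Eigen::half>(). The sketch below makes the same point with the raw cuda_fp16.h intrinsics instead of Eigen, which is a deliberately different API; values and sizes are illustrative.

#include <cuda_fp16.h>
#include <cuda_runtime.h>
#include <cstdio>

// Device-side float -> half -> float round trip.
__global__ void roundtrip(const float* in, float* out, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = __half2float(__float2half(in[i]));
}

int main() {
    const int n = 8;
    float h_in[n], h_out[n];
    for (int i = 0; i < n; ++i) h_in[i] = 0.1f * (i + 1);
    float *d_in, *d_out;
    cudaMalloc(&d_in, n * sizeof(float));
    cudaMalloc(&d_out, n * sizeof(float));
    cudaMemcpy(d_in, h_in, n * sizeof(float), cudaMemcpyHostToDevice);
    roundtrip<<<1, 32>>>(d_in, d_out, n);
    cudaMemcpy(h_out, d_out, n * sizeof(float), cudaMemcpyDeviceToHost);
    // half has ~3 decimal digits of precision, so in and out agree only approximately
    for (int i = 0; i < n; ++i) printf("%f -> %f\n", h_in[i], h_out[i]);
    cudaFree(d_in); cudaFree(d_out);
    return 0;
}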
dea33b33e8e73b4d25891095b09323c0e8302e26.hip
// !!! This is a file automatically generated by hipify!!! #if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<4>; using LayoutFilter = cutlass::layout::TensorCxRSKx<4>; using LayoutDst = cutlass::layout::TensorNCxHWx<4>; using ThreadBlockShape = cutlass::gemm::GemmShape<16, 128, 16>; using WarpShape = cutlass::gemm::GemmShape<16, 128, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp< int8_t, 4, int32_t, int32_t, float>; using Convolution = cutlass::conv::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutDst, int32_t, LayoutDst, int32_t, cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle, 1, 4, 8, true, cutlass::arch::OpMultiplyAddSaturate>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const typename Convolution::ElementSrc* d_src, const typename Convolution::ElementFilter* d_filter, const typename Convolution::ElementBias* d_bias, const typename Convolution::ElementDst* d_z, typename Convolution::ElementDst* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, hipStream_t stream); #pragma GCC diagnostic pop #endif
dea33b33e8e73b4d25891095b09323c0e8302e26.cu
#if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<4>; using LayoutFilter = cutlass::layout::TensorCxRSKx<4>; using LayoutDst = cutlass::layout::TensorNCxHWx<4>; using ThreadBlockShape = cutlass::gemm::GemmShape<16, 128, 16>; using WarpShape = cutlass::gemm::GemmShape<16, 128, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp< int8_t, 4, int32_t, int32_t, float>; using Convolution = cutlass::conv::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutDst, int32_t, LayoutDst, int32_t, cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle, 1, 4, 8, true, cutlass::arch::OpMultiplyAddSaturate>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const typename Convolution::ElementSrc* d_src, const typename Convolution::ElementFilter* d_filter, const typename Convolution::ElementBias* d_bias, const typename Convolution::ElementDst* d_z, typename Convolution::ElementDst* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, cudaStream_t stream); #pragma GCC diagnostic pop #endif
b69ea349553f3ad7d1c6af1a55ca015ccc6840b0.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "unmarshalling.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *input_itemsets = NULL; hipMalloc(&input_itemsets, XSIZE*YSIZE); int *tmp = NULL; hipMalloc(&tmp, XSIZE*YSIZE); int max_rows = 1; int max_cols = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( unmarshalling), dim3(gridBlock),dim3(threadBlock), 0, 0, input_itemsets,tmp,max_rows,max_cols); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( unmarshalling), dim3(gridBlock),dim3(threadBlock), 0, 0, input_itemsets,tmp,max_rows,max_cols); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( unmarshalling), dim3(gridBlock),dim3(threadBlock), 0, 0, input_itemsets,tmp,max_rows,max_cols); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
b69ea349553f3ad7d1c6af1a55ca015ccc6840b0.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "unmarshalling.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *input_itemsets = NULL; cudaMalloc(&input_itemsets, XSIZE*YSIZE); int *tmp = NULL; cudaMalloc(&tmp, XSIZE*YSIZE); int max_rows = 1; int max_cols = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); unmarshalling<<<gridBlock,threadBlock>>>(input_itemsets,tmp,max_rows,max_cols); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { unmarshalling<<<gridBlock,threadBlock>>>(input_itemsets,tmp,max_rows,max_cols); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { unmarshalling<<<gridBlock,threadBlock>>>(input_itemsets,tmp,max_rows,max_cols); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
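The harness above reads std::chrono::steady_clock around a loop of asynchronous kernel launches; without a synchronize before the stop timestamp, such a measurement can reflect little more than launch overhead. A minimal, self-contained sketch of CUDA event-based timing, where the stop event is synchronized before the elapsed time is read; the kernel, grid shape, and sizes are illustrative and not taken from the benchmark above.

#include <cuda_runtime.h>
#include <cstdio>

__global__ void busy(float* x, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] = x[i] * 2.0f + 1.0f;
}

int main() {
    const int n = 1 << 20;
    const int iters = 1000;
    float* d_x;
    cudaMalloc(&d_x, n * sizeof(float));
    cudaMemset(d_x, 0, n * sizeof(float));

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);
    for (int it = 0; it < iters; ++it)
        busy<<<(n + 255) / 256, 256>>>(d_x, n);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);  // wait until all launches have finished executing

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);  // total milliseconds on the device
    printf("avg kernel time: %f us\n", (ms * 1000.0f) / iters);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_x);
    return 0;
}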
65e3a5f0ddd35497d11a9535c33f221f8cc8dda2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<math.h> #include<time.h> #include<stdexcept> #include<iostream> #include<cstdlib> //for abs(x) #include<stdio.h> using namespace std; __global__ void findMax(int* A,int* current_max,int* mutex,unsigned int n); int main() { const int NUMBER_OF_ELEMENTS = 1024*1024*20; int* hostA = (int*)malloc(NUMBER_OF_ELEMENTS*sizeof(int)); int* hostMax = (int*)malloc(sizeof(int)); *hostMax = -1; srand(time(0)); int i,j; //initialize host vector by random elements for(i=0;i<NUMBER_OF_ELEMENTS;i++) { hostA[i] = NUMBER_OF_ELEMENTS*rand() / RAND_MAX/123; } int* deviceA,*deviceMax,*deviceMutex; hipMalloc(&deviceA,NUMBER_OF_ELEMENTS*sizeof(int)); hipMalloc(&deviceMax,sizeof(int)); hipMalloc(&deviceMutex,sizeof(int)); hipMemset(deviceMax,-1,sizeof(int)); hipMemset(deviceMutex,0,sizeof(int)); hipMemcpy(deviceA,hostA,NUMBER_OF_ELEMENTS*sizeof(int),hipMemcpyHostToDevice); //set up timing variables float gpu_elapsed_time; hipEvent_t gpu_start,gpu_stop; hipEventCreate(&gpu_start); hipEventCreate(&gpu_stop); hipEventRecord(gpu_start,0); hipLaunchKernelGGL(( findMax), dim3(256),dim3(256), 0, 0, deviceA,deviceMax,deviceMutex,NUMBER_OF_ELEMENTS); hipDeviceSynchronize(); hipMemcpy(hostMax,deviceMax,sizeof(int),hipMemcpyDeviceToHost); hipEventRecord(gpu_stop, 0); hipEventSynchronize(gpu_stop); hipEventElapsedTime(&gpu_elapsed_time, gpu_start, gpu_stop); hipEventDestroy(gpu_start); hipEventDestroy(gpu_stop); cout<<"Answer by CUDA for MAX is = "<<*hostMax<<endl; std::cout<<"The gpu took: "<<gpu_elapsed_time<<" milli-seconds"<<std::endl; clock_t cpu_start = clock(); int maxx = -1; for(int i=0;i<NUMBER_OF_ELEMENTS;i++) { if(hostA[i]>maxx) maxx = hostA[i]; } clock_t cpu_stop = clock(); clock_t cpu_elapsed_time = 1000*(cpu_stop - cpu_start)/CLOCKS_PER_SEC; cout<<"Expected max value is = "<<maxx<<endl; std::cout<<"The cpu took: "<<cpu_elapsed_time<<" milli-seconds"<<std::endl; hipFree(deviceA); delete[] hostA; return hipDeviceSynchronize(); } __global__ void findMax(int* A,int* current_max,int* mutex,unsigned int n) { //printf("threadIdx.x = %d and blockIdx = %d and gridDim.x = %d\n",threadIdx.x,blockIdx.x,gridDim.x); unsigned int index = threadIdx.x + blockIdx.x*blockDim.x; unsigned int stride = gridDim.x*blockDim.x; unsigned int offset = 0; __shared__ int cache[256]; int temp = -1; while(index+offset<n) { temp = fmaxf(temp,A[index+offset]); offset+=stride; } cache[threadIdx.x]=temp; __syncthreads(); //reduction //printf("blockDim.x = %d\n",blockDim.x/2); unsigned int i=blockDim.x/2; while(i!=0) { if(threadIdx.x<i) { cache[threadIdx.x] = fmaxf(cache[threadIdx.x],cache[threadIdx.x+i]); } __syncthreads(); i/=2; } if(threadIdx.x ==0) while(atomicCAS(mutex,0,1)!=0); *current_max = fmaxf(*current_max,cache[0]); atomicExch(mutex,0); }
65e3a5f0ddd35497d11a9535c33f221f8cc8dda2.cu
#include<math.h> #include<time.h> #include<stdexcept> #include<iostream> #include<cstdlib> //for abs(x) #include<stdio.h> using namespace std; __global__ void findMax(int* A,int* current_max,int* mutex,unsigned int n); int main() { const int NUMBER_OF_ELEMENTS = 1024*1024*20; int* hostA = (int*)malloc(NUMBER_OF_ELEMENTS*sizeof(int)); int* hostMax = (int*)malloc(sizeof(int)); *hostMax = -1; srand(time(0)); int i,j; //initialize host vector by random elements for(i=0;i<NUMBER_OF_ELEMENTS;i++) { hostA[i] = NUMBER_OF_ELEMENTS*rand() / RAND_MAX/123; } int* deviceA,*deviceMax,*deviceMutex; cudaMalloc(&deviceA,NUMBER_OF_ELEMENTS*sizeof(int)); cudaMalloc(&deviceMax,sizeof(int)); cudaMalloc(&deviceMutex,sizeof(int)); cudaMemset(deviceMax,-1,sizeof(int)); cudaMemset(deviceMutex,0,sizeof(int)); cudaMemcpy(deviceA,hostA,NUMBER_OF_ELEMENTS*sizeof(int),cudaMemcpyHostToDevice); //set up timing variables float gpu_elapsed_time; cudaEvent_t gpu_start,gpu_stop; cudaEventCreate(&gpu_start); cudaEventCreate(&gpu_stop); cudaEventRecord(gpu_start,0); findMax<<<256,256>>>(deviceA,deviceMax,deviceMutex,NUMBER_OF_ELEMENTS); cudaDeviceSynchronize(); cudaMemcpy(hostMax,deviceMax,sizeof(int),cudaMemcpyDeviceToHost); cudaEventRecord(gpu_stop, 0); cudaEventSynchronize(gpu_stop); cudaEventElapsedTime(&gpu_elapsed_time, gpu_start, gpu_stop); cudaEventDestroy(gpu_start); cudaEventDestroy(gpu_stop); cout<<"Answer by CUDA for MAX is = "<<*hostMax<<endl; std::cout<<"The gpu took: "<<gpu_elapsed_time<<" milli-seconds"<<std::endl; clock_t cpu_start = clock(); int maxx = -1; for(int i=0;i<NUMBER_OF_ELEMENTS;i++) { if(hostA[i]>maxx) maxx = hostA[i]; } clock_t cpu_stop = clock(); clock_t cpu_elapsed_time = 1000*(cpu_stop - cpu_start)/CLOCKS_PER_SEC; cout<<"Expected max value is = "<<maxx<<endl; std::cout<<"The cpu took: "<<cpu_elapsed_time<<" milli-seconds"<<std::endl; cudaFree(deviceA); delete[] hostA; return cudaDeviceSynchronize(); } __global__ void findMax(int* A,int* current_max,int* mutex,unsigned int n) { //printf("threadIdx.x = %d and blockIdx = %d and gridDim.x = %d\n",threadIdx.x,blockIdx.x,gridDim.x); unsigned int index = threadIdx.x + blockIdx.x*blockDim.x; unsigned int stride = gridDim.x*blockDim.x; unsigned int offset = 0; __shared__ int cache[256]; int temp = -1; while(index+offset<n) { temp = fmaxf(temp,A[index+offset]); offset+=stride; } cache[threadIdx.x]=temp; __syncthreads(); //reduction //printf("blockDim.x = %d\n",blockDim.x/2); unsigned int i=blockDim.x/2; while(i!=0) { if(threadIdx.x<i) { cache[threadIdx.x] = fmaxf(cache[threadIdx.x],cache[threadIdx.x+i]); } __syncthreads(); i/=2; } if(threadIdx.x ==0) while(atomicCAS(mutex,0,1)!=0); *current_max = fmaxf(*current_max,cache[0]); atomicExch(mutex,0); }
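The reduction above publishes each block's partial maximum through an atomicCAS spin lock on mutex. For non-negative int data like this example generates, one atomicMax per block is a simpler way to combine partial results; the sketch below shows that variant and is not the original kernel.

#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>

// Block-level max reduction in shared memory, then a single atomicMax per
// block to combine partial results.  Assumes non-negative int input so an
// initial value of -1 in *result is safe.
__global__ void block_max(const int* a, unsigned int n, int* result) {
    __shared__ int cache[256];
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int stride = gridDim.x * blockDim.x;
    int best = -1;
    for (unsigned int i = idx; i < n; i += stride)
        best = max(best, a[i]);
    cache[threadIdx.x] = best;
    __syncthreads();
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (threadIdx.x < s)
            cache[threadIdx.x] = max(cache[threadIdx.x], cache[threadIdx.x + s]);
        __syncthreads();
    }
    if (threadIdx.x == 0)
        atomicMax(result, cache[0]);
}

int main() {
    const unsigned int n = 1 << 20;
    int* h = (int*)malloc(n * sizeof(int));
    for (unsigned int i = 0; i < n; ++i) h[i] = (int)(i % 12345);
    int *d_a, *d_res, h_res = -1;
    cudaMalloc(&d_a, n * sizeof(int));
    cudaMalloc(&d_res, sizeof(int));
    cudaMemcpy(d_a, h, n * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_res, &h_res, sizeof(int), cudaMemcpyHostToDevice);
    block_max<<<256, 256>>>(d_a, n, d_res);
    cudaMemcpy(&h_res, d_res, sizeof(int), cudaMemcpyDeviceToHost);
    printf("max = %d\n", h_res);  // expect 12344
    cudaFree(d_a); cudaFree(d_res); free(h);
    return 0;
}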
54d27e0c47f896e623a136a980c559643c42fbde.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "kronmult.cuh" #include <device_launch_parameters.h> #include <type_traits> /* * computes number^power for integers * does not care about performances * does not use std::pow as it does an implicit float conversion that could lead to rounding errors for large * numbers */ __host__ int pow_int(int const number, int const power) { if (power == 0) return 1; return number * pow_int(number, power - 1); } /* * converts row and col indices into a single index for a matrix stored in col-major * `stride` is usually the number of rows of the matrix */ __device__ __forceinline__ constexpr int colmajor(int const row, int const col, int const stride) { return row + col * stride; } /* * computes output = input^T * * `input` is a `matrix_size` by `matrix_size` square matrix of stride `input_stride` * `output` is a `matrix_size` by `matrix_size` square matrix of stride `matrix_size` * * WARNING: the matrices are assumed to be stored in col-major order */ template<typename T> __device__ void transpose(T const input[], T output[], int const matrix_size, int const input_stride) { for (int i = threadIdx.x; i < matrix_size * matrix_size; i += blockDim.x) { int const c = i / matrix_size; int const r = i - c * matrix_size; output[colmajor(r, c, matrix_size)] = input[colmajor(c, r, input_stride)]; } } /* * Computes Y = X^T * M^T * * X is a `size_M` by `nb_col_X` matrix of stride `size_M` * M_transposed is a `size_M` by `size_M` matrix of stride `size_M` that contains a precomputed M^T * Y is a `nb_col_X` by `size_M` matrix of stride `nb_col_X` * * WARNING: the matrices are assumed to be stored in col-major order */ template<typename T> __device__ void multiply_transpose(T const X[], int const nb_col_X, T const M_transposed[], int const size_M, T Y[]) { // strided loop, each thread threadIdx.x manages the inputs i such that threadIdx.x % t==0 for (int i = threadIdx.x; i < nb_col_X * size_M; i += blockDim.x) { // extracts the column and row number for the current thread int const colX = i / size_M; int const rowM = i - colX * size_M; // computes the dot product to fill the [colX,rowM] cell of the matrix T dotprod = 0.; for (int k = 0; k < size_M; k++) { dotprod += X[colmajor(k, colX, size_M)] * M_transposed[colmajor(k, rowM, size_M)]; } // this sync is there to synchronise the threads for significantly improved performance in float // it does not impact correctness if constexpr(std::is_same<float, T>::value) __syncthreads(); Y[colmajor(colX, rowM, nb_col_X)] = dotprod; } } /* * Computes output += kron(matrix_list) * input while insuring that the addition to output is thread-safe * * `matrix_list` is an array containing pointers to `matrix_number` square matrices of size `matrix_size` by * `matrix_size` and stride `matrix_stride` `input` is a `size_input` (`matrix_size`^`matrix_number`) elements * vector `output` is a `size_input` elements vector, to which the output of the multiplication will be added * `workspace` is a `size_input` elements vector, to be used as workspace * `transpose_workspace` is a vector of size `matrix_size`*`matrix_size` to store transposed matrices * temporarily * * WARNINGS: * - `input`, `workspace` and `transpose_workspace` will be used as temporary workspaces and thus modified * - the matrices are assumed to be stored in col-major order * - the sizes are assumed to be correct */ template<typename T> __device__ void cuda_kronmult(int const matrix_count, int const matrix_size, T const *const 
matrix_list[], int const matrix_stride, T input[], int const size_input, T output[], T workspace[], T transpose_workspace[]) { // how many column should `input` have for the multiplications to be legal int const nb_col_input = size_input / matrix_size; // iterates on the matrices from last to first for (int i = matrix_count - 1; i >= 0; i--) { // transpose the matrix to get a better memory coalescing T const *const matrix = matrix_list[i]; transpose(matrix, transpose_workspace, matrix_size, matrix_stride); __syncthreads(); // performs the multiplication to consume the matrix multiply_transpose<T>(input, nb_col_input, transpose_workspace, matrix_size, workspace); __syncthreads(); // swap `input` and `workspace` such that `input` contains once again the input // note that, while they have the same size flattened, the shape (nb_columns and nb_rows) of `input` // and `workspace` are different this is on purpose and equivalent to a reshape operation that is // actually needed by the algorithm T *temp = input; input = workspace; workspace = temp; } // adds result to output in a thread-safe way // strided loop, each thread threadIdx.x manages the input i such that i % threadIdx.x==0 for (int i = threadIdx.x; i < size_input; i += blockDim.x) { atomicAdd(&output[i], input[i]); } } /* * each block gets a single batch element to process * * computes the current batch element * finds the corresponding inputs * and calls kronmult on them */ template<typename T> __global__ void cuda_kronmult_batchelement(int const matrix_count, int const matrix_size, T const *const matrix_list_batched[], int const matrix_stride, T *input_batched[], int const size_input, T *output_batched[], T *workspace_batched[], int const nb_batch) { // each block corresponds to a single batch element int const batchId = blockIdx.x; // gets the inputs for a given batch element T const *const *matrix_list = &matrix_list_batched[batchId * matrix_count]; T *input = input_batched[batchId]; T *output = output_batched[batchId]; T *workspace = workspace_batched[batchId]; // uses a thread to allocates the transpose workspace // in shared memory for improved performances __shared__ T *transpose_workspace; if (threadIdx.x == 0) transpose_workspace = new T[matrix_size * matrix_size]; __syncthreads(); // does the kronmult computations cuda_kronmult<T>(matrix_count, matrix_size, matrix_list, matrix_stride, input, size_input, output, workspace, transpose_workspace); // frees the tranpose workspace memory __syncthreads(); if (threadIdx.x == 0) delete[] transpose_workspace; } /* * calls the cuda kernel with the proper number of blocks and threads * we expect the inputs to already be on the GPU */ template<typename T> __host__ hipError_t cuda_kronmult_batched(int const matrix_count, int const matrix_size, T const *const matrix_list_batched[], int const matrix_stride, T *input_batched[], T *output_batched[], T *workspace_batched[], int const nb_batch) { // numbers of elements in the input vector int const size_input = pow_int(matrix_size, matrix_count); // each block will take care of a single batch element // the threads within a block will loop over input_size int deviceId; hipGetDevice(&deviceId); int threadsPerBlock; hipDeviceGetAttribute(&threadsPerBlock, hipDeviceAttributeMaxThreadsPerBlock, deviceId); if (size_input < threadsPerBlock) threadsPerBlock = size_input; // parallelize over batch elements hipLaunchKernelGGL(( cuda_kronmult_batchelement), dim3(nb_batch), dim3(threadsPerBlock), 0, 0, matrix_count, matrix_size, matrix_list_batched, 
matrix_stride, input_batched, size_input, output_batched, workspace_batched, nb_batch); // waits for kernel to finish and returns the error code return hipDeviceSynchronize(); } /* * double specialization of kronmult_batched */ template<> __host__ hipError_t kronmult_batched<double>(int const matrix_count, int const matrix_size, double const *const matrix_list_batched[], int const matrix_stride, double *input_batched[], double *output_batched[], double *workspace_batched[], int const nb_batch) { return cuda_kronmult_batched(matrix_count, matrix_size, matrix_list_batched, matrix_stride, input_batched, output_batched, workspace_batched, nb_batch); } /* * float specialization of kronmult_batched */ template<> __host__ hipError_t kronmult_batched<float>(int const matrix_count, int const matrix_size, float const *const matrix_list_batched[], int const matrix_stride, float *input_batched[], float *output_batched[], float *workspace_batched[], int const nb_batch) { return cuda_kronmult_batched(matrix_count, matrix_size, matrix_list_batched, matrix_stride, input_batched, output_batched, workspace_batched, nb_batch); }
54d27e0c47f896e623a136a980c559643c42fbde.cu
#include "kronmult.cuh" #include <device_launch_parameters.h> #include <type_traits> /* * computes number^power for integers * does not care about performances * does not use std::pow as it does an implicit float conversion that could lead to rounding errors for large * numbers */ __host__ int pow_int(int const number, int const power) { if (power == 0) return 1; return number * pow_int(number, power - 1); } /* * converts row and col indices into a single index for a matrix stored in col-major * `stride` is usually the number of rows of the matrix */ __device__ __forceinline__ constexpr int colmajor(int const row, int const col, int const stride) { return row + col * stride; } /* * computes output = input^T * * `input` is a `matrix_size` by `matrix_size` square matrix of stride `input_stride` * `output` is a `matrix_size` by `matrix_size` square matrix of stride `matrix_size` * * WARNING: the matrices are assumed to be stored in col-major order */ template<typename T> __device__ void transpose(T const input[], T output[], int const matrix_size, int const input_stride) { for (int i = threadIdx.x; i < matrix_size * matrix_size; i += blockDim.x) { int const c = i / matrix_size; int const r = i - c * matrix_size; output[colmajor(r, c, matrix_size)] = input[colmajor(c, r, input_stride)]; } } /* * Computes Y = X^T * M^T * * X is a `size_M` by `nb_col_X` matrix of stride `size_M` * M_transposed is a `size_M` by `size_M` matrix of stride `size_M` that contains a precomputed M^T * Y is a `nb_col_X` by `size_M` matrix of stride `nb_col_X` * * WARNING: the matrices are assumed to be stored in col-major order */ template<typename T> __device__ void multiply_transpose(T const X[], int const nb_col_X, T const M_transposed[], int const size_M, T Y[]) { // strided loop, each thread threadIdx.x manages the inputs i such that threadIdx.x % t==0 for (int i = threadIdx.x; i < nb_col_X * size_M; i += blockDim.x) { // extracts the column and row number for the current thread int const colX = i / size_M; int const rowM = i - colX * size_M; // computes the dot product to fill the [colX,rowM] cell of the matrix T dotprod = 0.; for (int k = 0; k < size_M; k++) { dotprod += X[colmajor(k, colX, size_M)] * M_transposed[colmajor(k, rowM, size_M)]; } // this sync is there to synchronise the threads for significantly improved performance in float // it does not impact correctness if constexpr(std::is_same<float, T>::value) __syncthreads(); Y[colmajor(colX, rowM, nb_col_X)] = dotprod; } } /* * Computes output += kron(matrix_list) * input while insuring that the addition to output is thread-safe * * `matrix_list` is an array containing pointers to `matrix_number` square matrices of size `matrix_size` by * `matrix_size` and stride `matrix_stride` `input` is a `size_input` (`matrix_size`^`matrix_number`) elements * vector `output` is a `size_input` elements vector, to which the output of the multiplication will be added * `workspace` is a `size_input` elements vector, to be used as workspace * `transpose_workspace` is a vector of size `matrix_size`*`matrix_size` to store transposed matrices * temporarily * * WARNINGS: * - `input`, `workspace` and `transpose_workspace` will be used as temporary workspaces and thus modified * - the matrices are assumed to be stored in col-major order * - the sizes are assumed to be correct */ template<typename T> __device__ void cuda_kronmult(int const matrix_count, int const matrix_size, T const *const matrix_list[], int const matrix_stride, T input[], int const size_input, T output[], T 
workspace[], T transpose_workspace[]) { // how many column should `input` have for the multiplications to be legal int const nb_col_input = size_input / matrix_size; // iterates on the matrices from last to first for (int i = matrix_count - 1; i >= 0; i--) { // transpose the matrix to get a better memory coalescing T const *const matrix = matrix_list[i]; transpose(matrix, transpose_workspace, matrix_size, matrix_stride); __syncthreads(); // performs the multiplication to consume the matrix multiply_transpose<T>(input, nb_col_input, transpose_workspace, matrix_size, workspace); __syncthreads(); // swap `input` and `workspace` such that `input` contains once again the input // note that, while they have the same size flattened, the shape (nb_columns and nb_rows) of `input` // and `workspace` are different this is on purpose and equivalent to a reshape operation that is // actually needed by the algorithm T *temp = input; input = workspace; workspace = temp; } // adds result to output in a thread-safe way // strided loop, each thread threadIdx.x manages the input i such that i % threadIdx.x==0 for (int i = threadIdx.x; i < size_input; i += blockDim.x) { atomicAdd(&output[i], input[i]); } } /* * each block gets a single batch element to process * * computes the current batch element * finds the corresponding inputs * and calls kronmult on them */ template<typename T> __global__ void cuda_kronmult_batchelement(int const matrix_count, int const matrix_size, T const *const matrix_list_batched[], int const matrix_stride, T *input_batched[], int const size_input, T *output_batched[], T *workspace_batched[], int const nb_batch) { // each block corresponds to a single batch element int const batchId = blockIdx.x; // gets the inputs for a given batch element T const *const *matrix_list = &matrix_list_batched[batchId * matrix_count]; T *input = input_batched[batchId]; T *output = output_batched[batchId]; T *workspace = workspace_batched[batchId]; // uses a thread to allocates the transpose workspace // in shared memory for improved performances __shared__ T *transpose_workspace; if (threadIdx.x == 0) transpose_workspace = new T[matrix_size * matrix_size]; __syncthreads(); // does the kronmult computations cuda_kronmult<T>(matrix_count, matrix_size, matrix_list, matrix_stride, input, size_input, output, workspace, transpose_workspace); // frees the tranpose workspace memory __syncthreads(); if (threadIdx.x == 0) delete[] transpose_workspace; } /* * calls the cuda kernel with the proper number of blocks and threads * we expect the inputs to already be on the GPU */ template<typename T> __host__ cudaError cuda_kronmult_batched(int const matrix_count, int const matrix_size, T const *const matrix_list_batched[], int const matrix_stride, T *input_batched[], T *output_batched[], T *workspace_batched[], int const nb_batch) { // numbers of elements in the input vector int const size_input = pow_int(matrix_size, matrix_count); // each block will take care of a single batch element // the threads within a block will loop over input_size int deviceId; cudaGetDevice(&deviceId); int threadsPerBlock; cudaDeviceGetAttribute(&threadsPerBlock, cudaDevAttrMaxThreadsPerBlock, deviceId); if (size_input < threadsPerBlock) threadsPerBlock = size_input; // parallelize over batch elements cuda_kronmult_batchelement<<<nb_batch, threadsPerBlock>>>(matrix_count, matrix_size, matrix_list_batched, matrix_stride, input_batched, size_input, output_batched, workspace_batched, nb_batch); // waits for kernel to finish and returns the 
error code return cudaDeviceSynchronize(); } /* * double specialization of kronmult_batched */ template<> __host__ cudaError kronmult_batched<double>(int const matrix_count, int const matrix_size, double const *const matrix_list_batched[], int const matrix_stride, double *input_batched[], double *output_batched[], double *workspace_batched[], int const nb_batch) { return cuda_kronmult_batched(matrix_count, matrix_size, matrix_list_batched, matrix_stride, input_batched, output_batched, workspace_batched, nb_batch); } /* * float specialization of kronmult_batched */ template<> __host__ cudaError kronmult_batched<float>(int const matrix_count, int const matrix_size, float const *const matrix_list_batched[], int const matrix_stride, float *input_batched[], float *output_batched[], float *workspace_batched[], int const nb_batch) { return cuda_kronmult_batched(matrix_count, matrix_size, matrix_list_batched, matrix_stride, input_batched, output_batched, workspace_batched, nb_batch); }
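For a single batch element, the kernels above compute output += kron(A_0, ..., A_{k-1}) * input with col-major matrices. Below is a small host-side reference for checking that result on toy sizes, forming the Kronecker product densely (fine for tests, impractical for real problem sizes); the function names are illustrative and not part of kronmult.cuh.

#include <vector>
#include <cstdio>

// Dense Kronecker product of col-major square matrices A (na x na) and
// B (nb x nb); the result is (na*nb) x (na*nb), also col-major.
std::vector<double> kron(const std::vector<double>& A, int na,
                         const std::vector<double>& B, int nb) {
    int n = na * nb;
    std::vector<double> K(n * n);
    for (int ca = 0; ca < na; ++ca)
        for (int ra = 0; ra < na; ++ra)
            for (int cb = 0; cb < nb; ++cb)
                for (int rb = 0; rb < nb; ++rb)
                    K[(ra * nb + rb) + (ca * nb + cb) * n] =
                        A[ra + ca * na] * B[rb + cb * nb];
    return K;
}

// y = M * x for a col-major n x n matrix.
std::vector<double> matvec(const std::vector<double>& M, int n,
                           const std::vector<double>& x) {
    std::vector<double> y(n, 0.0);
    for (int c = 0; c < n; ++c)
        for (int r = 0; r < n; ++r)
            y[r] += M[r + c * n] * x[c];
    return y;
}

int main() {
    std::vector<double> A = {1, 0, 0, 2};  // diag(1,2), col-major
    std::vector<double> B = {0, 1, 1, 0};  // swap matrix
    std::vector<double> x = {1, 2, 3, 4};
    std::vector<double> y = matvec(kron(A, 2, B, 2), 4, x);
    for (double v : y) printf("%g ", v);   // prints: 2 1 8 6
    printf("\n");
    return 0;
}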
5b7a9aa0355a7949b117a6052b83fc10039a9288.hip
// !!! This is a file automatically generated by hipify!!! /* * GPUb1piAngAmp_kernel.cu * */ #include <stdio.h> #include "hip/hip_runtime.h" // Original headers were scattered around file system #include "GPUManager/GPUCustomTypes.h" #include "GPUManager/CUDA-Complex.cuh" #include "GPUUtils/lorentzBoost.cuh" #include "GPUUtils/threeVector.cuh" #include "GPUUtils/wignerD.cuh" #include "GPUUtils/clebsch.cuh" #include "AMPTOOLS_AMPS/breakupMomentum.cuh" #include "AMPTOOLS_AMPS/barrierFactor.cuh" // Test headers #if 0 #include "GPUCustomTypes.h" #include "CUDA-Complex.cuh" #include "lorentzBoost.cuh" #include "threeVector.cuh" #include "wignerD.cuh" #include "clebsch.cuh" #include "breakupMomentum.cuh" #include "barrierFactor.cuh" #endif #define ADD4(a,b) { a[0]+b[0], a[1]+b[1], a[2]+b[2], a[3]+b[3] } #define MASS(v) (G_SQRT(v[0]*v[0]-v[1]*v[1]-v[2]*v[2]-v[3]*v[3])) #define Nterm(J) (G_SQRT((2*J+1)/(4*M_PI))) // Macro to ease definition of loops #define LOOP(INDEX,START,END,INC) for (int INDEX=START;INDEX<=END;INDEX+=INC) static __device__ void //note: 4-vector input presumed rotateZ( GDouble* v, GDouble phi ){ GDouble sinphi = G_SIN(phi); GDouble cosphi = G_COS(phi); GDouble tx; tx = v[1] * cosphi - v[2] * sinphi; v[2] = v[2] * cosphi + v[1] * sinphi; v[1] = tx; } static __device__ void //note: 4-vector input presumed rotateY ( GDouble* v, GDouble theta) { double sinphi = G_SIN(theta); double cosphi = G_COS(theta); double tz; tz = v[3] * cosphi - v[1] * sinphi; v[1] = v[1] * cosphi + v[3] * sinphi; v[3] = tz; } static __device__ GDouble //note: 3-vector input presumed theta( GDouble* pv ){ GDouble r= G_SQRT(pv[0]*pv[0] + pv[1]*pv[1]); return G_ATAN2( r , pv[2] ); } static __device__ void MoveToRF(GDouble *parent, GDouble *daughter) { GDouble *par3vec=parent+1; rotateZ( daughter , -phi(par3vec) ); rotateY( daughter , -theta(par3vec) ); GDouble beta[]={0,0, -G_SQRT(dot(par3vec,par3vec))/parent[0]}; //** (x) Might this be bootToRest??? // beta is defined to boost to parent's rest frame // I just adapted GPUUtil boost fcn with vector beta input boost( daughter , beta ); } static __device__ WCUComplex BreitWigner_loc(GDouble m0, GDouble Gamma0, int L, GDouble *P1, GDouble *P2) { GDouble Ptot[4] = ADD4(P1, P2); GDouble m = MASS(Ptot); GDouble mass1 = MASS(P1); GDouble mass2 = MASS(P2); // assert positive breakup momenta GDouble q0 = fabs( breakupMomentum(m0, mass1, mass2) ); GDouble q = fabs( breakupMomentum(m, mass1, mass2) ); //printf("BW: (%5.3f, %5.3f, %d) m=%6.4f m1=%6.4f m2=%6.4f q=%6.4f q0=%6.4f\n", // m0,Gamma0,L,m,mass1,mass2,q,q0); GDouble F0 = L==0 ? 1.0 : barrierFactor(q0, L); GDouble F = L==0 ? 1.0 : barrierFactor(q, L); GDouble width_coef=Gamma0*(m0/m); //GDouble qq0=q/q0; //GDouble width_qdep = (L==0 ? qq0 : (L==1 ? qq0*qq0*qq0 : pow(qq0,2*L+1)))*((F*F)/(F0*F0)); GDouble width_qdep = q/q0 * (F*F)/(F0*F0); //GDouble num_qdep = (L==0 ? q : (L==1 ? q*q*q : pow(q,2*L+1)))*(F*F); GDouble num_qdep = q*(F*F); GDouble width = width_coef * width_qdep; //complex<GDouble> bwtop(m0 * width, 0.0 ); WCUComplex bwtop = { G_SQRT(m0*width_coef) * num_qdep, 0 }; WCUComplex bwbottom = { m0*m0 - m*m , -1.0 * ( m0 * width ) }; return ( bwtop / bwbottom ); } // JR 2012-07-29 // Set all Amplitudes to 0 on the Device. This is needed now because we only // calculate amplitudes for those momenta sets with non-zero amplitudes. If // this function were not performed, amplitudes which are supposed to be zero will // be undefined. 
__global__ void Setzero_kernel(WCUComplex *pcDevAmp, int iNEvents) { int iEvent = GPU_THIS_EVENT; if (iEvent>=iNEvents) return; pcDevAmp[iEvent].m_dRe = 0.0; pcDevAmp[iEvent].m_dIm = 0.0; } // JR 2012-07-29 // Perform beginning of b1pi calculation, just enough to determine those // amplitude which will be set to zero. Amplitudes are set to (1,0) if // they are not zero. These amplitudes will need set to their correct // values on the call to GPUb1piAngAmp_kernel(). __global__ void Pretest_kernel( GPU_AMP_PROTO , int polBeam, GDouble polFrac, int J_X, int Par_X, int L_X, int I_X, int epsilon_R, int Iz_b1, int Iz_pi, GDouble u_rho_1, GDouble u_rho_3, GDouble u_omega_1, GDouble u_omega_3, GDouble u_b1_0, GDouble u_b1_2, GDouble G0_omega, GDouble G0_b1, bool orthocheck) { // Calculate event for this thread. int iEvent = GPU_THIS_EVENT; WCUComplex CZero = { 0, 0 }; WCUComplex COne = { 1, 0 }; int pol=(polBeam==1 ? +1 : -1); // y and x-pol. respectively //** (x) This statement can be evaluated at top of function? if (J_X==0 && Par_X*pol*epsilon_R==-1) { pcDevAmp[iEvent] = CZero; return; } GDouble m0_omega = 0.783; GDouble m0_b1 = 1.223; bool isZero; // Copy four-vectors for this thread from global memory. GDouble b1s_pi [4] = GPU_P4(3); GDouble omegas_pi[4] = GPU_P4(4); GDouble rhos_pim [4] = GPU_P4(5); GDouble rhos_pip [4] = GPU_P4(6); // Make four-vector sums GDouble rho [4] = ADD4(rhos_pip, rhos_pim ); GDouble omega [4] = ADD4(rho, omegas_pi); GDouble b1 [4] = ADD4(omega, b1s_pi); // Store mass of b1; for other vectors we can calculate mass on the fly. GDouble b1mass = MASS(b1); // Is this term zero? isZero = MASS(rho)+0.135 > m0_omega+3*G0_omega; isZero |= fabs(MASS(omega)-m0_omega) > 3*G0_omega; isZero |= fabs(b1mass-m0_b1) > 3*G0_b1; isZero |= b1mass < (m0_omega - 3*G0_omega); if (isZero) pcDevAmp[iEvent] = CZero; else pcDevAmp[iEvent] = COne; } // JR 2012-07-29 // Calculate amplitudes only for those momenta sets with known non-zero // amplitudes. __global__ void GPUb1piAngAmp_kernel( int cnt, // GPU_AMP_PROTO , GDouble* pfDevData, WCUComplex* pcDevAmp, int* piDevPerm, int iNParticles, int iNEvents, int polBeam, GDouble polFrac, int J_X, int Par_X, int L_X, int I_X, int epsilon_R, int Iz_b1, int Iz_pi, GDouble u_rho_1, GDouble u_rho_3, GDouble u_omega_1, GDouble u_omega_3, GDouble u_b1_0, GDouble u_b1_2, GDouble G0_omega, GDouble G0_b1, bool orthocheck) { // Calculate event for this thread. // int iEvent = GPU_THIS_EVENT; // JR 2012-07-29 // NOTE: This vesrsion of this function is called with different settings // for threadIdx, blockIdx and blockDim than for the original version. // The next line relects that change. int iEvent = threadIdx.x + blockIdx.x * blockDim.x; // Skip this event index if it overruns number of events. if (iEvent>=iNEvents) return; WCUComplex CZero = { 0, 0 }; WCUComplex i = { 0, 1 }; WCUComplex COne = { 1, 0 }; int pol=(polBeam==1 ? +1 : -1); // y and x-pol. respectively if (J_X==0 && Par_X*pol*epsilon_R==-1) { pcDevAmp[iEvent] = CZero; return; } int m_X; GDouble u_rho, u_omega, u_b1; GDouble InvSqrt2 = 1.0/G_SQRT(2.0); GDouble m0_rho = 0.775; GDouble G0_rho = 0.149; GDouble m0_omega = 0.783; GDouble m0_b1 = 1.223; bool useCutoff = true; bool isZero; // Copy four-vectors for this thread from global memory. // 2012-05-19 JR rhos_pip0,omega0,rho0 added for use // in BreitWigner_loc() below. 
GDouble beam [4] = GPU_P4(0); GDouble recoil [4] = GPU_P4(1); GDouble Xs_pi [4] = GPU_P4(2); GDouble b1s_pi [4] = GPU_P4(3); GDouble omegas_pi[4] = GPU_P4(4); GDouble rhos_pim [4] = GPU_P4(5); GDouble rhos_pip [4] = GPU_P4(6); GDouble rhos_pip0[4] = GPU_P4(6); // Make four-vector sums GDouble rho [4] = ADD4(rhos_pip, rhos_pim ); GDouble rho0 [4] = ADD4(rhos_pip, rhos_pim ); GDouble omega [4] = ADD4(rho, omegas_pi); GDouble omega0[4] = ADD4(rho, omegas_pi); GDouble b1 [4] = ADD4(omega, b1s_pi); // Store mass of b1; for other vectors we can calculate mass on the fly. GDouble b1mass = MASS(b1); // Is this term zero? if (useCutoff) { isZero = MASS(rho)+0.135 > m0_omega+3*G0_omega; isZero |= fabs(MASS(omega)-m0_omega) > 3*G0_omega; isZero |= fabs(b1mass-m0_b1) > 3*G0_b1; isZero |= b1mass < (m0_omega - 3*G0_omega); // Zero amplitude if (isZero) { pcDevAmp[iEvent] = CZero; return; } } // Continue to Calculate amplitude GDouble X[4] = ADD4(b1, Xs_pi); GDouble q = breakupMomentum( MASS(X), b1mass, MASS(Xs_pi) ); GDouble alpha = phi( &(recoil[1]) ); // NOTE: Values of beam and recoil are changed below. boostToRest (beam, X); boostToRest (recoil, X); // Define new coordinate system with // - beam parallel to z direction // - recoil in the x,z plain (i.e., y is normal to recoil and beam) // - y is normal to beam and recoil. GDouble zGJ[3] = { beam[1], beam[2], beam[3] }; makeUnit( zGJ ); //** (x) Be care of cross order, need to check this // 2012-05-19 JR - Invert yGJ to make cross come out right. // GDouble yGJ[3] = { recoil[1], recoil[2], recoil[3] }; GDouble yGJ[3] = { -recoil[1], -recoil[2], -recoil[3] }; cross( yGJ, zGJ ); makeUnit( yGJ ); GDouble xGJ[3] = { yGJ[0], yGJ[1], yGJ[2] }; cross( xGJ, zGJ ); //particles to rest frames of their parents boostToRest (b1, X); boostToRest (omega, X); boostToRest (rho, X); boostToRest (rhos_pip, X); // Note that in this form of the cascade of boosts, we are not // saving the 4-vecs in their intermediate RF, but going sequentially // straight to their immediate parent's RF. // Make sure to verify that the intermediares were not in fact needed // and that we didn't break anything with this simplification. MoveToRF(b1,omega); MoveToRF(b1,rho); MoveToRF(omega,rho); MoveToRF(b1,rhos_pip); MoveToRF(omega,rhos_pip); MoveToRF(rho,rhos_pip); GDouble *b1_3vec=b1+1; GDouble ang_b1[]={dot(b1_3vec, xGJ), dot(b1_3vec, yGJ), dot(b1_3vec, zGJ)}; GDouble b1_XRF_cosTheta = cosTheta(ang_b1); GDouble b1_XRF_phi = phi(ang_b1); GDouble rho_omegaRF_cosTheta = cosTheta(rho+1); GDouble rho_omegaRF_phi = phi(rho+1); GDouble rhos_pip_rhoRF_cosTheta = cosTheta(rhos_pip+1); GDouble rhos_pip_rhoRF_phi = phi(rhos_pip+1); GDouble omega_b1RF_cosTheta = cosTheta(omega+1); GDouble omega_b1RF_phi = phi(omega+1); /* List_l_R: 0 1 List_J_rho: 1 List_l_rho: -1 1 List_L_omega: 1 List_l_omega: -1 0 1 List_L_b1: 0 2 List_l_b1: -1 0 1 */ // SUMMATION GUIDE: // notation meant to resemble TeX symbols in derivation // exception: pol = \epsilon_\gamma // l -> lambda, indicating helicity // u_[particle](q.n.) -> amplitude strength coefficient int l_R_lim = J_X + 1; //shortcut: CB(L_X, J_b1, 0, l_b1 ; J_X, l_b1) vanishes when // = CB(1, 1, 0, 0 ; 1, 0), so omit l_b1=0 when J_X=L_X=1 int l_b1_inc = L_X==1 && J_X==1 ? 
2 : 1; // restrict omega decay to just p wave int L_omega_lim = 1; // set to 3 to allow F wave int L_Rsign_lim; GDouble cosAlpha=G_COS(alpha), sinAlpha=G_SIN(alpha); WCUComplex expFact = {cosAlpha, sinAlpha}; WCUComplex expFact_conj = {cosAlpha, -sinAlpha}; WCUComplex ThelSum = { 0 , 0 }; // Setup dependent loop limits LOOP(l_gamma, -1, 1, 2) { LOOP(l_R, 0, l_R_lim, 1) { if(l_R==0 && epsilon_R==-1) continue; // LOOP(l_R, (1-epsilon_R)/2, l_R_lim, 1) // if this still causes some GPU core // misalignment, try setting lower bound back to zero and tacking on // * !(l_R==0 && epsilon_R==-1) // to the long list of factors multiplying Thelsum below -IS //summing positive and negative helicity terms of R's reflectivity state L_Rsign_lim = l_R > 0 ? -1 : +1; // Switch order of loop, because LOOP can only handle increasing increments // LOOP(l_Rsign, 1, L_Rsign_lim, -2) LOOP(l_Rsign, L_Rsign_lim, 1, 2) { m_X = l_gamma - l_Rsign * l_R; if (m_X==0) { //testing for cancelation in |J 0>+pol*P*epsilon_R*(-1)^J|J 0> if(Par_X*pol*epsilon_R == (J_X % 2 ==0 ? -1:+1)) continue; } else { //enforcing that the selected projection <= vector magnitude if( abs(m_X)>J_X) continue; } WCUComplex l_b1DepTerm = {0,0}; LOOP(l_b1, -1,1,l_b1_inc) { WCUComplex L_b1DepTerm = {0,0}; LOOP(L_b1,0,2,2) { WCUComplex l_omegaDepTerm = {0,0}; // 2012-05-19 JR Fix l_omega loop // LOOP(l_omega,-1,0,1) LOOP(l_omega,-1,1,1) { WCUComplex L_omegaDepTerm = {0,0}; LOOP(L_omega, 1, L_omega_lim, 2) { WCUComplex J_rhoDepTerm = {0,0}; LOOP(J_rho, 1, L_omega_lim, 2) { //enforces triang. ineq. betw. J_omega=1, J_rho and L_omega // in effect, L_omega and J_rho take identical values if( abs(J_rho-L_omega) > 1) continue; WCUComplex l_rhoDepTerm = {0,0}; LOOP(l_rho,-1,1,1) { //shortcut CB(1,1,0,0;1,0)=0 if(L_omega==1 && J_rho==1 && l_rho==0) continue; l_rhoDepTerm += Conjugate(wignerD(1, l_omega, l_rho, rho_omegaRF_cosTheta, rho_omegaRF_phi)) * clebsch(L_omega, 0, J_rho, l_rho, 1, l_rho) * Y(J_rho, l_rho, rhos_pip_rhoRF_cosTheta, rhos_pip_rhoRF_phi); } u_rho = J_rho==1 ? u_rho_1 : (J_rho==3 ? u_rho_3 : 0); J_rhoDepTerm += u_rho * l_rhoDepTerm * BreitWigner_loc(m0_rho,G0_rho, J_rho,rhos_pip0,rhos_pim); } J_rhoDepTerm *= BreitWigner_loc(m0_omega, G0_omega, L_omega, omegas_pi,rho0); u_omega = L_omega==1 ? u_omega_1 : (L_omega==3 ? u_omega_3 : 0); L_omegaDepTerm += u_omega * J_rhoDepTerm * Nterm(L_omega); } l_omegaDepTerm += L_omegaDepTerm * clebsch(L_b1, 0, 1, l_omega, 1, l_omega) * Conjugate(wignerD(1, l_b1, l_omega, omega_b1RF_cosTheta, omega_b1RF_phi)); } l_omegaDepTerm *= BreitWigner_loc(m0_b1, G0_b1, L_b1, b1s_pi, omega0); u_b1 = L_b1==0 ? u_b1_0 : (L_b1==2 ? u_b1_2 : 0); L_b1DepTerm += u_b1 * l_omegaDepTerm * Nterm(L_b1); } //-- (_) understand why assignment here produces: // KERNEL LAUNCH ERROR [b1piAngAmp]: the launch timed out and was terminated // assigning/incrementing integers causes no problems l_b1DepTerm += L_b1DepTerm * Conjugate(wignerD(J_X, m_X, l_b1, b1_XRF_cosTheta, b1_XRF_phi)) * clebsch(L_X, 0, 1, l_b1, J_X, l_b1); } ThelSum += l_b1DepTerm //to account for |eps_g> ~ (|1,-1>exp(-ia)-pol|1,+1>exp(ia)) * (l_gamma==1 ? (-pol)*expFact : expFact_conj) //Assemble reflectivity eigenvector with epsilon_X=pol*epslion_R * (GDouble) (m_X<0 ? Par_X*pol*epsilon_R*((J_X-m_X) % 2 == 0 ? +1:-1) : 1) * (GDouble) (m_X == 0 ? 1.0 : InvSqrt2 ) // to apply th(l_R) reflectivity state prefactor: // m=0: 1/2 m>0: 1/sqrt(2) m<0: 0 (last just skipped in this sum) * (GDouble) (l_R > 0 ? 
InvSqrt2 : 1.0 ) //apply coefficients to the reflectivity basis terms: * (GDouble) (l_Rsign==1 ? 1 : epsilon_R) ; //v(*epsilon_R) * } } } ThelSum *= Nterm(L_X) * // barrier factor // (GDouble)(L_X==0 ? 1.0 : (L_X==1 ? q : G_POW(q,L_X))) * (GDouble)(L_X==0 ? 1.0 : (L_X==1 ? q : ::pow(q,L_X))) * // to apply polarization fraction weights: (GDouble)G_SQRT((1.0-pol*polFrac)*0.5) * //(1+g) for x-pol, (1-g) for y-pol (pol==1 ? i : COne)*InvSqrt2 * //to account for |eps_g> ~ sqrt(-eps/2) clebsch(1, Iz_b1, 1, Iz_pi, I_X, Iz_b1 + Iz_pi); pcDevAmp[iEvent] = ThelSum; } #ifdef DEBUG // This is for debugging // It reads the amplitdues and momemta vectors from the CUDA device and prints them. void printCudaArrays(GDouble* pfDevData, WCUComplex* pcDevAmp, int* piDevPerm, int iNParticles, int iNEvents, int cnt) { // Read amplitudes from GPU to CPU GDouble *amp = (GDouble *) malloc (iNEvents * 2 * sizeof(GDouble)); hipMemcpy (amp, pcDevAmp, iNEvents * 2 * sizeof(GDouble), hipMemcpyDeviceToHost); // Copy 4momenta from GPU to CPU - make part() big enough to hold the entire set of momenta GDouble *part = (GDouble *) malloc (iNEvents * 4 * iNParticles * sizeof(GDouble)); hipMemcpy (part, pfDevData, iNEvents * 4 * iNParticles * sizeof(GDouble), hipMemcpyDeviceToHost); // Print arrays int ievent, ipart, idim; int ndim = 4; for (ievent=0; ievent<iNEvents; ievent++) { printf ("test: CUDA: %2d %6d ", cnt, ievent); // Print amplitude printf (" %12.4e %12.4e", amp[2*ievent], amp[2*ievent+1]); for (ipart=0;ipart<iNParticles;ipart++) { printf (" "); for (idim=0;idim<4;idim++) { printf ( " %8.4f", part[ievent + idim*iNEvents + ipart*ndim*iNEvents ] ); } } printf("\n"); } // Free allocations from arrays allocated withing this function if (amp) free(amp); if (part) free(part); } #endif void GPUb1piAngAmp_exec(dim3 dimGrid, dim3 dimBlock, // GPU_AMP_PROTO, GDouble* pfDevData, WCUComplex* pcDevAmp, int* piDevPerm, int iNParticles, int iNEvents, int polBeam, GDouble polFrac, int J_X, int Par_X, int L_X, int I_X, int epsilon_R, int Iz_b1, int Iz_pi, GDouble u_rho_1, GDouble u_rho_3, GDouble u_omega_1, GDouble u_omega_3, GDouble u_b1_0, GDouble u_b1_2, GDouble G0_omega, GDouble G0_b1, bool orthocheck) { int ievent, ievent1, idim, ipart, i, j, k; int nonZero = 0; int static cnt = 0; cnt++; // printf("test: Call to GPUb1piAngAmp_exec: cnt %d\n", cnt); // Identify amplitudes which are zero hipLaunchKernelGGL(( Pretest_kernel), dim3(dimGrid), dim3(dimBlock) , 0, 0, // GPU_AMP_ARGS, pfDevData, pcDevAmp, piDevPerm, iNParticles, iNEvents, polBeam, polFrac, J_X, Par_X, L_X, I_X, epsilon_R, Iz_b1, Iz_pi, u_rho_1, u_rho_3, u_omega_1, u_omega_3, u_b1_0, u_b1_2, G0_omega, G0_b1, orthocheck ); // printf("test: after call to Pretest_kernel()\n"); // Copy pcDevAmp from device to host */ GDouble *hostAmp = (GDouble *) malloc(2*iNEvents*sizeof(GDouble)); hipMemcpy (hostAmp, pcDevAmp, 2*iNEvents*sizeof(GDouble), hipMemcpyDeviceToHost); // Initialize all on-device amplitudes to zero hipLaunchKernelGGL(( Setzero_kernel), dim3(dimGrid), dim3(dimBlock) , 0, 0, pcDevAmp,iNEvents); // printf("test: after call to Setzero_kernel()\n"); // Count number of nonZero amplitudes for (i=0;i<iNEvents;i++) { if (hostAmp[2*i]==1.0) nonZero++; } // Allocate array to hold indices of nonZero amplitudes int *nonZeroIndices = (int *) malloc(nonZero * sizeof(int)); j = 0; for (i=0;i<iNEvents;i++) { if (hostAmp[2*i]==1.0) nonZeroIndices[j++] = i; } // Copy 4momenta from GPU to CPU - make part() big enough to hold the entire set of momenta GDouble *part = (GDouble *) 
malloc (iNEvents * 4 * iNParticles * sizeof(GDouble)); hipMemcpy (part, pfDevData, iNEvents * 4 * iNParticles * sizeof(GDouble), hipMemcpyDeviceToHost); // printf("test: after copy pfDevData to Device\n"); // Copy nonZero momenta in place to the start of the array part // Make sure order of copying moves continuously from lower to higher indice. for (ipart=0;ipart<iNParticles;ipart++) { for (idim=0;idim<4;idim++) { for (ievent1=0;ievent1<nonZero;ievent1++) { ievent = nonZeroIndices[ievent1]; // Index of nonZero event in original particle array i = ievent + idim * iNEvents + ipart * 4 * iNEvents; // Index of nonZero event in new particle array j = ievent1 + idim * nonZero + ipart * 4 * nonZero; part[j] = part[i]; } } } // Copy new particles on CPU back to GPU, only need those momenta sets which were non-zero, not the size of the entire set. GDouble *part_dev; hipMalloc(&part_dev, nonZero * 4 * iNParticles * sizeof(GDouble) ); hipMemcpy( part_dev, part, nonZero * 4 * iNParticles * sizeof(GDouble), hipMemcpyHostToDevice ); // printf("test: after copy Part to Device\n"); // Reset dimGrid and dimBlock for the value of nonZero int Nthreads = 32; dim3 dimBlock1(Nthreads); dim3 dimGrid1((nonZero-1)/Nthreads+1); // Evaluate non-zero amplitudes // iNEvents = nonZero; hipLaunchKernelGGL(( GPUb1piAngAmp_kernel), dim3(dimGrid1), dim3(dimBlock1) , 0, 0, cnt, // GPU_AMP_ARGS, // pfDevData, pcDevAmp, piDevPerm, iNParticles, nonZero, part_dev, pcDevAmp, piDevPerm, iNParticles, nonZero, polBeam, polFrac, J_X, Par_X, L_X, I_X, epsilon_R, Iz_b1, Iz_pi, u_rho_1, u_rho_3, u_omega_1, u_omega_3, u_b1_0, u_b1_2, G0_omega, G0_b1, orthocheck ); // printf("test: after call to GUPb1piAngAmp_kernel()\n"); // Read amplitudes from GPU to CPU GDouble *amp = (GDouble *) malloc (iNEvents * 2 * sizeof(GDouble)); hipMemcpy (amp, pcDevAmp, iNEvents * 2 * sizeof(GDouble), hipMemcpyDeviceToHost); // printf("test: after copy Amp to Host\n"); // Re-arrange location of amplitudes on GPU to match original distribution of vectors // Progress through the index array backward. k = iNEvents; for (i=nonZero-1;i>=0;i--) { // Zero those elements between this element and last. for (j=nonZeroIndices[i]+1;j<k;j++) { amp[2*j ] = 0.0; amp[2*j+1] = 0.0; } k = nonZeroIndices[i]; amp[2*k ] = amp[2*i ]; amp[2*k+1] = amp[2*i+1]; } // Zero remaining elements for (j=0;j<nonZeroIndices[0];j++) { amp[2*j ] = 0.0; amp[2*j+1] = 0.0; } // Write values back to GPU so calling program will find them where they // expect them. hipMemcpy (pcDevAmp, amp, iNEvents * 2 * sizeof(GDouble), hipMemcpyHostToDevice); // printf("test: after copy Amp to Device\n"); // Free allocations if (part_dev) hipFree(part_dev); if (amp) free(amp); if (part) free(part); // printf("test: after Free allocations\n"); // Print Particle and Amplitude CUDA arrays #ifdef DEBUG printCudaArrays(pfDevData, pcDevAmp, piDevPerm, iNParticles, iNEvents, cnt); #endif }
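// --- Hedged host-side sketch (separate from the file above) ------------------
// BreitWigner_loc above folds a mass-dependent width and Blatt-Weisskopf
// barrier factors into one device routine. The snippet below re-expresses the
// same lineshape on the host for the simplest case L == 0, where both barrier
// factors equal 1, using a locally defined two-body breakup momentum (the
// standard kinematic formula, not the library's breakupMomentum). It is a
// sanity-check sketch only; the rho and charged-pion masses are standard
// values chosen for illustration.
#include <cmath>
#include <complex>
#include <cstdio>

static double breakup_momentum(double M, double m1, double m2) {
  // two-body breakup momentum in the parent rest frame
  double a = M * M - (m1 + m2) * (m1 + m2);
  double b = M * M - (m1 - m2) * (m1 - m2);
  return std::sqrt(std::fabs(a * b)) / (2.0 * M);
}

static std::complex<double> breit_wigner_L0(double m0, double Gamma0,
                                            double m, double m1, double m2) {
  double q0 = std::fabs(breakup_momentum(m0, m1, m2));
  double q  = std::fabs(breakup_momentum(m,  m1, m2));
  double width_coef = Gamma0 * (m0 / m);       // Gamma0 * m0 / m, as in the device code
  double width      = width_coef * (q / q0);   // L == 0: the F*F / F0*F0 ratio is 1
  std::complex<double> top(std::sqrt(m0 * width_coef) * q, 0.0);
  std::complex<double> bottom(m0 * m0 - m * m, -m0 * width);
  return top / bottom;
}

int main() {
  // rho(770) -> pi+ pi- as an illustration: |BW|^2 peaks near m0
  const double m0 = 0.775, G0 = 0.149, mpi = 0.1396;
  for (double m = 0.5; m <= 1.1; m += 0.1) {
    std::printf("m = %4.2f GeV  |BW|^2 = %8.4f\n",
                m, std::norm(breit_wigner_L0(m0, G0, m, mpi, mpi)));
  }
  return 0;
}
// -----------------------------------------------------------------------------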
5b7a9aa0355a7949b117a6052b83fc10039a9288.cu
/* * GPUb1piAngAmp_kernel.cu * */ #include <stdio.h> #include "cuda.h" // Original headers were scattered around file system #include "GPUManager/GPUCustomTypes.h" #include "GPUManager/CUDA-Complex.cuh" #include "GPUUtils/lorentzBoost.cuh" #include "GPUUtils/threeVector.cuh" #include "GPUUtils/wignerD.cuh" #include "GPUUtils/clebsch.cuh" #include "AMPTOOLS_AMPS/breakupMomentum.cuh" #include "AMPTOOLS_AMPS/barrierFactor.cuh" // Test headers #if 0 #include "GPUCustomTypes.h" #include "CUDA-Complex.cuh" #include "lorentzBoost.cuh" #include "threeVector.cuh" #include "wignerD.cuh" #include "clebsch.cuh" #include "breakupMomentum.cuh" #include "barrierFactor.cuh" #endif #define ADD4(a,b) { a[0]+b[0], a[1]+b[1], a[2]+b[2], a[3]+b[3] } #define MASS(v) (G_SQRT(v[0]*v[0]-v[1]*v[1]-v[2]*v[2]-v[3]*v[3])) #define Nterm(J) (G_SQRT((2*J+1)/(4*M_PI))) // Macro to ease definition of loops #define LOOP(INDEX,START,END,INC) for (int INDEX=START;INDEX<=END;INDEX+=INC) static __device__ void //note: 4-vector input presumed rotateZ( GDouble* v, GDouble phi ){ GDouble sinphi = G_SIN(phi); GDouble cosphi = G_COS(phi); GDouble tx; tx = v[1] * cosphi - v[2] * sinphi; v[2] = v[2] * cosphi + v[1] * sinphi; v[1] = tx; } static __device__ void //note: 4-vector input presumed rotateY ( GDouble* v, GDouble theta) { double sinphi = G_SIN(theta); double cosphi = G_COS(theta); double tz; tz = v[3] * cosphi - v[1] * sinphi; v[1] = v[1] * cosphi + v[3] * sinphi; v[3] = tz; } static __device__ GDouble //note: 3-vector input presumed theta( GDouble* pv ){ GDouble r= G_SQRT(pv[0]*pv[0] + pv[1]*pv[1]); return G_ATAN2( r , pv[2] ); } static __device__ void MoveToRF(GDouble *parent, GDouble *daughter) { GDouble *par3vec=parent+1; rotateZ( daughter , -phi(par3vec) ); rotateY( daughter , -theta(par3vec) ); GDouble beta[]={0,0, -G_SQRT(dot(par3vec,par3vec))/parent[0]}; //** (x) Might this be bootToRest??? // beta is defined to boost to parent's rest frame // I just adapted GPUUtil boost fcn with vector beta input boost( daughter , beta ); } static __device__ WCUComplex BreitWigner_loc(GDouble m0, GDouble Gamma0, int L, GDouble *P1, GDouble *P2) { GDouble Ptot[4] = ADD4(P1, P2); GDouble m = MASS(Ptot); GDouble mass1 = MASS(P1); GDouble mass2 = MASS(P2); // assert positive breakup momenta GDouble q0 = fabs( breakupMomentum(m0, mass1, mass2) ); GDouble q = fabs( breakupMomentum(m, mass1, mass2) ); //printf("BW: (%5.3f, %5.3f, %d) m=%6.4f m1=%6.4f m2=%6.4f q=%6.4f q0=%6.4f\n", // m0,Gamma0,L,m,mass1,mass2,q,q0); GDouble F0 = L==0 ? 1.0 : barrierFactor(q0, L); GDouble F = L==0 ? 1.0 : barrierFactor(q, L); GDouble width_coef=Gamma0*(m0/m); //GDouble qq0=q/q0; //GDouble width_qdep = (L==0 ? qq0 : (L==1 ? qq0*qq0*qq0 : pow(qq0,2*L+1)))*((F*F)/(F0*F0)); GDouble width_qdep = q/q0 * (F*F)/(F0*F0); //GDouble num_qdep = (L==0 ? q : (L==1 ? q*q*q : pow(q,2*L+1)))*(F*F); GDouble num_qdep = q*(F*F); GDouble width = width_coef * width_qdep; //complex<GDouble> bwtop(m0 * width, 0.0 ); WCUComplex bwtop = { G_SQRT(m0*width_coef) * num_qdep, 0 }; WCUComplex bwbottom = { m0*m0 - m*m , -1.0 * ( m0 * width ) }; return ( bwtop / bwbottom ); } // JR 2012-07-29 // Set all Amplitudes to 0 on the Device. This is needed now because we only // calculate amplitudes for those momenta sets with non-zero amplitudes. If // this function were not performed, amplitudes which are supposed to be zero will // be undefined. 
__global__ void Setzero_kernel(WCUComplex *pcDevAmp, int iNEvents) { int iEvent = GPU_THIS_EVENT; if (iEvent>=iNEvents) return; pcDevAmp[iEvent].m_dRe = 0.0; pcDevAmp[iEvent].m_dIm = 0.0; } // JR 2012-07-29 // Perform beginning of b1pi calculation, just enough to determine those // amplitude which will be set to zero. Amplitudes are set to (1,0) if // they are not zero. These amplitudes will need set to their correct // values on the call to GPUb1piAngAmp_kernel(). __global__ void Pretest_kernel( GPU_AMP_PROTO , int polBeam, GDouble polFrac, int J_X, int Par_X, int L_X, int I_X, int epsilon_R, int Iz_b1, int Iz_pi, GDouble u_rho_1, GDouble u_rho_3, GDouble u_omega_1, GDouble u_omega_3, GDouble u_b1_0, GDouble u_b1_2, GDouble G0_omega, GDouble G0_b1, bool orthocheck) { // Calculate event for this thread. int iEvent = GPU_THIS_EVENT; WCUComplex CZero = { 0, 0 }; WCUComplex COne = { 1, 0 }; int pol=(polBeam==1 ? +1 : -1); // y and x-pol. respectively //** (x) This statement can be evaluated at top of function? if (J_X==0 && Par_X*pol*epsilon_R==-1) { pcDevAmp[iEvent] = CZero; return; } GDouble m0_omega = 0.783; GDouble m0_b1 = 1.223; bool isZero; // Copy four-vectors for this thread from global memory. GDouble b1s_pi [4] = GPU_P4(3); GDouble omegas_pi[4] = GPU_P4(4); GDouble rhos_pim [4] = GPU_P4(5); GDouble rhos_pip [4] = GPU_P4(6); // Make four-vector sums GDouble rho [4] = ADD4(rhos_pip, rhos_pim ); GDouble omega [4] = ADD4(rho, omegas_pi); GDouble b1 [4] = ADD4(omega, b1s_pi); // Store mass of b1; for other vectors we can calculate mass on the fly. GDouble b1mass = MASS(b1); // Is this term zero? isZero = MASS(rho)+0.135 > m0_omega+3*G0_omega; isZero |= fabs(MASS(omega)-m0_omega) > 3*G0_omega; isZero |= fabs(b1mass-m0_b1) > 3*G0_b1; isZero |= b1mass < (m0_omega - 3*G0_omega); if (isZero) pcDevAmp[iEvent] = CZero; else pcDevAmp[iEvent] = COne; } // JR 2012-07-29 // Calculate amplitudes only for those momenta sets with known non-zero // amplitudes. __global__ void GPUb1piAngAmp_kernel( int cnt, // GPU_AMP_PROTO , GDouble* pfDevData, WCUComplex* pcDevAmp, int* piDevPerm, int iNParticles, int iNEvents, int polBeam, GDouble polFrac, int J_X, int Par_X, int L_X, int I_X, int epsilon_R, int Iz_b1, int Iz_pi, GDouble u_rho_1, GDouble u_rho_3, GDouble u_omega_1, GDouble u_omega_3, GDouble u_b1_0, GDouble u_b1_2, GDouble G0_omega, GDouble G0_b1, bool orthocheck) { // Calculate event for this thread. // int iEvent = GPU_THIS_EVENT; // JR 2012-07-29 // NOTE: This vesrsion of this function is called with different settings // for threadIdx, blockIdx and blockDim than for the original version. // The next line relects that change. int iEvent = threadIdx.x + blockIdx.x * blockDim.x; // Skip this event index if it overruns number of events. if (iEvent>=iNEvents) return; WCUComplex CZero = { 0, 0 }; WCUComplex i = { 0, 1 }; WCUComplex COne = { 1, 0 }; int pol=(polBeam==1 ? +1 : -1); // y and x-pol. respectively if (J_X==0 && Par_X*pol*epsilon_R==-1) { pcDevAmp[iEvent] = CZero; return; } int m_X; GDouble u_rho, u_omega, u_b1; GDouble InvSqrt2 = 1.0/G_SQRT(2.0); GDouble m0_rho = 0.775; GDouble G0_rho = 0.149; GDouble m0_omega = 0.783; GDouble m0_b1 = 1.223; bool useCutoff = true; bool isZero; // Copy four-vectors for this thread from global memory. // 2012-05-19 JR rhos_pip0,omega0,rho0 added for use // in BreitWigner_loc() below. 
GDouble beam [4] = GPU_P4(0); GDouble recoil [4] = GPU_P4(1); GDouble Xs_pi [4] = GPU_P4(2); GDouble b1s_pi [4] = GPU_P4(3); GDouble omegas_pi[4] = GPU_P4(4); GDouble rhos_pim [4] = GPU_P4(5); GDouble rhos_pip [4] = GPU_P4(6); GDouble rhos_pip0[4] = GPU_P4(6); // Make four-vector sums GDouble rho [4] = ADD4(rhos_pip, rhos_pim ); GDouble rho0 [4] = ADD4(rhos_pip, rhos_pim ); GDouble omega [4] = ADD4(rho, omegas_pi); GDouble omega0[4] = ADD4(rho, omegas_pi); GDouble b1 [4] = ADD4(omega, b1s_pi); // Store mass of b1; for other vectors we can calculate mass on the fly. GDouble b1mass = MASS(b1); // Is this term zero? if (useCutoff) { isZero = MASS(rho)+0.135 > m0_omega+3*G0_omega; isZero |= fabs(MASS(omega)-m0_omega) > 3*G0_omega; isZero |= fabs(b1mass-m0_b1) > 3*G0_b1; isZero |= b1mass < (m0_omega - 3*G0_omega); // Zero amplitude if (isZero) { pcDevAmp[iEvent] = CZero; return; } } // Continue to Calculate amplitude GDouble X[4] = ADD4(b1, Xs_pi); GDouble q = breakupMomentum( MASS(X), b1mass, MASS(Xs_pi) ); GDouble alpha = phi( &(recoil[1]) ); // NOTE: Values of beam and recoil are changed below. boostToRest (beam, X); boostToRest (recoil, X); // Define new coordinate system with // - beam parallel to z direction // - recoil in the x,z plain (i.e., y is normal to recoil and beam) // - y is normal to beam and recoil. GDouble zGJ[3] = { beam[1], beam[2], beam[3] }; makeUnit( zGJ ); //** (x) Be care of cross order, need to check this // 2012-05-19 JR - Invert yGJ to make cross come out right. // GDouble yGJ[3] = { recoil[1], recoil[2], recoil[3] }; GDouble yGJ[3] = { -recoil[1], -recoil[2], -recoil[3] }; cross( yGJ, zGJ ); makeUnit( yGJ ); GDouble xGJ[3] = { yGJ[0], yGJ[1], yGJ[2] }; cross( xGJ, zGJ ); //particles to rest frames of their parents boostToRest (b1, X); boostToRest (omega, X); boostToRest (rho, X); boostToRest (rhos_pip, X); // Note that in this form of the cascade of boosts, we are not // saving the 4-vecs in their intermediate RF, but going sequentially // straight to their immediate parent's RF. // Make sure to verify that the intermediares were not in fact needed // and that we didn't break anything with this simplification. MoveToRF(b1,omega); MoveToRF(b1,rho); MoveToRF(omega,rho); MoveToRF(b1,rhos_pip); MoveToRF(omega,rhos_pip); MoveToRF(rho,rhos_pip); GDouble *b1_3vec=b1+1; GDouble ang_b1[]={dot(b1_3vec, xGJ), dot(b1_3vec, yGJ), dot(b1_3vec, zGJ)}; GDouble b1_XRF_cosTheta = cosTheta(ang_b1); GDouble b1_XRF_phi = phi(ang_b1); GDouble rho_omegaRF_cosTheta = cosTheta(rho+1); GDouble rho_omegaRF_phi = phi(rho+1); GDouble rhos_pip_rhoRF_cosTheta = cosTheta(rhos_pip+1); GDouble rhos_pip_rhoRF_phi = phi(rhos_pip+1); GDouble omega_b1RF_cosTheta = cosTheta(omega+1); GDouble omega_b1RF_phi = phi(omega+1); /* List_l_R: 0 1 List_J_rho: 1 List_l_rho: -1 1 List_L_omega: 1 List_l_omega: -1 0 1 List_L_b1: 0 2 List_l_b1: -1 0 1 */ // SUMMATION GUIDE: // notation meant to resemble TeX symbols in derivation // exception: pol = \epsilon_\gamma // l -> lambda, indicating helicity // u_[particle](q.n.) -> amplitude strength coefficient int l_R_lim = J_X + 1; //shortcut: CB(L_X, J_b1, 0, l_b1 ; J_X, l_b1) vanishes when // = CB(1, 1, 0, 0 ; 1, 0), so omit l_b1=0 when J_X=L_X=1 int l_b1_inc = L_X==1 && J_X==1 ? 
2 : 1; // restrict omega decay to just p wave int L_omega_lim = 1; // set to 3 to allow F wave int L_Rsign_lim; GDouble cosAlpha=G_COS(alpha), sinAlpha=G_SIN(alpha); WCUComplex expFact = {cosAlpha, sinAlpha}; WCUComplex expFact_conj = {cosAlpha, -sinAlpha}; WCUComplex ThelSum = { 0 , 0 }; // Setup dependent loop limits LOOP(l_gamma, -1, 1, 2) { LOOP(l_R, 0, l_R_lim, 1) { if(l_R==0 && epsilon_R==-1) continue; // LOOP(l_R, (1-epsilon_R)/2, l_R_lim, 1) // if this still causes some GPU core // misalignment, try setting lower bound back to zero and tacking on // * !(l_R==0 && epsilon_R==-1) // to the long list of factors multiplying Thelsum below -IS //summing positive and negative helicity terms of R's reflectivity state L_Rsign_lim = l_R > 0 ? -1 : +1; // Switch order of loop, because LOOP can only handle increasing increments // LOOP(l_Rsign, 1, L_Rsign_lim, -2) LOOP(l_Rsign, L_Rsign_lim, 1, 2) { m_X = l_gamma - l_Rsign * l_R; if (m_X==0) { //testing for cancelation in |J 0>+pol*P*epsilon_R*(-1)^J|J 0> if(Par_X*pol*epsilon_R == (J_X % 2 ==0 ? -1:+1)) continue; } else { //enforcing that the selected projection <= vector magnitude if( abs(m_X)>J_X) continue; } WCUComplex l_b1DepTerm = {0,0}; LOOP(l_b1, -1,1,l_b1_inc) { WCUComplex L_b1DepTerm = {0,0}; LOOP(L_b1,0,2,2) { WCUComplex l_omegaDepTerm = {0,0}; // 2012-05-19 JR Fix l_omega loop // LOOP(l_omega,-1,0,1) LOOP(l_omega,-1,1,1) { WCUComplex L_omegaDepTerm = {0,0}; LOOP(L_omega, 1, L_omega_lim, 2) { WCUComplex J_rhoDepTerm = {0,0}; LOOP(J_rho, 1, L_omega_lim, 2) { //enforces triang. ineq. betw. J_omega=1, J_rho and L_omega // in effect, L_omega and J_rho take identical values if( abs(J_rho-L_omega) > 1) continue; WCUComplex l_rhoDepTerm = {0,0}; LOOP(l_rho,-1,1,1) { //shortcut CB(1,1,0,0;1,0)=0 if(L_omega==1 && J_rho==1 && l_rho==0) continue; l_rhoDepTerm += Conjugate(wignerD(1, l_omega, l_rho, rho_omegaRF_cosTheta, rho_omegaRF_phi)) * clebsch(L_omega, 0, J_rho, l_rho, 1, l_rho) * Y(J_rho, l_rho, rhos_pip_rhoRF_cosTheta, rhos_pip_rhoRF_phi); } u_rho = J_rho==1 ? u_rho_1 : (J_rho==3 ? u_rho_3 : 0); J_rhoDepTerm += u_rho * l_rhoDepTerm * BreitWigner_loc(m0_rho,G0_rho, J_rho,rhos_pip0,rhos_pim); } J_rhoDepTerm *= BreitWigner_loc(m0_omega, G0_omega, L_omega, omegas_pi,rho0); u_omega = L_omega==1 ? u_omega_1 : (L_omega==3 ? u_omega_3 : 0); L_omegaDepTerm += u_omega * J_rhoDepTerm * Nterm(L_omega); } l_omegaDepTerm += L_omegaDepTerm * clebsch(L_b1, 0, 1, l_omega, 1, l_omega) * Conjugate(wignerD(1, l_b1, l_omega, omega_b1RF_cosTheta, omega_b1RF_phi)); } l_omegaDepTerm *= BreitWigner_loc(m0_b1, G0_b1, L_b1, b1s_pi, omega0); u_b1 = L_b1==0 ? u_b1_0 : (L_b1==2 ? u_b1_2 : 0); L_b1DepTerm += u_b1 * l_omegaDepTerm * Nterm(L_b1); } //-- (_) understand why assignment here produces: // KERNEL LAUNCH ERROR [b1piAngAmp]: the launch timed out and was terminated // assigning/incrementing integers causes no problems l_b1DepTerm += L_b1DepTerm * Conjugate(wignerD(J_X, m_X, l_b1, b1_XRF_cosTheta, b1_XRF_phi)) * clebsch(L_X, 0, 1, l_b1, J_X, l_b1); } ThelSum += l_b1DepTerm //to account for |eps_g> ~ (|1,-1>exp(-ia)-pol|1,+1>exp(ia)) * (l_gamma==1 ? (-pol)*expFact : expFact_conj) //Assemble reflectivity eigenvector with epsilon_X=pol*epslion_R * (GDouble) (m_X<0 ? Par_X*pol*epsilon_R*((J_X-m_X) % 2 == 0 ? +1:-1) : 1) * (GDouble) (m_X == 0 ? 1.0 : InvSqrt2 ) // to apply th(l_R) reflectivity state prefactor: // m=0: 1/2 m>0: 1/sqrt(2) m<0: 0 (last just skipped in this sum) * (GDouble) (l_R > 0 ? 
InvSqrt2 : 1.0 ) //apply coefficients to the reflectivity basis terms: * (GDouble) (l_Rsign==1 ? 1 : epsilon_R) ; //v(*epsilon_R) * } } } ThelSum *= Nterm(L_X) * // barrier factor // (GDouble)(L_X==0 ? 1.0 : (L_X==1 ? q : G_POW(q,L_X))) * (GDouble)(L_X==0 ? 1.0 : (L_X==1 ? q : ::pow(q,L_X))) * // to apply polarization fraction weights: (GDouble)G_SQRT((1.0-pol*polFrac)*0.5) * //(1+g) for x-pol, (1-g) for y-pol (pol==1 ? i : COne)*InvSqrt2 * //to account for |eps_g> ~ sqrt(-eps/2) clebsch(1, Iz_b1, 1, Iz_pi, I_X, Iz_b1 + Iz_pi); pcDevAmp[iEvent] = ThelSum; } #ifdef DEBUG // This is for debugging // It reads the amplitdues and momemta vectors from the CUDA device and prints them. void printCudaArrays(GDouble* pfDevData, WCUComplex* pcDevAmp, int* piDevPerm, int iNParticles, int iNEvents, int cnt) { // Read amplitudes from GPU to CPU GDouble *amp = (GDouble *) malloc (iNEvents * 2 * sizeof(GDouble)); cudaMemcpy (amp, pcDevAmp, iNEvents * 2 * sizeof(GDouble), cudaMemcpyDeviceToHost); // Copy 4momenta from GPU to CPU - make part() big enough to hold the entire set of momenta GDouble *part = (GDouble *) malloc (iNEvents * 4 * iNParticles * sizeof(GDouble)); cudaMemcpy (part, pfDevData, iNEvents * 4 * iNParticles * sizeof(GDouble), cudaMemcpyDeviceToHost); // Print arrays int ievent, ipart, idim; int ndim = 4; for (ievent=0; ievent<iNEvents; ievent++) { printf ("test: CUDA: %2d %6d ", cnt, ievent); // Print amplitude printf (" %12.4e %12.4e", amp[2*ievent], amp[2*ievent+1]); for (ipart=0;ipart<iNParticles;ipart++) { printf (" "); for (idim=0;idim<4;idim++) { printf ( " %8.4f", part[ievent + idim*iNEvents + ipart*ndim*iNEvents ] ); } } printf("\n"); } // Free allocations from arrays allocated withing this function if (amp) free(amp); if (part) free(part); } #endif void GPUb1piAngAmp_exec(dim3 dimGrid, dim3 dimBlock, // GPU_AMP_PROTO, GDouble* pfDevData, WCUComplex* pcDevAmp, int* piDevPerm, int iNParticles, int iNEvents, int polBeam, GDouble polFrac, int J_X, int Par_X, int L_X, int I_X, int epsilon_R, int Iz_b1, int Iz_pi, GDouble u_rho_1, GDouble u_rho_3, GDouble u_omega_1, GDouble u_omega_3, GDouble u_b1_0, GDouble u_b1_2, GDouble G0_omega, GDouble G0_b1, bool orthocheck) { int ievent, ievent1, idim, ipart, i, j, k; int nonZero = 0; int static cnt = 0; cnt++; // printf("test: Call to GPUb1piAngAmp_exec: cnt %d\n", cnt); // Identify amplitudes which are zero Pretest_kernel<<< dimGrid, dimBlock >>> ( // GPU_AMP_ARGS, pfDevData, pcDevAmp, piDevPerm, iNParticles, iNEvents, polBeam, polFrac, J_X, Par_X, L_X, I_X, epsilon_R, Iz_b1, Iz_pi, u_rho_1, u_rho_3, u_omega_1, u_omega_3, u_b1_0, u_b1_2, G0_omega, G0_b1, orthocheck ); // printf("test: after call to Pretest_kernel()\n"); // Copy pcDevAmp from device to host */ GDouble *hostAmp = (GDouble *) malloc(2*iNEvents*sizeof(GDouble)); cudaMemcpy (hostAmp, pcDevAmp, 2*iNEvents*sizeof(GDouble), cudaMemcpyDeviceToHost); // Initialize all on-device amplitudes to zero Setzero_kernel<<< dimGrid, dimBlock >>>(pcDevAmp,iNEvents); // printf("test: after call to Setzero_kernel()\n"); // Count number of nonZero amplitudes for (i=0;i<iNEvents;i++) { if (hostAmp[2*i]==1.0) nonZero++; } // Allocate array to hold indices of nonZero amplitudes int *nonZeroIndices = (int *) malloc(nonZero * sizeof(int)); j = 0; for (i=0;i<iNEvents;i++) { if (hostAmp[2*i]==1.0) nonZeroIndices[j++] = i; } // Copy 4momenta from GPU to CPU - make part() big enough to hold the entire set of momenta GDouble *part = (GDouble *) malloc (iNEvents * 4 * iNParticles * sizeof(GDouble)); cudaMemcpy 
(part, pfDevData, iNEvents * 4 * iNParticles * sizeof(GDouble), cudaMemcpyDeviceToHost); // printf("test: after copy pfDevData to Device\n"); // Copy nonZero momenta in place to the start of the array part // Make sure order of copying moves continuously from lower to higher indice. for (ipart=0;ipart<iNParticles;ipart++) { for (idim=0;idim<4;idim++) { for (ievent1=0;ievent1<nonZero;ievent1++) { ievent = nonZeroIndices[ievent1]; // Index of nonZero event in original particle array i = ievent + idim * iNEvents + ipart * 4 * iNEvents; // Index of nonZero event in new particle array j = ievent1 + idim * nonZero + ipart * 4 * nonZero; part[j] = part[i]; } } } // Copy new particles on CPU back to GPU, only need those momenta sets which were non-zero, not the size of the entire set. GDouble *part_dev; cudaMalloc(&part_dev, nonZero * 4 * iNParticles * sizeof(GDouble) ); cudaMemcpy( part_dev, part, nonZero * 4 * iNParticles * sizeof(GDouble), cudaMemcpyHostToDevice ); // printf("test: after copy Part to Device\n"); // Reset dimGrid and dimBlock for the value of nonZero int Nthreads = 32; dim3 dimBlock1(Nthreads); dim3 dimGrid1((nonZero-1)/Nthreads+1); // Evaluate non-zero amplitudes // iNEvents = nonZero; GPUb1piAngAmp_kernel<<< dimGrid1, dimBlock1 >>> ( cnt, // GPU_AMP_ARGS, // pfDevData, pcDevAmp, piDevPerm, iNParticles, nonZero, part_dev, pcDevAmp, piDevPerm, iNParticles, nonZero, polBeam, polFrac, J_X, Par_X, L_X, I_X, epsilon_R, Iz_b1, Iz_pi, u_rho_1, u_rho_3, u_omega_1, u_omega_3, u_b1_0, u_b1_2, G0_omega, G0_b1, orthocheck ); // printf("test: after call to GUPb1piAngAmp_kernel()\n"); // Read amplitudes from GPU to CPU GDouble *amp = (GDouble *) malloc (iNEvents * 2 * sizeof(GDouble)); cudaMemcpy (amp, pcDevAmp, iNEvents * 2 * sizeof(GDouble), cudaMemcpyDeviceToHost); // printf("test: after copy Amp to Host\n"); // Re-arrange location of amplitudes on GPU to match original distribution of vectors // Progress through the index array backward. k = iNEvents; for (i=nonZero-1;i>=0;i--) { // Zero those elements between this element and last. for (j=nonZeroIndices[i]+1;j<k;j++) { amp[2*j ] = 0.0; amp[2*j+1] = 0.0; } k = nonZeroIndices[i]; amp[2*k ] = amp[2*i ]; amp[2*k+1] = amp[2*i+1]; } // Zero remaining elements for (j=0;j<nonZeroIndices[0];j++) { amp[2*j ] = 0.0; amp[2*j+1] = 0.0; } // Write values back to GPU so calling program will find them where they // expect them. cudaMemcpy (pcDevAmp, amp, iNEvents * 2 * sizeof(GDouble), cudaMemcpyHostToDevice); // printf("test: after copy Amp to Device\n"); // Free allocations if (part_dev) cudaFree(part_dev); if (amp) free(amp); if (part) free(part); // printf("test: after Free allocations\n"); // Print Particle and Amplitude CUDA arrays #ifdef DEBUG printCudaArrays(pfDevData, pcDevAmp, piDevPerm, iNParticles, iNEvents, cnt); #endif }
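// --- Hedged standalone sketch (separate from the file above) -----------------
// GPUb1piAngAmp_exec above uses a two-pass strategy: a cheap pretest kernel
// flags the events whose amplitude can be non-zero, the host compacts the
// surviving indices, the expensive kernel runs only on that subset, and the
// results are scattered back to their original slots. The toy example below
// reproduces that control flow with an invented predicate (keep even values)
// and an invented payload (square the value); the kernel names pretest and
// expensive are illustrative only and say nothing about the physics kernel.
#include <cstdio>
#include <vector>
#include <cuda_runtime.h>

__global__ void pretest(const int *in, int *flag, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) flag[i] = (in[i] % 2 == 0) ? 1 : 0;   // cheap predicate
}

__global__ void expensive(const int *in, int *out, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) out[i] = in[i] * in[i];               // stands in for the real work
}

int main() {
  const int n = 8;
  std::vector<int> h_in = {3, 4, 7, 8, 10, 11, 12, 13};
  int *d_in, *d_flag, *d_sub, *d_subout;
  cudaMalloc(&d_in, n * sizeof(int));
  cudaMalloc(&d_flag, n * sizeof(int));
  cudaMemcpy(d_in, h_in.data(), n * sizeof(int), cudaMemcpyHostToDevice);

  pretest<<<1, 32>>>(d_in, d_flag, n);
  std::vector<int> h_flag(n);
  cudaMemcpy(h_flag.data(), d_flag, n * sizeof(int), cudaMemcpyDeviceToHost);

  // host-side compaction: keep only the indices that survived the pretest
  std::vector<int> idx, h_sub;
  for (int i = 0; i < n; ++i)
    if (h_flag[i]) { idx.push_back(i); h_sub.push_back(h_in[i]); }
  int m = (int)idx.size();

  cudaMalloc(&d_sub, m * sizeof(int));
  cudaMalloc(&d_subout, m * sizeof(int));
  cudaMemcpy(d_sub, h_sub.data(), m * sizeof(int), cudaMemcpyHostToDevice);
  expensive<<<(m + 31) / 32, 32>>>(d_sub, d_subout, m);

  // scatter results back to their original positions, zero everywhere else
  std::vector<int> h_subout(m), h_out(n, 0);
  cudaMemcpy(h_subout.data(), d_subout, m * sizeof(int), cudaMemcpyDeviceToHost);
  for (int k = 0; k < m; ++k) h_out[idx[k]] = h_subout[k];
  for (int i = 0; i < n; ++i) std::printf("%d ", h_out[i]);
  std::printf("\n");

  cudaFree(d_in); cudaFree(d_flag); cudaFree(d_sub); cudaFree(d_subout);
  return 0;
}
// -----------------------------------------------------------------------------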
5fe4330c330b0d30b07188ff1bc5997ed7e017d7.hip
// !!! This is a file automatically generated by hipify!!! #include<stdio.h> #include<cuda.h> #include<cuda_runtime.h> #include <cmath> /* log2() */ #include <cstdint> /* int64_t, uint64_t */ #include <cstdlib> /* srand(), rand() */ #include <ctime> /* time() */ #include <iostream> /* std::cout, std::endl */ #include "../include/utils.cuh" //INCLUDE HEADER FILE /* void cpuToGpuMemcpy(uint64_t* h_data,uint64_t* d_data,int size) { hipError_t err = hipMemcpy(d_data,h_data,size,hipMemcpyHostToDevice) ; if(err != hipSuccess) { fprintf(stderr,"Failed to copy vector from host device!",hipGetErrorString(err)) ; exit(EXIT_FAILURE) ; } } void gpuToCpuMemcpy(uint64_t* d_data,uint64_t* h_data,int size) { hipError_t err = hipMemcpy(h_data,d_data,size,hipMemcpyDeviceToHost) ; if(err != hipSuccess) { fprintf(stderr,"Failed to copy vector from gpu device!",hipGetErrorString(err)) ; exit(EXIT_FAILURE) ; } hipFree(d_data) ; } */ uint64_t* preComputeTwiddleFactor(uint64_t n,uint64_t p, uint64_t r) { uint64_t x,y ; uint64_t m=1,a,k_ ; uint64_t* twiddleFactorArray = (uint64_t*)calloc((log2(n)*(n/2)),sizeof(uint64_t)) ; uint64_t maxRow = log2(n) ; uint64_t maxCol = n/2 ; for(x=0;x < maxRow;x++){ m = m<<1 ; k_ = (p-1) / m ; a = modExp(r,k_,p) ; for(y=0;y<m/2;y++){ twiddleFactorArray[ x*maxCol + y] = modExp(a,y,p) ; //std::cout<<std::endl<<modExp(a,y,p) ; } } return twiddleFactorArray ; } bool compVec(uint64_t *vec1, uint64_t *vec2, uint64_t n, bool debug){ bool comp = true; for(uint64_t i = 0; i < n; i++){ if(vec1[i] != vec2[i]){ comp = false; if(debug){ std::cout << "(vec1[" << i << "] : " << vec1[i] << ")"; std::cout << "!= (vec2[" << i << "] : " << vec2[i] << ")"; std::cout << std::endl; }else{ break; } } } return comp; } /** * Return vector with each element of the input at its bit-reversed position * * @param vec The vector to bit reverse * @param n The length of the vector, must be a power of two * @return The bit reversed vector */ uint64_t *bit_reverse(uint64_t *vec, uint64_t n){ uint64_t num_bits = log2(n); uint64_t *result; result = (uint64_t *) malloc(n*sizeof(uint64_t)); uint64_t reverse_num; for(uint64_t i = 0; i < n; i++){ reverse_num = 0; for(uint64_t j = 0; j < num_bits; j++){ reverse_num = reverse_num << 1; if(i & (1 << j)){ reverse_num = reverse_num | 1; } } result[reverse_num] = vec[i]; } return result; } /** * Perform the operation 'base^exp (mod m)' using the memory-efficient method * * @param base The base of the expression * @param exp The exponent of the expression * @param m The modulus of the expression * @return The result of the expression */ __host__ __device__ uint64_t modExp(uint64_t base, uint64_t exp, uint64_t m){ uint64_t result = 1; while(exp > 0){ if(exp % 2){ result = modulo(result*base, m); } exp = exp >> 1; base = modulo(base*base,m); } return result; } /** * Perform the operation 'base (mod m)' * * @param base The base of the expression * @param m The modulus of the expression * @return The result of the expression */ __host__ __device__ uint64_t modulo(int64_t base, int64_t m){ int64_t result = base % m; return (result >= 0) ? 
result : result + m; } /** * Print an array of arbitrary length in a readable format * * @param vec The array to be displayed * @param n The length of the array */ void printVec(uint64_t *vec, uint64_t n){ std::cout << "["; for(uint64_t i = 0; i < n; i++){ std::cout << vec[i] << ","; } std::cout << "]" << std::endl; } /** * Generate an array of arbitrary length containing random positive integers * * @param n The length of the array * @param max The maximum value for an array element [Default: RAND_MAX] */ uint64_t *randVec(uint64_t n, uint64_t max){ uint64_t *vec; vec = (uint64_t *)malloc(n*sizeof(uint64_t)); srand(time(0)); for(uint64_t i = 0; i < n; i++){ vec[i] = rand()%(max + 1); } return vec; }
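// --- Hedged aside (separate from the file above) ------------------------------
// modExp above is already enough to build a modular inverse when the modulus is
// prime, via Fermat's little theorem: a^(p-2) * a = a^(p-1) = 1 (mod p). The
// helper below is an illustrative addition, assuming the declarations in
// ../include/utils.cuh match the definitions above; it is not part of the
// original utilities.
#include <cstdint>
#include "../include/utils.cuh"

__host__ __device__ inline uint64_t modInverse(uint64_t a, uint64_t p) {
  return modExp(a, p - 2, p);   // valid for prime p and a not divisible by p
}
// e.g. modInverse(3, 17) == 6, since 3 * 6 = 18 = 1 (mod 17)
// -----------------------------------------------------------------------------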
5fe4330c330b0d30b07188ff1bc5997ed7e017d7.cu
#include<stdio.h> #include<cuda.h> #include<cuda_runtime.h> #include <cmath> /* log2() */ #include <cstdint> /* int64_t, uint64_t */ #include <cstdlib> /* srand(), rand() */ #include <ctime> /* time() */ #include <iostream> /* std::cout, std::endl */ #include "../include/utils.cuh" //INCLUDE HEADER FILE /* void cpuToGpuMemcpy(uint64_t* h_data,uint64_t* d_data,int size) { cudaError_t err = cudaMemcpy(d_data,h_data,size,cudaMemcpyHostToDevice) ; if(err != cudaSuccess) { fprintf(stderr,"Failed to copy vector from host device!",cudaGetErrorString(err)) ; exit(EXIT_FAILURE) ; } } void gpuToCpuMemcpy(uint64_t* d_data,uint64_t* h_data,int size) { cudaError_t err = cudaMemcpy(h_data,d_data,size,cudaMemcpyDeviceToHost) ; if(err != cudaSuccess) { fprintf(stderr,"Failed to copy vector from gpu device!",cudaGetErrorString(err)) ; exit(EXIT_FAILURE) ; } cudaFree(d_data) ; } */ uint64_t* preComputeTwiddleFactor(uint64_t n,uint64_t p, uint64_t r) { uint64_t x,y ; uint64_t m=1,a,k_ ; uint64_t* twiddleFactorArray = (uint64_t*)calloc((log2(n)*(n/2)),sizeof(uint64_t)) ; uint64_t maxRow = log2(n) ; uint64_t maxCol = n/2 ; for(x=0;x < maxRow;x++){ m = m<<1 ; k_ = (p-1) / m ; a = modExp(r,k_,p) ; for(y=0;y<m/2;y++){ twiddleFactorArray[ x*maxCol + y] = modExp(a,y,p) ; //std::cout<<std::endl<<modExp(a,y,p) ; } } return twiddleFactorArray ; } bool compVec(uint64_t *vec1, uint64_t *vec2, uint64_t n, bool debug){ bool comp = true; for(uint64_t i = 0; i < n; i++){ if(vec1[i] != vec2[i]){ comp = false; if(debug){ std::cout << "(vec1[" << i << "] : " << vec1[i] << ")"; std::cout << "!= (vec2[" << i << "] : " << vec2[i] << ")"; std::cout << std::endl; }else{ break; } } } return comp; } /** * Return vector with each element of the input at its bit-reversed position * * @param vec The vector to bit reverse * @param n The length of the vector, must be a power of two * @return The bit reversed vector */ uint64_t *bit_reverse(uint64_t *vec, uint64_t n){ uint64_t num_bits = log2(n); uint64_t *result; result = (uint64_t *) malloc(n*sizeof(uint64_t)); uint64_t reverse_num; for(uint64_t i = 0; i < n; i++){ reverse_num = 0; for(uint64_t j = 0; j < num_bits; j++){ reverse_num = reverse_num << 1; if(i & (1 << j)){ reverse_num = reverse_num | 1; } } result[reverse_num] = vec[i]; } return result; } /** * Perform the operation 'base^exp (mod m)' using the memory-efficient method * * @param base The base of the expression * @param exp The exponent of the expression * @param m The modulus of the expression * @return The result of the expression */ __host__ __device__ uint64_t modExp(uint64_t base, uint64_t exp, uint64_t m){ uint64_t result = 1; while(exp > 0){ if(exp % 2){ result = modulo(result*base, m); } exp = exp >> 1; base = modulo(base*base,m); } return result; } /** * Perform the operation 'base (mod m)' * * @param base The base of the expression * @param m The modulus of the expression * @return The result of the expression */ __host__ __device__ uint64_t modulo(int64_t base, int64_t m){ int64_t result = base % m; return (result >= 0) ? 
result : result + m; } /** * Print an array of arbitrary length in a readable format * * @param vec The array to be displayed * @param n The length of the array */ void printVec(uint64_t *vec, uint64_t n){ std::cout << "["; for(uint64_t i = 0; i < n; i++){ std::cout << vec[i] << ","; } std::cout << "]" << std::endl; } /** * Generate an array of arbitrary length containing random positive integers * * @param n The length of the array * @param max The maximum value for an array element [Default: RAND_MAX] */ uint64_t *randVec(uint64_t n, uint64_t max){ uint64_t *vec; vec = (uint64_t *)malloc(n*sizeof(uint64_t)); srand(time(0)); for(uint64_t i = 0; i < n; i++){ vec[i] = rand()%(max + 1); } return vec; }
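// --- Hedged usage sketch (separate from the file above) ----------------------
// The helpers above (modExp, modulo, bit_reverse, preComputeTwiddleFactor) are
// the host-side building blocks of a number-theoretic transform. The snippet
// below exercises them with the small working prime p = 17, primitive root
// r = 3 and length n = 8 (n divides p - 1, as an NTT requires); the parameter
// choice is illustrative, and it assumes ../include/utils.cuh declares these
// functions exactly as defined above.
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include "../include/utils.cuh"

int main() {
  const uint64_t p = 17, r = 3, n = 8;

  // Fermat's little theorem: r^(p-1) = 1 (mod p)
  std::printf("3^16 mod 17 = %llu (expect 1)\n",
              (unsigned long long)modExp(r, p - 1, p));

  // modulo() also normalises negative inputs into [0, m)
  std::printf("-5 mod 17 = %llu (expect 12)\n",
              (unsigned long long)modulo(-5, 17));

  // per-stage twiddle factors a^y with a = r^((p-1)/m) for m = 2, 4, ..., n;
  // row x has n/2 slots but only the first m/2 are filled
  uint64_t *tw = preComputeTwiddleFactor(n, p, r);
  std::printf("stage m=4, y=1 twiddle = %llu (expect 13, i.e. 3^4 mod 17)\n",
              (unsigned long long)tw[1 * (n / 2) + 1]);

  // bit-reversal permutation of 0..n-1, the usual first step of an iterative NTT
  uint64_t idx[8] = {0, 1, 2, 3, 4, 5, 6, 7};
  uint64_t *rev = bit_reverse(idx, n);
  printVec(rev, n);   // expect [0,4,2,6,1,5,3,7,]

  free(tw);
  free(rev);
  return 0;
}
// -----------------------------------------------------------------------------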
68da2eb9575c97061a28ba0c4ebb3f2dacc1c7eb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Implements the math functions for GPU. #include "caffe2/utils/math.h" #include <cstring> #include <limits> #include <numeric> #include <vector> #include <hipcub/hipcub.hpp> #include <hipcub/hipcub.hpp> #include <thrust/device_vector.h> #include <thrust/functional.h> #include "caffe2/core/context_gpu.h" #include "caffe2/utils/conversions.h" #include "caffe2/utils/fixed_divisor.h" // TODO: Move this to fixed_divisor.h #ifdef __HIP_PLATFORM_HCC__ #define FIXED_DIVISOR int32_t #define FIXED_DIVISOR_DIV(d, n) (n / d) #define FIXED_DIVISOR_MOD(d, n) (n % d) #define FIXED_DIVISOR_DIV_MOD(d, n, q, r) \ do { \ const auto n_copy = n; \ *q = n_copy / d; \ *r = n_copy % d; \ } while (0) #else // __HIP_PLATFORM_HCC__ #define FIXED_DIVISOR FixedDivisor<int32_t> #define FIXED_DIVISOR_DIV(d, n) (d.Div(n)) #define FIXED_DIVISOR_MOD(d, n) (d.Mod(n)) #define FIXED_DIVISOR_DIV_MOD(d, n, q, r) (d.DivMod(n, q, r)) #endif // __HIP_PLATFORM_HCC__ #ifdef __HIP_PLATFORM_HCC__ using CUBLAS_HALF_TYPE = rocblas_half; #else // __HIP_PLATFORM_HCC using CUBLAS_HALF_TYPE = __half; #endif // __HIP_PLATFORM_HCC #include "caffe2/utils/math_utils.h" #if THRUST_VERSION >= 100800 #define THRUST_SUPPORTS_PER_THREAD #endif // THRUST_VERSION >= 100800 namespace caffe2 { namespace math { namespace { #define DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Func, expr) \ template <typename T> \ struct Func##Functor { \ inline __host__ __device__ T \ operator()(const T& lhs, const T& rhs) const { \ return lhs expr rhs; \ } \ }; \ template <> \ struct Func##Functor<at::Half> { \ inline __host__ __device__ at::Half operator()( \ const at::Half& lhs, \ const at::Half& rhs) const { \ return convert::To<float, at::Half>(convert::To<at::Half, float>( \ lhs) expr convert::To<at::Half, float>(rhs)); \ } \ }; DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Add, +) DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Sub, -) DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Mul, *) DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Div, /) #undef DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR template <typename T> __global__ void SinCosCUDAKernel(const int N, const T* X, T* S, T* C) { CUDA_1D_KERNEL_LOOP(i, N) { #if __CUDA_ARCH__ >= 350 c10::hip::compat::sincos(__ldg(X + i), S + i, C + i); #else c10::hip::compat::sincos(X[i], S + i, C + i); #endif } } template <typename TIn, typename TOut, class BinaryOperator> __global__ void SimpleBinaryOpCUDAKernel( const int N, const BinaryOperator op, const TIn* A, const TIn* B, TOut* C) { CUDA_1D_KERNEL_LOOP(i, N) { C[i] = op(A[i], B[i]); } } template <typename TIn, typename TOut, class BinaryOperator, bool broadcast_1st> __global__ void RowwiseBinaryOpCUDAKenel( const int size, const FIXED_DIVISOR cols, const BinaryOperator op, const TIn* A, const TIn* B, TOut* C) { CUDA_1D_KERNEL_LOOP(C_index, size) { const int j = FIXED_DIVISOR_MOD(cols, C_index); const int A_index = broadcast_1st ? j : C_index; const int B_index = broadcast_1st ? C_index : j; C[C_index] = op(A[A_index], B[B_index]); } } template <typename TIn, typename TOut, class BinaryOperator, bool broadcast_1st> __global__ void ColwiseBinaryOpCUDAKenel( const int size, const FIXED_DIVISOR cols, const BinaryOperator op, const TIn* A, const TIn* B, TOut* C) { CUDA_1D_KERNEL_LOOP(C_index, size) { const int i = FIXED_DIVISOR_DIV(cols, C_index); const int A_index = broadcast_1st ? i : C_index; const int B_index = broadcast_1st ? 
C_index : i; C[C_index] = op(A[A_index], B[B_index]); } } template <typename TIn, typename TOut, class BinaryOperator, int D> __global__ void BroadcastBinaryOpCUDAKernel( const int size, const SimpleArray<int, D> A_strides, const SimpleArray<int, D> B_strides, const SimpleArray<FIXED_DIVISOR, D> C_dims, const BinaryOperator op, const TIn* A, const TIn* B, TOut* C) { CUDA_1D_KERNEL_LOOP(C_index, size) { int A_index = 0; int B_index = 0; int C_index_val = C_index; #pragma unroll for (int i = D - 1; i >= 0; --i) { int d; FIXED_DIVISOR_DIV_MOD(C_dims.data[i], C_index_val, &C_index_val, &d); A_index += d * A_strides.data[i]; B_index += d * B_strides.data[i]; } C[C_index] = op(A[A_index], B[B_index]); } } template <typename TIn, typename TOut, class BinaryOperator> CAFFE2_CUDA_EXPORT void BinaryOpWith2DBroadcasting( const int rows, const int cols, const bool rowwise_broadcast, const bool broadcast_1st, const BinaryOperator& op, const TIn* A, const TIn* B, TOut* C, CUDAContext* context) { if (rows == 0 || cols == 0) { return; } const int size = rows * cols; const FIXED_DIVISOR cols_div(cols); if (rowwise_broadcast) { if (broadcast_1st) { hipLaunchKernelGGL(( RowwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, true>) , dim3(CAFFE_GET_BLOCKS(size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), size, cols_div, op, A, B, C); } else { hipLaunchKernelGGL(( RowwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, false>) , dim3(CAFFE_GET_BLOCKS(size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), size, cols_div, op, A, B, C); } } else { if (broadcast_1st) { hipLaunchKernelGGL(( ColwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, true>) , dim3(CAFFE_GET_BLOCKS(size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), size, cols_div, op, A, B, C); } else { hipLaunchKernelGGL(( ColwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, false>) , dim3(CAFFE_GET_BLOCKS(size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), size, cols_div, op, A, B, C); } } } template <typename TIn, typename TOut, class BinaryOperator, int D> CAFFE2_CUDA_EXPORT void BroadcastBinaryOpImpl( const int* A_dims, const int* B_dims, const int* C_dims, const BinaryOperator& op, const TIn* A, const TIn* B, TOut* C, CUDAContext* context) { SimpleArray<int, D> A_strides_array; SimpleArray<int, D> B_strides_array; SimpleArray<FIXED_DIVISOR, D> C_dims_array; int A_stride = 1; int B_stride = 1; for (int i = D - 1; i >= 0; --i) { if (C_dims[i] == 0) { return; } A_strides_array.data[i] = A_dims[i] == 1 ? 0 : A_stride; B_strides_array.data[i] = B_dims[i] == 1 ? 
0 : B_stride; A_stride *= A_dims[i]; B_stride *= B_dims[i]; C_dims_array.data[i] = FIXED_DIVISOR(C_dims[i]); } const int size = std::accumulate(C_dims, C_dims + D, 1, std::multiplies<int>()); hipLaunchKernelGGL(( BroadcastBinaryOpCUDAKernel<TIn, TOut, BinaryOperator, D>) , dim3(CAFFE_GET_BLOCKS(size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), size, A_strides_array, B_strides_array, C_dims_array, op, A, B, C); } template <typename TIn, typename TOut, class BinaryOperator> CAFFE2_CUDA_EXPORT void BroadcastBinaryOp( const int A_ndim, const int* A_dims, const int B_ndim, const int* B_dims, const BinaryOperator& op, const TIn* A, const TIn* B, TOut* C, CUDAContext* context) { const int ndim = ::max(A_ndim, B_ndim); std::vector<int> A_dims_array(ndim); std::vector<int> B_dims_array(ndim); std::vector<int> C_dims_array(ndim); utils::ComputeBroadcastBinaryOpDims( A_ndim, A_dims, B_ndim, B_dims, A_dims_array.data(), B_dims_array.data(), C_dims_array.data()); if (A_dims_array == B_dims_array) { const int size = std::accumulate( C_dims_array.cbegin(), C_dims_array.cend(), 1, std::multiplies<int>()); hipLaunchKernelGGL(( SimpleBinaryOpCUDAKernel<TIn, TOut, BinaryOperator>) , dim3(CAFFE_GET_BLOCKS(size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), size, op, A, B, C); return; } int rows; int cols; bool broadcast_1st; if (utils::IsRowwiseBroadcastBinaryOp( ndim, A_dims_array.data(), B_dims_array.data(), &rows, &cols, &broadcast_1st)) { BinaryOpWith2DBroadcasting<TIn, TOut, BinaryOperator>( rows, cols, true, broadcast_1st, op, A, B, C, context); return; } if (utils::IsColwiseBroadcastBinaryOp( ndim, A_dims_array.data(), B_dims_array.data(), &rows, &cols, &broadcast_1st)) { BinaryOpWith2DBroadcasting<TIn, TOut, BinaryOperator>( rows, cols, false, broadcast_1st, op, A, B, C, context); return; } DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_3( ndim, BroadcastBinaryOpImpl, TIn, TOut, BinaryOperator, A_dims_array.data(), B_dims_array.data(), C_dims_array.data(), op, A, B, C, context); } } // namespace #define DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(T, Func, op) \ __global__ void Func##CUDAKernel(const int N, const T* X, T* Y) { \ CUDA_1D_KERNEL_LOOP(i, N) { \ Y[i] = op(X[i]); \ } \ } \ template <> \ CAFFE2_CUDA_EXPORT void Func<T, CUDAContext>( \ const int N, const T* x, T* y, CUDAContext* context) { \ hipLaunchKernelGGL(( Func##CUDAKernel), \ CAFFE_GET_BLOCKS(N), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream(), N, x, y); \ } DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Exp, expf) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Log, logf) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cos, cosf) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Acos, acosf) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sin, sinf) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Asin, asinf) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Tan, tanf) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Atan, atanf) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sinh, sinhf) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cosh, coshf) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Tanh, tanhf) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Abs, fabsf) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sqr, utils::Square<float>) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sqrt, sqrtf) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Rsqrt, rsqrtf) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cbrt, cbrtf) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Erf, erff) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Erf, erf) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cube, utils::Cube<float>) 
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Cube, utils::Cube<double>) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION( std::int32_t, Cube, utils::Cube<std::int32_t>) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION( std::int64_t, Cube, utils::Cube<std::int64_t>) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(bool, Not, utils::Not) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Neg, utils::Negate<float>) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Neg, utils::Negate<double>) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION( std::int32_t, Neg, utils::Negate<std::int32_t>) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION( std::int64_t, Neg, utils::Negate<std::int64_t>) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sign, utils::Sign<float>) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Sign, utils::Sign<double>) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION( std::int32_t, Sign, utils::Sign<std::int32_t>) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION( std::int64_t, Sign, utils::Sign<std::int64_t>) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Inv, utils::Inv<float>) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Inv, utils::Inv<double>) #undef DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION #define CAFFE2_SPECIALIZED_CUDA_SINCOS(T) \ template <> \ CAFFE2_CUDA_EXPORT void SinCos<T, CUDAContext>( \ const int N, const T* x, T* ys, T* yc, CUDAContext* context) { \ hipLaunchKernelGGL(( SinCosCUDAKernel), \ CAFFE_GET_BLOCKS(N), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream(), N, x, ys, yc); \ } CAFFE2_SPECIALIZED_CUDA_SINCOS(float) CAFFE2_SPECIALIZED_CUDA_SINCOS(double) #undef CAFFE2_SPECIALIZED_CUDA_SINCOS #define DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(TIn, TOut, Func, Op) \ template <> \ CAFFE2_CUDA_EXPORT void Func<TIn, CUDAContext>( \ const int N, \ const TIn* A, \ const TIn* B, \ TOut* C, \ CUDAContext* context) { \ hipLaunchKernelGGL(( SimpleBinaryOpCUDAKernel<TIn, TOut, Op<TIn>>) \ , dim3(CAFFE_GET_BLOCKS(N)), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream(), N, Op<TIn>(), A, B, C); \ } #define DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(Func, Op) \ DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int32_t, bool, Func, Op) \ DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int64_t, bool, Func, Op) \ DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, bool, Func, Op) \ DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, bool, Func, Op) \ DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, bool, Func, Op) DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(EQ, thrust::equal_to) DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(NE, thrust::not_equal_to) DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(LT, thrust::less) DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(LE, thrust::less_equal) DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(GT, thrust::greater) DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(GE, thrust::greater_equal) #undef DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION #define DEFINE_SIMPLE_CUDA_BINARY_FUNCTION(Func, Op) \ DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int32_t, std::int32_t, Func, Op) \ DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int64_t, std::int64_t, Func, Op) \ DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, float, Func, Op) \ DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, double, Func, Op) \ DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(at::Half, at::Half, Func, Op) DEFINE_SIMPLE_CUDA_BINARY_FUNCTION(Add, AddFunctor) DEFINE_SIMPLE_CUDA_BINARY_FUNCTION(Sub, SubFunctor) DEFINE_SIMPLE_CUDA_BINARY_FUNCTION(Mul, MulFunctor) DEFINE_SIMPLE_CUDA_BINARY_FUNCTION(Div, DivFunctor) #undef DEFINE_SIMPLE_CUDA_BINARY_FUNCTION DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, bool, And, thrust::logical_and) DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, bool, Or, thrust::logical_or) DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, bool, Xor, 
thrust::bit_xor) #define DEFINE_SIMPLE_CUDA_BITWISE_BINARY_FUNCTION(Func, Op) \ DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, bool, Func, Op) \ DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int32_t, std::int32_t, Func, Op) \ DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int64_t, std::int64_t, Func, Op) DEFINE_SIMPLE_CUDA_BITWISE_BINARY_FUNCTION(BitwiseAnd, thrust::bit_and) DEFINE_SIMPLE_CUDA_BITWISE_BINARY_FUNCTION(BitwiseOr, thrust::bit_or) DEFINE_SIMPLE_CUDA_BITWISE_BINARY_FUNCTION(BitwiseXor, thrust::bit_xor) #undef DEFINE_SIMPLE_CUDA_BITWISE_BINARY_FUNCTION DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION( float, float, ElemwiseMax, thrust::maximum); #undef DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION #define DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(TIn, TOut, Func, Op) \ template <> \ CAFFE2_CUDA_EXPORT void Rowwise##Func<TIn, CUDAContext, true>( \ const int rows, \ const int cols, \ const TIn* A, \ const TIn* B, \ TOut* C, \ CUDAContext* context) { \ if (rows == 0 || cols == 0) { \ return; \ } \ const int size = rows * cols; \ const FIXED_DIVISOR cols_div(cols); \ hipLaunchKernelGGL(( RowwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, true>) \ , dim3(CAFFE_GET_BLOCKS(size)), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream(), size, cols_div, Op<TIn>(), A, B, C); \ } \ template <> \ CAFFE2_CUDA_EXPORT void Rowwise##Func<TIn, CUDAContext, false>( \ const int rows, \ const int cols, \ const TIn* A, \ const TIn* B, \ TOut* C, \ CUDAContext* context) { \ if (rows == 0 || cols == 0) { \ return; \ } \ const int size = rows * cols; \ const FIXED_DIVISOR cols_div(cols); \ hipLaunchKernelGGL(( RowwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, false>) \ , dim3(CAFFE_GET_BLOCKS(size)), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream(), size, cols_div, Op<TIn>(), A, B, C); \ } \ template <> \ CAFFE2_CUDA_EXPORT void Colwise##Func<TIn, CUDAContext, true>( \ const int rows, \ const int cols, \ const TIn* A, \ const TIn* B, \ TOut* C, \ CUDAContext* context) { \ if (rows == 0 || cols == 0) { \ return; \ } \ const int size = rows * cols; \ const FIXED_DIVISOR cols_div(cols); \ hipLaunchKernelGGL(( ColwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, true>) \ , dim3(CAFFE_GET_BLOCKS(size)), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream(), size, cols_div, Op<TIn>(), A, B, C); \ } \ template <> \ CAFFE2_CUDA_EXPORT void Colwise##Func<TIn, CUDAContext, false>( \ const int rows, \ const int cols, \ const TIn* A, \ const TIn* B, \ TOut* C, \ CUDAContext* context) { \ if (rows == 0 || cols == 0) { \ return; \ } \ const int size = rows * cols; \ const FIXED_DIVISOR cols_div(cols); \ hipLaunchKernelGGL(( ColwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, false>) \ , dim3(CAFFE_GET_BLOCKS(size)), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream(), size, cols_div, Op<TIn>(), A, B, C); \ } #define DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(Func, Op) \ DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(std::int32_t, bool, Func, Op) \ DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(std::int64_t, bool, Func, Op) \ DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(float, bool, Func, Op) \ DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(double, bool, Func, Op) \ DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op) DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(EQ, thrust::equal_to) DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(NE, thrust::not_equal_to) DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(LT, thrust::less) DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(LE, thrust::less_equal) DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(GT, thrust::greater) 
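// Rowwise* treats the broadcast operand as a length-cols row reused for every
// row; Colwise* treats it as a length-rows column reused for every column.
// Illustrative sketch (buffer names assumed, not from this file): adding a
// per-column bias b[cols] to a row-major rows x cols matrix would use
//
//   math::RowwiseAdd<float, CUDAContext, false>(rows, cols, d_m, d_b, d_out, &context);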
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(GE, thrust::greater_equal) #undef DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION #define DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Func, Op) \ DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION( \ std::int32_t, std::int32_t, Func, Op) \ DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION( \ std::int64_t, std::int64_t, Func, Op) \ DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(float, float, Func, Op) \ DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(double, double, Func, Op) \ DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(at::Half, at::Half, Func, Op) DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Add, AddFunctor) DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Sub, SubFunctor) DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Mul, MulFunctor) DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Div, DivFunctor) #undef DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, And, thrust::logical_and) DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Or, thrust::logical_or) DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Xor, thrust::bit_xor) #define DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(Func, Op) \ DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op) \ DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION( \ std::int32_t, std::int32_t, Func, Op) \ DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION( \ std::int64_t, std::int64_t, Func, Op) DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseAnd, thrust::bit_and) DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseOr, thrust::bit_or) DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseXor, thrust::bit_xor) #undef DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION #undef DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION #define DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(TIn, TOut, Func, Op) \ template <> \ CAFFE2_CUDA_EXPORT void Func<TIn, CUDAContext>( \ const int A_ndim, \ const int* A_dims, \ const int B_ndim, \ const int* B_dims, \ const TIn* A, \ const TIn* B, \ TOut* C, \ CUDAContext* context) { \ BroadcastBinaryOp<TIn, TOut, Op<TIn>>( \ A_ndim, A_dims, B_ndim, B_dims, Op<TIn>(), A, B, C, context); \ } #define DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(Func, Op) \ DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(std::int32_t, bool, Func, Op) \ DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(std::int64_t, bool, Func, Op) \ DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(float, bool, Func, Op) \ DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(double, bool, Func, Op) \ DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op) DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(EQ, thrust::equal_to) DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(NE, thrust::not_equal_to) DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(LT, thrust::less) DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(LE, thrust::less_equal) DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(GT, thrust::greater) DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(GE, thrust::greater_equal) #undef DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION #define DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Func, Op) \ DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION( \ std::int32_t, std::int32_t, Func, Op) \ DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION( \ std::int64_t, std::int64_t, Func, Op) \ DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(float, float, Func, Op) \ DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(double, double, Func, Op) \ DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(at::Half, at::Half, Func, Op) DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Add, AddFunctor) DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Sub, SubFunctor) DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Mul, MulFunctor) 
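// BroadcastBinaryOp picks the cheapest kernel it can: identical shapes go to
// SimpleBinaryOpCUDAKernel, shapes that reduce to a 2D rowwise or colwise
// pattern go to the specialized kernels above, and everything else falls back
// to the generic N-D stride walk. Illustrative call (shapes assumed):
//
//   int a_dims[3] = {2, 3, 4};
//   int b_dims[3] = {1, 3, 1};
//   math::Add<float, CUDAContext>(3, a_dims, 3, b_dims, d_a, d_b, d_c, &context);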
DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Div, DivFunctor) #undef DEFINE_BROADCAST_CUDA_BINARY_FUNCTION DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, And, thrust::logical_and) DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Or, thrust::logical_or) DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Xor, thrust::bit_xor) #define DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(Func, Op) \ DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op) \ DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION( \ std::int32_t, std::int32_t, Func, Op) \ DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(std::int64_t, std::int64_t, Func, Op) DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseAnd, thrust::bit_and) DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseOr, thrust::bit_or) DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseXor, thrust::bit_xor) #undef DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION #undef DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION #define DELEGATE_REDUCTION_FUNCTION(T, Funcname, func) \ template <> \ CAFFE2_CUDA_EXPORT void Funcname<T, CUDAContext>( \ const int N, \ const T* src, \ T* dst, \ Tensor* scratch_ptr, \ CUDAContext* context) { \ size_t memRequired = 0; \ hipcub::DeviceReduce::func( \ nullptr, memRequired, src, dst, N, context->cuda_stream()); \ auto buffer_size = \ static_cast<int64_t>((memRequired + sizeof(T) - 1) / sizeof(T)); \ scratch_ptr->Resize(std::vector<int64_t>{buffer_size}); \ hipcub::DeviceReduce::func( \ static_cast<void*>(scratch_ptr->mutable_data<T>()), \ memRequired, \ src, \ dst, \ N, \ context->cuda_stream()); \ } DELEGATE_REDUCTION_FUNCTION(float, ReduceMin, Min) DELEGATE_REDUCTION_FUNCTION(float, ReduceMax, Max) DELEGATE_REDUCTION_FUNCTION(int32_t, ReduceMax, Max) DELEGATE_REDUCTION_FUNCTION(int64_t, ReduceMax, Max) #undef DELEGATE_REDUCTION_FUNCTION // Caffe2 gemm provides a simpler interface to the gemm functions, with the // limitation that the data has to be contiguous in memory. template <> CAFFE2_CUDA_EXPORT void Gemm<float, CUDAContext>( const CBLAS_TRANSPOSE trans_A, const CBLAS_TRANSPOSE trans_B, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C, CUDAContext* context, TensorProto::DataType math_type) { // Note that cublas follows fortran order, so the order is different from // the cblas convention. const int lda = (trans_A == CblasNoTrans) ? K : M; const int ldb = (trans_B == CblasNoTrans) ? N : K; const hipblasOperation_t cu_trans_A = (trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; const hipblasOperation_t cu_trans_B = (trans_B == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; CUBLAS_ENFORCE( hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE(hipblasSgemm( context->cublas_handle(), cu_trans_B, cu_trans_A, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> CAFFE2_CUDA_EXPORT void Gemm<at::Half, CUDAContext>( const CBLAS_TRANSPOSE trans_A, const CBLAS_TRANSPOSE trans_B, const int M, const int N, const int K, const float alpha, const at::Half* A, const at::Half* B, const float beta, at::Half* C, CUDAContext* context, TensorProto::DataType math_type) { // Note that cublas follows fortran order, so the order is different from // the cblas convention. const int lda = (trans_A == CblasNoTrans) ? K : M; const int ldb = (trans_B == CblasNoTrans) ? N : K; const hipblasOperation_t cu_trans_A = (trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; const hipblasOperation_t cu_trans_B = (trans_B == CblasNoTrans) ? 
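// cuBLAS/rocBLAS assume column-major storage while Caffe2 tensors are
// row-major, so these wrappers compute C^T = B^T * A^T: the operands are
// passed as (B, A) with M and N swapped, which yields the row-major
// C = A * B without any explicit transpose.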
HIPBLAS_OP_N : HIPBLAS_OP_T; if (math_type == TensorProto_DataType_FLOAT) { CUBLAS_ENFORCE(hipblasSetPointerMode( context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST)); #ifdef __HIP_PLATFORM_HCC__ // rocblas doesn't support cublasSgemmEx type API yet. // It has more general rocblas_gemm_ex API which is more close to // hipblasGemmEx rocblas_gemm_ex does D = alpha*op( A )*op( B ) + beta*C, // whereas cublasgemmEx does C = alpha*op( A )*op( B ) + beta*C ROCBLAS_ENFORCE(rocblas_gemm_ex( context->rocblashandle(), cu_trans_B, cu_trans_A, N, M, K, &alpha, B, rocblas_datatype_f16_r, ldb, A, rocblas_datatype_f16_r, lda, &beta, C, rocblas_datatype_f16_r, N, C, // D rocblas_datatype_f16_r, // D type N, // ldd rocblas_datatype_f32_r, // compute type rocblas_gemm_algo_standard, // rocblas_gemm_algo 0, // solution index, reserved for future use 0, // flags, reserved for future use NULL, // size of workspace NULL)); // workspace #else CUBLAS_ENFORCE(cublasSgemmEx( context->cublas_handle(), cu_trans_B, cu_trans_A, N, M, K, &alpha, B, HIP_R_16F, ldb, A, HIP_R_16F, lda, &beta, C, HIP_R_16F, N)); #endif // __HIP_PLATFORM_HCC__ } else if (math_type == TensorProto_DataType_FLOAT16) { // convert alpha, beta from float -> __half const __half alpha_fp16 = at::Half(alpha); const __half beta_fp16 = at::Half(beta); // call hipblasHgemm CUBLAS_ENFORCE(hipblasSetPointerMode( context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE(hipblasHgemm( context->cublas_handle(), cu_trans_B, cu_trans_A, N, M, K, reinterpret_cast<const CUBLAS_HALF_TYPE*>(&alpha_fp16), reinterpret_cast<const CUBLAS_HALF_TYPE*>(B), ldb, reinterpret_cast<const CUBLAS_HALF_TYPE*>(A), lda, reinterpret_cast<const CUBLAS_HALF_TYPE*>(&beta_fp16), reinterpret_cast<CUBLAS_HALF_TYPE*>(C), N)); } else { // fail CAFFE_THROW("Unsupported math type"); } } template <> CAFFE2_CUDA_EXPORT void BiasCHW<float, CUDAContext>( const float* bias, const float* bias_multiplier, const int bias_channels, const int image_size, float* image, CUDAContext* context) { Gemm<float, CUDAContext>( CblasNoTrans, CblasNoTrans, bias_channels, image_size, 1, 1, bias, bias_multiplier, 1, image, context); } template <> CAFFE2_CUDA_EXPORT void GemmBatched<float, CUDAContext>( const CBLAS_TRANSPOSE trans_A, const CBLAS_TRANSPOSE trans_B, const int batch_size, const int M, const int N, const int K, const float alpha, const float** A, const float** B, const float beta, float** C, CUDAContext* context, TensorProto::DataType math_type) { #if __CUDACC_VER_MAJOR__ < 8 || defined(__HIP_PLATFORM_HCC__) // loop over matrices in the batch for (int i = 0; i < batch_size; ++i) { Gemm<float, CUDAContext>( trans_A, trans_B, M, N, K, alpha, A[i], B[i], beta, C[i], context, math_type); } #else // Note that cublas follows fortran order, so the order is different from // the cblas convention. const int lda = (trans_A == CblasNoTrans) ? K : M; const int ldb = (trans_B == CblasNoTrans) ? N : K; const int ldc = N; const hipblasOperation_t cu_trans_A = (trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; const hipblasOperation_t cu_trans_B = (trans_B == CblasNoTrans) ? 
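// BiasCHW above adds bias[c] to every spatial position of channel c by
// expressing it as a rank-1 GEMM: (bias_channels x 1) * (1 x image_size)
// with alpha = beta = 1 accumulated into the image.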
HIPBLAS_OP_N : HIPBLAS_OP_T; thrust::device_vector<const float*> A_device(A, A + batch_size); thrust::device_vector<const float*> B_device(B, B + batch_size); thrust::device_vector<float*> C_device(C, C + batch_size); CUBLAS_ENFORCE( hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE(hipblasSgemmBatched( context->cublas_handle(), cu_trans_B, cu_trans_A, N, M, K, &alpha, B_device.data().get(), ldb, A_device.data().get(), lda, &beta, C_device.data().get(), ldc, batch_size)); #endif } template <> CAFFE2_CUDA_EXPORT void GemmStridedBatched<float, CUDAContext>( const CBLAS_TRANSPOSE trans_A, const CBLAS_TRANSPOSE trans_B, const int batch_size, const int M, const int N, const int K, const float alpha, const float* A, const int A_stride, const float* B, const int B_stride, const float beta, float* C, const int C_stride, CUDAContext* context, TensorProto::DataType math_type) { #if __CUDACC_VER_MAJOR__ < 8 && !defined(__HIP_PLATFORM_HCC__) // loop over matrices in the batch for (int i = 0; i < batch_size; ++i) { Gemm<float, CUDAContext>( trans_A, trans_B, M, N, K, alpha, A, B, beta, C, context, math_type); A += A_stride; B += B_stride; C += C_stride; } #else // Note that cublas follows fortran order, so the order is different from // the cblas convention. const int lda = (trans_A == CblasNoTrans) ? K : M; const int ldb = (trans_B == CblasNoTrans) ? N : K; const int ldc = N; const hipblasOperation_t cu_trans_A = (trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; const hipblasOperation_t cu_trans_B = (trans_B == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; CUBLAS_ENFORCE( hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE(hipblasSgemmStridedBatched( context->cublas_handle(), cu_trans_B, cu_trans_A, N, M, K, &alpha, B, ldb, B_stride, A, lda, A_stride, &beta, C, ldc, C_stride, batch_size)); #endif } template <> CAFFE2_CUDA_EXPORT void GemmBatched<at::Half, CUDAContext>( const CBLAS_TRANSPOSE trans_A, const CBLAS_TRANSPOSE trans_B, const int batch_size, const int M, const int N, const int K, const float alpha, const at::Half** A, const at::Half** B, const float beta, at::Half** C, CUDAContext* context, TensorProto::DataType math_type) { #if __CUDACC_VER_MAJOR__ < 9 // loop over matrices in the batch for (int i = 0; i < batch_size; ++i) { Gemm<at::Half, CUDAContext>( trans_A, trans_B, M, N, K, alpha, A[i], B[i], beta, C[i], context, math_type); } #else // Note that cublas follows fortran order, so the order is different from // the cblas convention. const int lda = (trans_A == CblasNoTrans) ? K : M; const int ldb = (trans_B == CblasNoTrans) ? N : K; const int ldc = N; const hipblasOperation_t cu_trans_A = (trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; const hipblasOperation_t cu_trans_B = (trans_B == CblasNoTrans) ? 
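// GemmBatched gathers per-matrix device pointers into thrust::device_vectors
// for the cublas*Batched API, whereas GemmStridedBatched passes one base
// pointer plus a fixed stride per operand and avoids that extra device
// allocation; the strided variant is generally preferable when the batch is
// laid out contiguously in memory.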
HIPBLAS_OP_N : HIPBLAS_OP_T; if (math_type == TensorProto_DataType_FLOAT) { #if TORCH_HIP_VERSION < 9010 // loop over matrices in the batch for (int i = 0; i < batch_size; ++i) { Gemm<at::Half, CUDAContext>( trans_A, trans_B, M, N, K, alpha, A[i], B[i], beta, C[i], context, math_type); } #else thrust::device_vector<const void*> A_device(A, A + batch_size); thrust::device_vector<const void*> B_device(B, B + batch_size); thrust::device_vector<void*> C_device(C, C + batch_size); CUBLAS_ENFORCE(hipblasSetPointerMode( context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE(hipblasGemmBatchedEx( context->cublas_handle(), cu_trans_B, cu_trans_A, N, M, K, &alpha, B_device.data().get(), HIP_R_16F, ldb, A_device.data().get(), HIP_R_16F, lda, &beta, C_device.data().get(), HIP_R_16F, ldc, batch_size, HIP_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); #endif } else if (math_type == TensorProto_DataType_FLOAT16) { // Convert alpha, beta from float -> __half const __half alpha_fp16 = at::Half(alpha); const __half beta_fp16 = at::Half(beta); std::vector<const __half*> A_array(batch_size); std::vector<const __half*> B_array(batch_size); std::vector<__half*> C_array(batch_size); for (int i = 0; i < batch_size; ++i) { A_array[i] = reinterpret_cast<const __half*>(A[i]); B_array[i] = reinterpret_cast<const __half*>(B[i]); C_array[i] = reinterpret_cast<__half*>(C[i]); } thrust::device_vector<const __half*> A_device( A_array.cbegin(), A_array.cend()); thrust::device_vector<const __half*> B_device( B_array.cbegin(), B_array.cend()); thrust::device_vector<__half*> C_device(C_array.cbegin(), C_array.cend()); CUBLAS_ENFORCE(hipblasSetPointerMode( context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE(hipblasHgemmBatched( context->cublas_handle(), cu_trans_B, cu_trans_A, N, M, K, &alpha_fp16, B_device.data().get(), ldb, A_device.data().get(), lda, &beta_fp16, C_device.data().get(), ldc, batch_size)); } else { CAFFE_THROW("Unsupported math type"); } #endif } template <> CAFFE2_CUDA_EXPORT void GemmStridedBatched<at::Half, CUDAContext>( const CBLAS_TRANSPOSE trans_A, const CBLAS_TRANSPOSE trans_B, const int batch_size, const int M, const int N, const int K, const float alpha, const at::Half* A, const int A_stride, const at::Half* B, const int B_stride, const float beta, at::Half* C, const int C_stride, CUDAContext* context, TensorProto::DataType math_type) { #if __CUDACC_VER_MAJOR__ < 8 && !defined(__HIP_PLATFORM_HCC__) // loop over matrices in the batch for (int i = 0; i < batch_size; ++i) { Gemm<at::Half, CUDAContext>( trans_A, trans_B, M, N, K, alpha, A, B, beta, C, context, math_type); A += A_stride; B += B_stride; C += C_stride; } #else // Note that cublas follows fortran order, so the order is different from // the cblas convention. const int lda = (trans_A == CblasNoTrans) ? K : M; const int ldb = (trans_B == CblasNoTrans) ? N : K; const int ldc = N; const hipblasOperation_t cu_trans_A = (trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; const hipblasOperation_t cu_trans_B = (trans_B == CblasNoTrans) ? 
HIPBLAS_OP_N : HIPBLAS_OP_T; if (math_type == TensorProto_DataType_FLOAT) { #if TORCH_HIP_VERSION < 9010 && !defined(__HIP_PLATFORM_HCC__) // loop over matrices in the batch for (int i = 0; i < batch_size; ++i) { Gemm<at::Half, CUDAContext>( trans_A, trans_B, M, N, K, alpha, A, B, beta, C, context, math_type); A += A_stride; B += B_stride; C += C_stride; } #else CUBLAS_ENFORCE(hipblasSetPointerMode( context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST)); #ifdef __HIP_PLATFORM_HCC__ // D[i*stride_d] = alpha*op(A[i*stride_a])*op(B[i*stride_b]) + // beta*C[i*stride_c], for i in [0,batch_count-1] ROCBLAS_ENFORCE(rocblas_gemm_strided_batched_ex( context->rocblashandle(), cu_trans_B, cu_trans_A, N, M, K, &alpha, B, rocblas_datatype_f16_r, ldb, B_stride, A, rocblas_datatype_f16_r, lda, A_stride, &beta, C, rocblas_datatype_f16_r, ldc, C_stride, C, // D rocblas_datatype_f16_r, // D type ldc, // ldd C_stride, // D stride batch_size, rocblas_datatype_f32_r, // compute type rocblas_gemm_algo_standard, // rocblas_gemm_algo 0, // solution index, reserved for future use 0, // flags, reserved for future use NULL, // size of workspace NULL)); // workspace #else CUBLAS_ENFORCE(hipblasGemmStridedBatchedEx( context->cublas_handle(), cu_trans_B, cu_trans_A, N, M, K, &alpha, B, HIP_R_16F, ldb, B_stride, A, HIP_R_16F, lda, A_stride, &beta, C, HIP_R_16F, ldc, C_stride, batch_size, HIP_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); #endif // __HIP_PLATFORM_HCC__ #endif } else if (math_type == TensorProto_DataType_FLOAT16) { // Convert alpha, beta from float -> __half const __half alpha_fp16 = at::Half(alpha); const __half beta_fp16 = at::Half(beta); CUBLAS_ENFORCE(hipblasSetPointerMode( context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE(hipblasHgemmStridedBatched( context->cublas_handle(), cu_trans_B, cu_trans_A, N, M, K, reinterpret_cast<const CUBLAS_HALF_TYPE*>(&alpha_fp16), reinterpret_cast<const CUBLAS_HALF_TYPE*>(B), ldb, B_stride, reinterpret_cast<const CUBLAS_HALF_TYPE*>(A), lda, A_stride, reinterpret_cast<const CUBLAS_HALF_TYPE*>(&beta_fp16), reinterpret_cast<CUBLAS_HALF_TYPE*>(C), ldc, C_stride, batch_size)); } else { CAFFE_THROW("Unsupported math type"); } #endif } #if TORCH_HIP_VERSION >= 9000 // No change, but required. Defer to default CUDA engine template <> CAFFE2_CUDA_EXPORT void Gemm<float, CUDAContext, TensorCoreEngine>( const CBLAS_TRANSPOSE trans_A, const CBLAS_TRANSPOSE trans_B, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C, CUDAContext* context, TensorProto::DataType math_type) { return Gemm<float, CUDAContext>( trans_A, trans_B, M, N, K, alpha, A, B, beta, C, context, math_type); } template <> CAFFE2_CUDA_EXPORT void Gemm<at::Half, CUDAContext, TensorCoreEngine>( const CBLAS_TRANSPOSE trans_A, const CBLAS_TRANSPOSE trans_B, const int M, const int N, const int K, const float alpha, const at::Half* A, const at::Half* B, const float beta, at::Half* C, CUDAContext* context, TensorProto::DataType math_type) { // Note that cublas follows fortran order, so the order is different from // the cblas convention. const int lda = (trans_A == CblasNoTrans) ? K : M; const int ldb = (trans_B == CblasNoTrans) ? N : K; const hipblasOperation_t cu_trans_A = (trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; const hipblasOperation_t cu_trans_B = (trans_B == CblasNoTrans) ? 
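// This TensorCore path switches the shared cuBLAS handle to
// CUBLAS_TENSOR_OP_MATH just for the GemmEx call (only when
// TensorCoreAvailable()) and restores CUBLAS_DEFAULT_MATH afterwards, so
// other users of the handle keep the default math mode.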
HIPBLAS_OP_N : HIPBLAS_OP_T; // enable TensorCore for this call on this handle if (TensorCoreAvailable()) { CUBLAS_ENFORCE( cublasSetMathMode(context->cublas_handle(), CUBLAS_TENSOR_OP_MATH)); } CUBLAS_ENFORCE( hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE(hipblasGemmEx( context->cublas_handle(), cu_trans_B, cu_trans_A, N, M, K, &alpha, B, HIP_R_16F, ldb, A, HIP_R_16F, lda, &beta, C, HIP_R_16F, N, HIP_R_32F, CUBLAS_GEMM_DFALT_TENSOR_OP)); // Now disable TensorCore math for subsequent calls to this handle if (TensorCoreAvailable()) { CUBLAS_ENFORCE( cublasSetMathMode(context->cublas_handle(), CUBLAS_DEFAULT_MATH)); } } template <> CAFFE2_CUDA_EXPORT void GemmStridedBatched<float, CUDAContext, TensorCoreEngine>( const CBLAS_TRANSPOSE trans_A, const CBLAS_TRANSPOSE trans_B, const int batch_size, const int M, const int N, const int K, const float alpha, const float* A, const int A_stride, const float* B, const int B_stride, const float beta, float* C, const int C_stride, CUDAContext* context, TensorProto::DataType math_type) { return GemmStridedBatched<float, CUDAContext, DefaultEngine>( trans_A, trans_B, batch_size, M, N, K, alpha, A, A_stride, B, B_stride, beta, C, C_stride, context, math_type); } template <> CAFFE2_CUDA_EXPORT void GemmStridedBatched<at::Half, CUDAContext, TensorCoreEngine>( const CBLAS_TRANSPOSE trans_A, const CBLAS_TRANSPOSE trans_B, const int batch_size, const int M, const int N, const int K, const float alpha, const at::Half* A, const int A_stride, const at::Half* B, const int B_stride, const float beta, at::Half* C, const int C_stride, CUDAContext* context, TensorProto::DataType math_type) { return GemmStridedBatched<at::Half, CUDAContext, DefaultEngine>( trans_A, trans_B, batch_size, M, N, K, alpha, A, A_stride, B, B_stride, beta, C, C_stride, context, math_type); } #endif // TORCH_HIP_VERSION >= 9000 template <> CAFFE2_CUDA_EXPORT void GemmEx<float, CUDAContext>( const CBLAS_TRANSPOSE trans_A, const CBLAS_TRANSPOSE trans_B, const int M, const int N, const int K, const float alpha, const float* A, const int lda, const float* B, const int ldb, const float beta, float* C, const int ldc, CUDAContext* context) { // Note that cublas follows fortran order, so the order is different from // the cblas convention. const hipblasOperation_t cu_trans_A = (trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; const hipblasOperation_t cu_trans_B = (trans_B == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; CUBLAS_ENFORCE( hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE(hipblasSgemm( context->cublas_handle(), cu_trans_B, cu_trans_A, N, M, K, &alpha, B, ldb, A, lda, &beta, C, ldc)); } template <> CAFFE2_CUDA_EXPORT void Gemv<float, CUDAContext>( const CBLAS_TRANSPOSE trans_A, const int M, const int N, const float alpha, const float* A, const float* x, const float beta, float* y, CUDAContext* context, TensorProto::DataType math_type) { const hipblasOperation_t cu_trans_A = (trans_A == CblasNoTrans) ? 
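// Note the inverted mapping compared to Gemm: a row-major M x N matrix is the
// N x M column-major A^T from cuBLAS's point of view, so CblasNoTrans becomes
// HIPBLAS_OP_T (and vice versa) and the leading dimension passed below is N.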
HIPBLAS_OP_T : HIPBLAS_OP_N; CUBLAS_ENFORCE( hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE(hipblasSgemv( context->cublas_handle(), cu_trans_A, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } // Batched Add variants namespace { template <typename T> __global__ void AddStripedBatchKernel( const int N, const T* first, T* Y, const int stripe, const int batch) { for (int j = 0; j < batch; j++) { const T* x = first + j * stripe; CUDA_1D_KERNEL_LOOP(i, N) { float tmpY = convert::To<T, float>(Y[i]); tmpY += convert::To<T, float>(x[i]); Y[i] = convert::To<float, T>(tmpY); } } } } // namespace #define CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(T) \ template <> \ CAFFE2_CUDA_EXPORT void AddStripedBatch<T, CUDAContext>( \ const int N, \ const T* first, \ T* Y, \ const int stripe, \ const int batch, \ CUDAContext* context) { \ hipLaunchKernelGGL(( AddStripedBatchKernel<T>) \ , dim3(CAFFE_GET_BLOCKS(N)), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream(), N, first, Y, stripe, batch); \ } CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(float); CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(at::Half); #undef CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH template <> CAFFE2_CUDA_EXPORT void Gemv<at::Half, CUDAContext>( const CBLAS_TRANSPOSE trans_A, const int M, const int N, const float alpha, const at::Half* A, const at::Half* x, const float beta, at::Half* y, CUDAContext* context, TensorProto::DataType math_type) { const hipblasOperation_t cu_trans_A = (trans_A == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N; // sort out what we need to call cublasSgemmEx / hipblasHgemm const int m = (cu_trans_A == HIPBLAS_OP_N) ? N : M; const int k = (cu_trans_A == HIPBLAS_OP_N) ? M : N; const int lda = (cu_trans_A == HIPBLAS_OP_N) ? m : k; const int ldc = m; if (math_type == TensorProto_DataType_FLOAT) { CUBLAS_ENFORCE(hipblasSetPointerMode( context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST)); #ifdef __HIP_PLATFORM_HCC__ // rocblas doesn't support cublasSgemmEx type API yet. 
// It has more general rocblas_gemm_ex API which is more close to // hipblasGemmEx rocblas_gemm_ex does D = alpha*op( A )*op( B ) + beta*C, // whereas cublasgemmEx does C = alpha*op( A )*op( B ) + beta*C ROCBLAS_ENFORCE(rocblas_gemm_ex( context->rocblashandle(), cu_trans_A, rocblas_operation_none, m, 1, k, &alpha, A, rocblas_datatype_f16_r, lda, x, rocblas_datatype_f16_r, k, &beta, y, rocblas_datatype_f16_r, ldc, y, // D rocblas_datatype_f16_r, // D type ldc, // ldd rocblas_datatype_f32_r, // compute type rocblas_gemm_algo_standard, // rocblas_gemm_algo 0, // solution index, reserved for future use 0, // flags, reserved for future use NULL, // size of workspace NULL)); // workspace #else CUBLAS_ENFORCE(cublasSgemmEx( context->cublas_handle(), cu_trans_A, HIPBLAS_OP_N, m, 1, k, &alpha, A, HIP_R_16F, lda, x, HIP_R_16F, k, &beta, y, HIP_R_16F, ldc)); #endif // __HIP_PLATFORM_HCC__ } else if (math_type == TensorProto_DataType_FLOAT16) { const __half alpha_fp16 = at::Half(alpha); const __half beta_fp16 = at::Half(beta); CUBLAS_ENFORCE(hipblasSetPointerMode( context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE(hipblasHgemm( context->cublas_handle(), cu_trans_A, HIPBLAS_OP_N, m, 1, k, reinterpret_cast<const CUBLAS_HALF_TYPE*>(&alpha_fp16), reinterpret_cast<const CUBLAS_HALF_TYPE*>(A), lda, reinterpret_cast<const CUBLAS_HALF_TYPE*>(x), k, reinterpret_cast<const CUBLAS_HALF_TYPE*>(&beta_fp16), reinterpret_cast<CUBLAS_HALF_TYPE*>(y), ldc)); } else { // fail CAFFE_THROW("Unsupported math type"); } } namespace { template <typename T> __global__ void SetKernel(const int N, const T alpha, T* Y) { CUDA_1D_KERNEL_LOOP(i, N) { Y[i] = alpha; } } } // namespace #define CAFFE2_SPECIALIZED_CUDA_SET(T) \ template <> \ CAFFE2_CUDA_API void Set<T, CUDAContext>( \ const size_t N, const T alpha, T* Y, CUDAContext* context) { \ if (N == 0) { \ return; \ } \ if (alpha == T(0)) { \ hipMemsetAsync(Y, 0, sizeof(T) * N, context->cuda_stream()); \ } else { \ hipLaunchKernelGGL(( SetKernel<T>) \ , dim3(CAFFE_GET_BLOCKS(N)), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream(), N, alpha, Y); \ } \ } CAFFE2_SPECIALIZED_CUDA_SET(float); CAFFE2_SPECIALIZED_CUDA_SET(double); CAFFE2_SPECIALIZED_CUDA_SET(bool); CAFFE2_SPECIALIZED_CUDA_SET(int8_t); CAFFE2_SPECIALIZED_CUDA_SET(int16_t); CAFFE2_SPECIALIZED_CUDA_SET(int); CAFFE2_SPECIALIZED_CUDA_SET(int64_t); CAFFE2_SPECIALIZED_CUDA_SET(char); CAFFE2_SPECIALIZED_CUDA_SET(uint8_t); CAFFE2_SPECIALIZED_CUDA_SET(uint16_t); #undef CAFFE2_SPECIALIZED_CUDA_SET template <> CAFFE2_CUDA_EXPORT void Set<at::Half, CUDAContext>( const size_t N, const at::Half alpha, at::Half* Y, CUDAContext* context) { if (N > 0) { hipLaunchKernelGGL(( SetKernel<at::Half>) , dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), N, alpha, Y); } } namespace { template <typename T> __global__ void UniformShift(const size_t N, const float min, const float max, T* x) { float scale = max - min; CUDA_1D_KERNEL_LOOP(i, N) { x[i] = convert::To<float, T>(convert::To<T, float>(x[i]) * scale + min); } } __global__ void UniformIntFit(const size_t N, const int min, const int max, unsigned int* x) { int* x_int = reinterpret_cast<int*>(x); int range = (max - min + 1); CUDA_1D_KERNEL_LOOP(i, N) { x_int[i] = min + static_cast<int>(x[i] % range); } } } // namespace template <> CAFFE2_CUDA_EXPORT void RandUniform<float, CUDAContext>( const size_t n, const float min, const float max, float* r, CUDAContext* context) { CURAND_ENFORCE(hiprandGenerateUniform(context->curand_generator(), 
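// hiprandGenerateUniform fills r with uniform samples in (0, 1]; the
// UniformShift kernel launched next rescales them with x * (max - min) + min
// to cover the requested interval.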
r, n)); hipLaunchKernelGGL(( UniformShift<float>) , dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), n, min, max, r); } template <> CAFFE2_CUDA_EXPORT void RandUniform<double, CUDAContext>( const size_t n, const double min, const double max, double* r, CUDAContext* context) { CURAND_ENFORCE( hiprandGenerateUniformDouble(context->curand_generator(), r, n)); hipLaunchKernelGGL(( UniformShift<double>) , dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), n, min, max, r); } template <> CAFFE2_CUDA_EXPORT void RandUniform<int, CUDAContext>( const size_t n, const int min, const int max, int* r, CUDAContext* context) { CURAND_ENFORCE(hiprandGenerate( context->curand_generator(), reinterpret_cast<unsigned int*>(r), n)); hipLaunchKernelGGL(( UniformIntFit), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), n, min, max, reinterpret_cast<unsigned int*>(r)); } template <typename T> size_t HandleOddLengthRandGaussian( const size_t n, const T mean, const T std, T* r, CUDAContext* context) { if (n % 2 == 1) { std::default_random_engine generator; std::normal_distribution<T> distribution(mean, std); const T random_value = distribution(generator); Set<T, CUDAContext>(1, random_value, r + (n - 1), context); return n - 1; } return n; } template <> CAFFE2_CUDA_EXPORT void RandGaussian<float, CUDAContext>( const size_t n, const float mean, const float std, float* r, CUDAContext* context) { // If n is odd, we add a random Gaussian value at the end manually // and generate n-1 random values using hiprandGenerateNormal. // hiprandGenerateNormal requires n to be even. const size_t even_n = HandleOddLengthRandGaussian<float>(n, mean, std, r, context); CURAND_ENFORCE( hiprandGenerateNormal(context->curand_generator(), r, even_n, mean, std)); } template <> CAFFE2_CUDA_EXPORT void RandGaussian<double, CUDAContext>( const size_t n, const double mean, const double std, double* r, CUDAContext* context) { const size_t even_n = HandleOddLengthRandGaussian<double>(n, mean, std, r, context); CURAND_ENFORCE(hiprandGenerateNormalDouble( context->curand_generator(), r, even_n, mean, std)); } template <> CAFFE2_CUDA_EXPORT void Dot<float, CUDAContext>( const int n, const float* a, const float* b, float* y, CUDAContext* context) { CUBLAS_ENFORCE(hipblasSetPointerMode( context->cublas_handle(), HIPBLAS_POINTER_MODE_DEVICE)); CUBLAS_ENFORCE(hipblasSdot(context->cublas_handle(), n, a, 1, b, 1, y)); } template <> CAFFE2_CUDA_EXPORT void Dot<at::Half, CUDAContext>( const int n, const at::Half* a, const at::Half* b, at::Half* y, CUDAContext* context) { #if defined(__HIP_PLATFORM_HCC__) CAFFE_THROW("HIP currently does not support FP16 completely yet."); #else // execute with 32-bit math CUBLAS_ENFORCE(hipblasSetPointerMode( context->cublas_handle(), HIPBLAS_POINTER_MODE_DEVICE)); CUBLAS_ENFORCE(hipblasDotEx_v2( context->cublas_handle(), n, a, HIP_R_16F, 1, b, HIP_R_16F, 1, y, HIP_R_16F, HIP_R_32F)); #endif } // A previous version of caffe2 used Thrust but it turns out that thrust // reduction has an implicit scratch space allocation and deallocation, which // may interfere with NCCL and create a deadlock. Hence we are using a custom // reduction here. #define SUM_KERNEL_NTHREADS 128 template <typename T> __global__ void SumKernel(const int N, const T* X, T* Y, bool square) { const int idx = threadIdx.x; __shared__ float reduction_buffer[SUM_KERNEL_NTHREADS]; reduction_buffer[idx] = 0; // A multilevel reduction. 
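// Each of the 128 threads accumulates a strided partial sum over X
// (optionally squaring each element), the partials are then folded
// 128 -> 32 in shared memory, and thread 0 adds the remaining 32 values into
// the single output. This keeps the reduction free of temporary global
// allocations (see the Thrust note above).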
// N -> 128 if (!square) { for (int i = idx; i < N; i += SUM_KERNEL_NTHREADS) { reduction_buffer[idx] += convert::To<T, float>(X[i]); } } else { for (int i = idx; i < N; i += SUM_KERNEL_NTHREADS) { float Xi = convert::To<T, float>(X[i]); reduction_buffer[idx] += Xi * Xi; } } __syncthreads(); // 128 -> 32 if (idx < 32) { reduction_buffer[idx] += reduction_buffer[idx + 32] + reduction_buffer[idx + 64] + reduction_buffer[idx + 96]; } __syncthreads(); // 32 -> 1 if (idx == 0) { float tmp = 0; for (int i = 0; i < 32; ++i) { tmp += reduction_buffer[i]; } *Y = convert::To<float, T>(tmp); } } // According to the benchmarks script // caffe2/caffe2/experiments/python/device_reduce_sum_bench.py, // device reduce is slower for N <= 10000. #define DEVICE_REDUCE_SIZE_THRESHOLD 10000 namespace { template <typename T> __global__ void SumConvertKernel(float* sum, T* dest) { *dest = convert::To<float, T>(*sum); } template <typename T, typename IterT> CAFFE2_CUDA_EXPORT void SumGenericIter( const int N, IterT it, T*& dest, CUDAContext* context, Tensor* scratch_ptr) { size_t memRequired = 0; hipcub::DeviceReduce::Sum( nullptr, memRequired, it, dest, N, context->cuda_stream()); auto buffer_size = static_cast<int64_t>((memRequired + sizeof(T) - 1) / sizeof(T)); if (!dest) { // allocate one more T at the end of scratch for dest scratch_ptr->Resize(std::vector<int64_t>{buffer_size + 1}); dest = scratch_ptr->template mutable_data<T>() + buffer_size; } else { scratch_ptr->Resize(std::vector<int64_t>{buffer_size}); } hipcub::DeviceReduce::Sum( static_cast<void*>(scratch_ptr->template mutable_data<T>()), memRequired, it, dest, N, context->cuda_stream()); } } // namespace template <> CAFFE2_CUDA_EXPORT void Sum<float, CUDAContext>( const int N, const float* x, float* y, CUDAContext* context, Tensor* scratch_ptr) { if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { SumGenericIter<float>(N, x, y, context, scratch_ptr); } else { hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(), N, x, y, false); } } template <> CAFFE2_CUDA_EXPORT void Sum<int32_t, CUDAContext>( const int N, const int32_t* x, int32_t* y, CUDAContext* context, Tensor* scratch_ptr) { if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { SumGenericIter<int32_t>(N, x, y, context, scratch_ptr); } else { hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(), N, x, y, false); } } namespace { template <typename T> struct FloatTransform { inline __host__ __device__ float operator()(const T v) const { return convert::To<T, float>(v); } }; } // namespace #define CAFFE2_MATH_SUM_FUNC(T) \ template <> \ CAFFE2_CUDA_EXPORT void Sum<T, CUDAContext>( \ const int N, \ const T* x, \ T* y, \ CUDAContext* context, \ Tensor* scratch_ptr) { \ if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { \ FloatTransform<T> transform; \ hipcub::TransformInputIterator<float, FloatTransform<T>, const T*> it( \ x, transform); \ float* sum = nullptr; \ SumGenericIter<float>(N, it, sum, context, scratch_ptr); \ hipLaunchKernelGGL(( SumConvertKernel), dim3(1), dim3(1), 0, context->cuda_stream(), sum, y); \ } else { \ hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(), \ N, x, y, false); \ } \ } CAFFE2_MATH_SUM_FUNC(at::Half) #undef CAFFE2_MATH_SUM_FUNC namespace { template <typename T> struct SqrTransform { inline __host__ __device__ T operator()(const T v) const { return v * v; } }; } // namespace template <> CAFFE2_CUDA_EXPORT void SumSqr<float, 
CUDAContext>( const int N, const float* x, float* y, CUDAContext* context, Tensor* scratch_ptr) { if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { SqrTransform<float> transform; hipcub::TransformInputIterator<float, SqrTransform<float>, const float*> it( x, transform); SumGenericIter<float>(N, it, y, context, scratch_ptr); } else { hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(), N, x, y, true); } } #define CAFFE2_MATH_SUMSQR_FUNC(T) \ template <> \ CAFFE2_CUDA_EXPORT void SumSqr<T, CUDAContext>( \ const int N, \ const T* x, \ T* y, \ CUDAContext* context, \ Tensor* scratch_ptr) { \ if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { \ FloatTransform<T> float_transform; \ hipcub::TransformInputIterator<float, FloatTransform<T>, const T*> \ float_it(x, float_transform); \ SqrTransform<float> sqr_transform; \ hipcub::TransformInputIterator< \ float, \ SqrTransform<float>, \ decltype(float_it)> \ it(float_it, sqr_transform); \ float* sum = nullptr; \ SumGenericIter<float>(N, it, sum, context, scratch_ptr); \ hipLaunchKernelGGL(( SumConvertKernel), dim3(1), dim3(1), 0, context->cuda_stream(), sum, y); \ } else { \ hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(), \ N, x, y, true); \ } \ } CAFFE2_MATH_SUMSQR_FUNC(at::Half) #undef CAFFE2_MATH_SUMSQR_FUNC #undef DEVICE_REDUCE_SIZE_THRESHOLD namespace { template <typename T> __global__ void SelectKernel(const int N, const int D, const T* x, const int* idx, T* y) { CUDA_1D_KERNEL_LOOP(i, N) { y[i] = x[i * D + idx[i]]; } } } // namespace template <> CAFFE2_CUDA_EXPORT void Select<float, CUDAContext>( const int N, const int D, const float* x, const int* idx, float* y, CUDAContext* context) { hipLaunchKernelGGL(( SelectKernel<float>) , dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), N, D, x, idx, y); } template <> CAFFE2_CUDA_EXPORT void Select<at::Half, CUDAContext>( const int N, const int D, const at::Half* x, const int* idx, at::Half* y, CUDAContext* context) { hipLaunchKernelGGL(( SelectKernel<at::Half>) , dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), N, D, x, idx, y); } namespace { template <typename TAlpha, typename TData> __global__ void ScaleCUDAKernel(const int n, const TAlpha alpha, const TData* x, TData* y) { CUDA_1D_KERNEL_LOOP(i, n) { #if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__) y[i] = __ldg(x + i) * static_cast<TData>(alpha); #else y[i] = x[i] * static_cast<TData>(alpha); #endif } } template <typename TAlpha, typename TData> __global__ void ScaleCUDAKernel(const int n, const TAlpha* alpha, const TData* x, TData* y) { CUDA_1D_KERNEL_LOOP(i, n) { #if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__) y[i] = __ldg(x + i) * static_cast<TData>(__ldg(alpha)); #else y[i] = x[i] * static_cast<TData>(*alpha); #endif } } template <typename T> __global__ void PowKernel(const int n, const T* x, const T exponent, T* y) { CUDA_1D_KERNEL_LOOP(i, n) { y[i] = powf(x[i], exponent); } } } // namespace template <> CAFFE2_CUDA_EXPORT void Powx<float, CUDAContext>( const int N, const float* a, const float b, float* y, CUDAContext* context) { hipLaunchKernelGGL(( PowKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), N, a, b, y); } #define DELEGATE_CUBLAS_SCALE_FUNCTION(TAlpha, TData, CuBLASFunc) \ template <> \ CAFFE2_CUDA_EXPORT void Scale<TAlpha, TData, CUDAContext>( \ const int N, \ const TAlpha alpha, \ const TData* x, \ 
TData* y, \ CUDAContext* context) { \ if (N == 0) { \ return; \ } \ if (x != y) { \ hipMemcpyAsync( \ y, \ x, \ sizeof(TData) * N, \ hipMemcpyDeviceToDevice, \ context->cuda_stream()); \ } \ if (alpha != TAlpha(1)) { \ CUBLAS_ENFORCE(hipblasSetPointerMode( \ context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST)); \ CUBLAS_ENFORCE(CuBLASFunc(context->cublas_handle(), N, &alpha, y, 1)); \ } \ } \ template <> \ CAFFE2_CUDA_EXPORT void Scale<TAlpha, TData, CUDAContext>( \ const int N, \ const TAlpha* alpha, \ const TData* x, \ TData* y, \ CUDAContext* context) { \ if (N == 0) { \ return; \ } \ if (x != y) { \ hipMemcpyAsync( \ y, \ x, \ sizeof(TData) * N, \ hipMemcpyDeviceToDevice, \ context->cuda_stream()); \ } \ CUBLAS_ENFORCE(hipblasSetPointerMode( \ context->cublas_handle(), HIPBLAS_POINTER_MODE_DEVICE)); \ CUBLAS_ENFORCE(CuBLASFunc(context->cublas_handle(), N, alpha, y, 1)); \ } DELEGATE_CUBLAS_SCALE_FUNCTION(float, float, hipblasSscal) DELEGATE_CUBLAS_SCALE_FUNCTION(double, double, hipblasDscal) #undef DELEGATE_CUBLAS_SCALE_FUNCTION #define CAFFE2_SPECIALIZED_CUDA_SCALE(TAlpha, TData) \ template <> \ CAFFE2_CUDA_EXPORT void Scale<TAlpha, TData, CUDAContext>( \ const int N, \ const TAlpha alpha, \ const TData* x, \ TData* y, \ CUDAContext* context) { \ if (N == 0) { \ return; \ } \ if (alpha == TAlpha(1)) { \ if (x != y) { \ hipMemcpyAsync( \ y, \ x, \ sizeof(TData) * N, \ hipMemcpyDeviceToDevice, \ context->cuda_stream()); \ } \ return; \ } \ hipLaunchKernelGGL(( ScaleCUDAKernel<TAlpha, TData>) \ , dim3(CAFFE_GET_BLOCKS(N)), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream(), N, alpha, x, y); \ } \ template <> \ CAFFE2_CUDA_EXPORT void Scale<TAlpha, TData, CUDAContext>( \ const int N, \ const TAlpha* alpha, \ const TData* x, \ TData* y, \ CUDAContext* context) { \ if (N == 0) { \ return; \ } \ hipLaunchKernelGGL(( ScaleCUDAKernel<TAlpha, TData>) \ , dim3(CAFFE_GET_BLOCKS(N)), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream(), N, alpha, x, y); \ } CAFFE2_SPECIALIZED_CUDA_SCALE(std::int32_t, std::int32_t) CAFFE2_SPECIALIZED_CUDA_SCALE(std::int64_t, std::int64_t) #ifndef __HIP_PLATFORM_HCC__ template <> CAFFE2_CUDA_EXPORT void Scale<at::Half, at::Half, CUDAContext>( const int N, const at::Half alpha, const at::Half* x, at::Half* y, CUDAContext* context) { if (N == 0) { return; } if (x != y) { hipMemcpyAsync( y, x, sizeof(at::Half) * N, hipMemcpyDeviceToDevice, context->cuda_stream()); } CUBLAS_ENFORCE( hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE(hipblasScalEx_v2( context->cublas_handle(), N, &alpha, HIP_R_16F, y, HIP_R_16F, 1, HIP_R_32F)); } template <> CAFFE2_CUDA_EXPORT void Scale<at::Half, at::Half, CUDAContext>( const int N, const at::Half* alpha, const at::Half* x, at::Half* y, CUDAContext* context) { if (N == 0) { return; } if (x != y) { hipMemcpyAsync( y, x, sizeof(at::Half) * N, hipMemcpyDeviceToDevice, context->cuda_stream()); } CUBLAS_ENFORCE(hipblasSetPointerMode( context->cublas_handle(), HIPBLAS_POINTER_MODE_DEVICE)); CUBLAS_ENFORCE(hipblasScalEx_v2( context->cublas_handle(), N, alpha, HIP_R_16F, y, HIP_R_16F, 1, HIP_R_32F)); } template <> CAFFE2_CUDA_EXPORT void Scale<float, at::Half, CUDAContext>( const int N, const float alpha, const at::Half* x, at::Half* y, CUDAContext* context) { if (N == 0) { return; } if (x != y) { hipMemcpyAsync( y, x, sizeof(at::Half) * N, hipMemcpyDeviceToDevice, context->cuda_stream()); } if (alpha != 1.0f) { CUBLAS_ENFORCE(hipblasSetPointerMode( context->cublas_handle(), 
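// Scale comes in two flavours: with a host scalar alpha the handle uses
// HIPBLAS_POINTER_MODE_HOST, and with a device pointer alpha it uses
// HIPBLAS_POINTER_MODE_DEVICE so cuBLAS reads the scale factor directly from
// GPU memory without a synchronizing copy back to the host.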
HIPBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE(hipblasScalEx_v2( context->cublas_handle(), N, &alpha, HIP_R_32F, y, HIP_R_16F, 1, HIP_R_32F)); } } template <> CAFFE2_CUDA_EXPORT void Scale<float, at::Half, CUDAContext>( const int N, const float* alpha, const at::Half* x, at::Half* y, CUDAContext* context) { if (N == 0) { return; } if (x != y) { hipMemcpyAsync( y, x, sizeof(at::Half) * N, hipMemcpyDeviceToDevice, context->cuda_stream()); } CUBLAS_ENFORCE(hipblasSetPointerMode( context->cublas_handle(), HIPBLAS_POINTER_MODE_DEVICE)); CUBLAS_ENFORCE(hipblasScalEx_v2( context->cublas_handle(), N, alpha, HIP_R_32F, y, HIP_R_16F, 1, HIP_R_32F)); } #else // __HIP_PLATFORM_HCC__ namespace { template <> __global__ void ScaleCUDAKernel<at::Half, at::Half>( const int n, const at::Half alpha, const at::Half* x, at::Half* y) { CUDA_1D_KERNEL_LOOP(i, n) { y[i] = convert::To<float, at::Half>( convert::To<at::Half, float>(x[i]) * convert::To<at::Half, float>(alpha)); } } template <> __global__ void ScaleCUDAKernel<at::Half, at::Half>( const int n, const at::Half* alpha, const at::Half* x, at::Half* y) { CUDA_1D_KERNEL_LOOP(i, n) { y[i] = convert::To<float, at::Half>( convert::To<at::Half, float>(x[i]) * convert::To<at::Half, float>(*alpha)); } } template <> __global__ void ScaleCUDAKernel<float, at::Half>( const int n, const float alpha, const at::Half* x, at::Half* y) { CUDA_1D_KERNEL_LOOP(i, n) { y[i] = convert::To<float, at::Half>( convert::To<at::Half, float>(x[i]) * alpha); } } template <> __global__ void ScaleCUDAKernel<float, at::Half>( const int n, const float* alpha, const at::Half* x, at::Half* y) { CUDA_1D_KERNEL_LOOP(i, n) { y[i] = convert::To<float, at::Half>( convert::To<at::Half, float>(x[i]) * (*alpha)); } } } // namespace CAFFE2_SPECIALIZED_HIP_SCALE(at::Half, at::Half) CAFFE2_SPECIALIZED_HIP_SCALE(float, at::Half) #endif // __HIP_PLATFORM_HCC__ #undef CAFFE2_SPECIALIZED_CUDA_SCALE template <> CAFFE2_CUDA_EXPORT void Axpy<float, CUDAContext>( const int N, const float alpha, const float* X, float* Y, CUDAContext* context) { CUBLAS_ENFORCE( hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE(hipblasSaxpy(context->cublas_handle(), N, &alpha, X, 1, Y, 1)); } template <> CAFFE2_CUDA_EXPORT void Axpy<double, CUDAContext>( const int N, const float alpha, const double* X, double* Y, CUDAContext* context) { double alpha_d{alpha}; CUBLAS_ENFORCE( hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE( hipblasDaxpy(context->cublas_handle(), N, &alpha_d, X, 1, Y, 1)); } template <> CAFFE2_CUDA_EXPORT void Axpy<at::Half, CUDAContext>( const int N, const float alpha, const at::Half* X, at::Half* Y, CUDAContext* context) { #if defined(__HIP_PLATFORM_HCC__) CAFFE_THROW("HIP currently does not support FP16 completely yet."); #else CUBLAS_ENFORCE( hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE(hipblasAxpyEx_v2( context->cublas_handle(), N, &alpha, HIP_R_32F, X, HIP_R_16F, 1, Y, HIP_R_16F, 1, HIP_R_32F)); #endif } template <> CAFFE2_CUDA_EXPORT void Axpy<float, CUDAContext>( const int N, const float* alpha, const float* X, float* Y, CUDAContext* context) { CUBLAS_ENFORCE(hipblasSetPointerMode( context->cublas_handle(), HIPBLAS_POINTER_MODE_DEVICE)); CUBLAS_ENFORCE(hipblasSaxpy(context->cublas_handle(), N, alpha, X, 1, Y, 1)); } template <> CAFFE2_CUDA_EXPORT void Axpy<at::Half, CUDAContext>( const int N, const float* alpha, const at::Half* X, at::Half* Y, CUDAContext* context) { #if 
defined(__HIP_PLATFORM_HCC__) CAFFE_THROW("HIP currently does not support FP16 completely yet."); #else CUBLAS_ENFORCE(hipblasSetPointerMode( context->cublas_handle(), HIPBLAS_POINTER_MODE_DEVICE)); CUBLAS_ENFORCE(hipblasAxpyEx_v2( context->cublas_handle(), N, alpha, HIP_R_32F, X, HIP_R_16F, 1, Y, HIP_R_16F, 1, HIP_R_32F)); #endif } namespace { template <typename TCoeff, typename TData> __global__ void AxpbyCUDAKernel( const int N, const TCoeff a, const TData* x, const TCoeff b, TData* y) { CUDA_1D_KERNEL_LOOP(i, N) { #if __CUDA_ARCH__ >= 350 y[i] = __ldg(x + i) * a + y[i] * b; #else y[i] = x[i] * a + y[i] * b; #endif } } template <> __global__ void AxpbyCUDAKernel<float, at::Half>( const int N, const float a, const at::Half* x, const float b, at::Half* y) { CUDA_1D_KERNEL_LOOP(i, N) { y[i] = convert::To<float, at::Half>( convert::To<at::Half, float>(x[i]) * a + convert::To<at::Half, float>(y[i]) * b); } } template <typename TCoeff, typename TData> __global__ void AxpbyCUDAKernel( const int N, const TCoeff* a, const TData* x, const TCoeff* b, TData* y) { CUDA_1D_KERNEL_LOOP(i, N) { #if __CUDA_ARCH__ >= 350 y[i] = __ldg(x + i) * __ldg(a) + y[i] * __ldg(b); #else y[i] = x[i] * *a + y[i] * *b; #endif } } template <> __global__ void AxpbyCUDAKernel<float, at::Half>( const int N, const float* a, const at::Half* x, const float* b, at::Half* y) { CUDA_1D_KERNEL_LOOP(i, N) { #if __CUDA_ARCH__ >= 350 y[i] = convert::To<float, at::Half>( convert::To<at::Half, float>(x[i]) * __ldg(a) + convert::To<at::Half, float>(y[i]) * __ldg(b)); #else y[i] = convert::To<float, at::Half>( convert::To<at::Half, float>(x[i]) * *a + convert::To<at::Half, float>(y[i]) * *b); #endif } } } // namespace #define CAFFE2_SPECIALIZED_CUDA_AXPBY(TCoeff, TData) \ template <> \ CAFFE2_CUDA_EXPORT void Axpby<TCoeff, TData, CUDAContext>( \ const int n, \ const TCoeff a, \ const TData* x, \ const TCoeff b, \ TData* y, \ CUDAContext* context) { \ hipLaunchKernelGGL(( AxpbyCUDAKernel<TCoeff, TData>) \ , dim3(CAFFE_GET_BLOCKS(n)), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream(), n, a, x, b, y); \ } \ template <> \ CAFFE2_CUDA_EXPORT void Axpby<TCoeff, TData, CUDAContext>( \ const int n, \ const TCoeff* a, \ const TData* x, \ const TCoeff* b, \ TData* y, \ CUDAContext* context) { \ hipLaunchKernelGGL(( AxpbyCUDAKernel<TCoeff, TData>) \ , dim3(CAFFE_GET_BLOCKS(n)), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream(), n, a, x, b, y); \ } CAFFE2_SPECIALIZED_CUDA_AXPBY(float, float) CAFFE2_SPECIALIZED_CUDA_AXPBY(float, at::Half) #undef CAFFE2_SPECIALIZED_CUDA_AXPBY namespace { template <typename T> __global__ void Im2ColNCHWCUDAKernel( const int n, const int input_h, const int input_w, const int kernel_h, const int kernel_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int stride_h, const int stride_w, const int output_h, const int output_w, const T* img_data, T* col_data) { CUDA_1D_KERNEL_LOOP(index, n) { const int w_out = index % output_w; const int h_index = index / output_w; const int h_out = h_index % output_h; const int channel_in = h_index / output_h; const int channel_out = channel_in * kernel_h * kernel_w; const int h_in = h_out * stride_h - pad_t; const int w_in = w_out * stride_w - pad_l; const int output_size = output_h * output_w; T* col_data_ptr = col_data + (channel_out * output_h + h_out) * output_w + w_out; const T* img_data_ptr = img_data + (channel_in * input_h + h_in) * input_w + w_in; int dh = 0; for (int i = 0; i < kernel_h; ++i) { int dw = 0; for (int j = 0; j < 
kernel_w; ++j) { const int h = h_in + dh; const int w = w_in + dw; #if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__) *col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) && utils::IsAGeZeroAndALtB(w, input_w) ? __ldg(img_data_ptr + dh * input_w + dw) : 0; #else *col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) && utils::IsAGeZeroAndALtB(w, input_w) ? img_data_ptr[dh * input_w + dw] : 0; #endif col_data_ptr += output_size; dw += dilation_w; } dh += dilation_h; } } } template <typename T> __global__ void Im2ColNHWCCUDAKernel( const int n, const int input_h, const int input_w, const int kernel_h, const int kernel_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int stride_h, const int stride_w, const int output_w, const int channels, const T* img_data, T* col_data) { CUDA_1D_KERNEL_LOOP(index, n) { const int channel_in = index % channels; const int w_out = index / channels % output_w; const int h_out = index / channels / output_w; const int h_in = h_out * stride_h - pad_t; const int w_in = w_out * stride_w - pad_l; T* col_data_ptr = col_data + (h_out * output_w + w_out) * channels * kernel_h * kernel_w + channel_in; int dh = 0; for (int i = 0; i < kernel_h; ++i) { int dw = 0; for (int j = 0; j < kernel_w; ++j) { const int h = h_in + dh; const int w = w_in + dw; #if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__) *col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) && utils::IsAGeZeroAndALtB(w, input_w) ? __ldg(img_data + (h * input_w + w) * channels + channel_in) : 0; #else *col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) && utils::IsAGeZeroAndALtB(w, input_w) ? img_data[(h * input_w + w) * channels + channel_in] : 0; #endif col_data_ptr += channels; dw += dilation_w; } dh += dilation_h; } } } template <typename T> __global__ void Col2ImNCHWCUDAKernel( const int n, const int input_h, const int input_w, const int patch_h, const int patch_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int stride_h, const int stride_w, const int output_h, const int output_w, const T* col_data, T* img_data) { const int dpatch_h = dilation_h * (patch_h - 1) + 1; const int dpatch_w = dilation_w * (patch_w - 1) + 1; CUDA_1D_KERNEL_LOOP(index, n) { T val = 0; const int w = index % input_w + pad_l; const int h = index / input_w % input_h + pad_t; const int c = index / (input_h * input_w); // compute the start and end of the output const int w_col_start = (w < dpatch_w) ? 0 : (w - dpatch_w) / stride_w + 1; const int w_col_end = min(w / stride_w + 1, output_w); const int h_col_start = (h < dpatch_h) ? 
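// Col2Im inverts Im2Col by accumulation: every input pixel sums the column
// entries of all sliding windows that cover it. The [w_col_start, w_col_end)
// and [h_col_start, h_col_end) bounds computed here enumerate exactly those
// output positions, and the dilation check below skips offsets that do not
// land on a kernel tap.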
0 : (h - dpatch_h) / stride_h + 1; const int h_col_end = min(h / stride_h + 1, output_h); for (int h_col = h_col_start; h_col < h_col_end; ++h_col) { for (int w_col = w_col_start; w_col < w_col_end; ++w_col) { int h_k = (h - h_col * stride_h); int w_k = (w - w_col * stride_w); if (h_k % dilation_h == 0 && w_k % dilation_w == 0) { h_k /= dilation_h; w_k /= dilation_w; const int col_data_index = (((c * patch_h + h_k) * patch_w + w_k) * output_h + h_col) * output_w + w_col; #if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__) val += __ldg(col_data + col_data_index); #else val += col_data[col_data_index]; #endif } } } img_data[index] = val; } } template <typename T> __global__ void Col2ImNHWCCUDAKernel( const int n, const int input_w, const int channels, const int patch_h, const int patch_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int stride_h, const int stride_w, const int output_h, const int output_w, const T* col_data, T* img_data) { const int dpatch_h = dilation_h * (patch_h - 1) + 1; const int dpatch_w = dilation_w * (patch_w - 1) + 1; CUDA_1D_KERNEL_LOOP(index, n) { T val = 0; const int c = index % channels; const int w = index / channels % input_w + pad_l; const int h = index / channels / input_w + pad_t; // compute the start and end of the output const int w_col_start = (w < dpatch_w) ? 0 : (w - dpatch_w) / stride_w + 1; const int w_col_end = min(w / stride_w + 1, output_w); const int h_col_start = (h < dpatch_h) ? 0 : (h - dpatch_h) / stride_h + 1; const int h_col_end = min(h / stride_h + 1, output_h); const int channels_col = patch_h * patch_w * channels; for (int h_col = h_col_start; h_col < h_col_end; ++h_col) { for (int w_col = w_col_start; w_col < w_col_end; ++w_col) { int h_k = h - h_col * stride_h; int w_k = w - w_col * stride_w; if (h_k % dilation_h == 0 && w_k % dilation_w == 0) { h_k /= dilation_h; w_k /= dilation_w; const int c_col = (h_k * patch_w + w_k) * channels + c; #if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__) val += __ldg( col_data + (h_col * output_w + w_col) * channels_col + c_col); #else val += col_data[(h_col * output_w + w_col) * channels_col + c_col]; #endif } } } img_data[index] = val; } } template <typename T, int N, bool kCol2Im> __global__ void Im2ColNdNCHWCUDAKernel( const int outer_size, const int inner_size, const int kernel_size, SimpleArray<int, N + 1> img_shape, SimpleArray<int, N + 1> col_shape, SimpleArray<int, N> kernel_shape, SimpleArray<int, N> stride, SimpleArray<int, N> dilation, SimpleArray<int, N> pad, const T* X_data, T* Y_data) { int d_offset[N]; int d_iter[N]; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { int offset_i = i; #pragma unroll for (int d_i = N - 1; d_i >= 0; --d_i) { d_offset[d_i] = offset_i % kernel_shape.data[d_i]; offset_i /= kernel_shape.data[d_i]; } for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { int offset_j = j; #pragma unroll for (int d_i = N - 1; d_i >= 0; --d_i) { d_iter[d_i] = offset_j % col_shape.data[d_i + 1]; offset_j /= col_shape.data[d_i + 1]; } const int col_index = i * inner_size + j; int img_index = i / kernel_size; bool is_padding = false; #pragma unroll for (int d_i = 0; d_i < N; ++d_i) { const int d_img = d_iter[d_i] * stride.data[d_i] - pad.data[d_i] + d_offset[d_i] * dilation.data[d_i]; is_padding |= !utils::IsAGeZeroAndALtB(d_img, img_shape.data[d_i + 1]); img_index = img_index * img_shape.data[d_i + 1] + d_img; } #if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__) if (!kCol2Im) { Y_data[col_index] = is_padding ? 
0 : __ldg(X_data + img_index); } else if (!is_padding) { atomicAdd(Y_data + img_index, __ldg(X_data + col_index)); } #else if (!kCol2Im) { Y_data[col_index] = is_padding ? 0 : X_data[img_index]; } else if (!is_padding) { atomicAdd(Y_data + img_index, X_data[col_index]); } #endif } } } template <typename T, int N> CAFFE2_CUDA_EXPORT void Im2ColNdNCHWCUDAImpl( const int img_size, const int col_size, const int* img_shape, const int* col_shape, const int* kernel_shape, const int* stride, const int* dilation, const int* pad, const float* img_data, float* col_data, CUDAContext* context) { const int outer_size = col_shape[0]; const int inner_size = col_size / outer_size; const int kernel_size = std::accumulate( kernel_shape, kernel_shape + N, 1, std::multiplies<int>()); SimpleArray<int, N + 1> img_shape_array; SimpleArray<int, N + 1> col_shape_array; SimpleArray<int, N> kernel_shape_array; SimpleArray<int, N> stride_array; SimpleArray<int, N> dilation_array; SimpleArray<int, N> pad_array; std::memcpy(img_shape_array.data, img_shape, (N + 1) * sizeof(int)); std::memcpy(col_shape_array.data, col_shape, (N + 1) * sizeof(int)); std::memcpy(kernel_shape_array.data, kernel_shape, N * sizeof(int)); std::memcpy(stride_array.data, stride, N * sizeof(int)); std::memcpy(dilation_array.data, dilation, N * sizeof(int)); std::memcpy(pad_array.data, pad, N * sizeof(int)); hipLaunchKernelGGL(( Im2ColNdNCHWCUDAKernel<T, N, false>) , dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), outer_size, inner_size, kernel_size, img_shape_array, col_shape_array, kernel_shape_array, stride_array, dilation_array, pad_array, img_data, col_data); } template <typename T, int N> CAFFE2_CUDA_EXPORT void Col2ImNdNCHWCUDAImpl( const int img_size, const int col_size, const int* img_shape, const int* col_shape, const int* kernel_shape, const int* stride, const int* dilation, const int* pad, const float* col_data, float* img_data, CUDAContext* context) { const int outer_size = col_shape[0]; const int inner_size = col_size / outer_size; const int kernel_size = std::accumulate( kernel_shape, kernel_shape + N, 1, std::multiplies<int>()); SimpleArray<int, N + 1> img_shape_array; SimpleArray<int, N + 1> col_shape_array; SimpleArray<int, N> kernel_shape_array; SimpleArray<int, N> stride_array; SimpleArray<int, N> dilation_array; SimpleArray<int, N> pad_array; std::memcpy(img_shape_array.data, img_shape, (N + 1) * sizeof(int)); std::memcpy(col_shape_array.data, col_shape, (N + 1) * sizeof(int)); std::memcpy(kernel_shape_array.data, kernel_shape, N * sizeof(int)); std::memcpy(stride_array.data, stride, N * sizeof(int)); std::memcpy(dilation_array.data, dilation, N * sizeof(int)); std::memcpy(pad_array.data, pad, N * sizeof(int)); Set<T, CUDAContext>(img_size, 0, img_data, context); hipLaunchKernelGGL(( Im2ColNdNCHWCUDAKernel<T, N, true>) , dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), outer_size, inner_size, kernel_size, img_shape_array, col_shape_array, kernel_shape_array, stride_array, dilation_array, pad_array, col_data, img_data); } } // namespace template <> CAFFE2_CUDA_EXPORT void Im2Col<float, CUDAContext, StorageOrder::NCHW>( const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int pad_b, const int pad_r, const int stride_h, const int stride_w, const float* img_data, float* col_data, 
CUDAContext* context, const int /* groups */) { const int dkernel_h = dilation_h * (kernel_h - 1) + 1; const int dkernel_w = dilation_w * (kernel_w - 1) + 1; const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1; const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1; const int num_kernels = channels * output_h * output_w; hipLaunchKernelGGL(( Im2ColNCHWCUDAKernel<float>) , dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), num_kernels, height, width, kernel_h, kernel_w, dilation_h, dilation_w, pad_t, pad_l, stride_h, stride_w, output_h, output_w, img_data, col_data); } template <> CAFFE2_CUDA_EXPORT void Im2Col<float, CUDAContext, StorageOrder::NHWC>( const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int pad_b, const int pad_r, const int stride_h, const int stride_w, const float* img_data, float* col_data, CUDAContext* context, const int groups) { CAFFE_ENFORCE_EQ(groups, 1, "groups must be 1 for GPU NHWC Im2Col"); const int dkernel_h = dilation_h * (kernel_h - 1) + 1; const int dkernel_w = dilation_w * (kernel_w - 1) + 1; const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1; const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1; const int num_kernels = output_h * output_w * channels; hipLaunchKernelGGL(( Im2ColNHWCCUDAKernel<float>) , dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), num_kernels, height, width, kernel_h, kernel_w, dilation_h, dilation_w, pad_t, pad_l, stride_h, stride_w, output_w, channels, img_data, col_data); } template <> CAFFE2_CUDA_EXPORT void Col2Im<float, CUDAContext, StorageOrder::NCHW>( const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int pad_b, const int pad_r, const int stride_h, const int stride_w, const float* col_data, float* img_data, CUDAContext* context, const int /* groups */) { // In NCHW, the number of groups doesn't affect Col2Im. 
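// Worked example of the output-size arithmetic used just below (illustrative numbers only):
// with height = 5, kernel_h = 3, dilation_h = 1, pad_t = pad_b = 1, stride_h = 1,
//   dkernel_h = dilation_h * (kernel_h - 1) + 1 = 3
//   output_h  = (height + pad_t + pad_b - dkernel_h) / stride_h + 1 = (5 + 1 + 1 - 3) / 1 + 1 = 5,
// i.e. this padding choice preserves the spatial size.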
const int dkernel_h = dilation_h * (kernel_h - 1) + 1; const int dkernel_w = dilation_w * (kernel_w - 1) + 1; const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1; const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1; const int num_kernels = channels * height * width; hipLaunchKernelGGL(( Col2ImNCHWCUDAKernel<float>) , dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), num_kernels, height, width, kernel_h, kernel_w, dilation_h, dilation_w, pad_t, pad_l, stride_h, stride_w, output_h, output_w, col_data, img_data); } template <> CAFFE2_CUDA_EXPORT void Col2Im<float, CUDAContext, StorageOrder::NHWC>( const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int pad_b, const int pad_r, const int stride_h, const int stride_w, const float* col_data, float* img_data, CUDAContext* context, const int groups) { CAFFE_ENFORCE_EQ(groups, 1, "groups must be 1 for GPU NHWC Col2Im"); const int dkernel_h = dilation_h * (kernel_h - 1) + 1; const int dkernel_w = dilation_w * (kernel_w - 1) + 1; const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1; const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1; const int num_kernels = height * width * channels; hipLaunchKernelGGL(( Col2ImNHWCCUDAKernel<float>) , dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), num_kernels, width, channels, kernel_h, kernel_w, dilation_h, dilation_w, pad_t, pad_l, stride_h, stride_w, output_h, output_w, col_data, img_data); } template <> CAFFE2_CUDA_EXPORT void Im2ColNd<float, CUDAContext, StorageOrder::NCHW>( const int N, const int img_size, const int col_size, const int* img_shape, const int* col_shape, const int* kernel_shape, const int* stride, const int* dilation, const int* pad, const float* img_data, float* col_data, CUDAContext* context, const int /* groups */) { // In NCHW, the number of groups doesn't affect Im2Col. DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1( N, Im2ColNdNCHWCUDAImpl, float, img_size, col_size, img_shape, col_shape, kernel_shape, stride, dilation, pad, img_data, col_data, context); } template <> CAFFE2_CUDA_EXPORT void Im2ColNd<float, CUDAContext, StorageOrder::NHWC>( const int N, const int img_size, const int col_size, const int* img_shape, const int* col_shape, const int* kernel_shape, const int* stride, const int* dilation, const int* pad, const float* img_data, float* col_data, CUDAContext* context, const int groups) { CAFFE_NOT_IMPLEMENTED; } template <> CAFFE2_CUDA_EXPORT void Col2ImNd<float, CUDAContext, StorageOrder::NCHW>( const int N, const int img_size, const int col_size, const int* img_shape, const int* col_shape, const int* kernel_shape, const int* stride, const int* dilation, const int* pad, const float* col_data, float* img_data, CUDAContext* context, int /* groups */) { // In NCHW, the number of groups doesn't affect Col2Im. 
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1( N, Col2ImNdNCHWCUDAImpl, float, img_size, col_size, img_shape, col_shape, kernel_shape, stride, dilation, pad, col_data, img_data, context); } template <> CAFFE2_CUDA_EXPORT void Col2ImNd<float, CUDAContext, StorageOrder::NHWC>( const int N, const int img_size, const int col_size, const int* img_shape, const int* col_shape, const int* kernel_shape, const int* stride, const int* dilation, const int* pad, const float* col_data, float* img_data, CUDAContext* context, int groups) { CAFFE_NOT_IMPLEMENTED; } template <> CAFFE2_CUDA_EXPORT void CopyMatrix<CUDAContext>( const size_t itemsize, const int M, const int N, const void* A, const int lda, void* B, const int ldb, CUDAContext* context, TypeMeta::Copy copy) { CAFFE_ENFORCE(!copy, "Copy constructor is not supported in CUDA context"); hipMemcpy2DAsync( B, ldb * itemsize, A, lda * itemsize, N * itemsize, M, hipMemcpyDeviceToDevice, context->cuda_stream()); } #define CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(T) \ template <> \ void CopyMatrix<T, CUDAContext>( \ const int M, \ const int N, \ const T* A, \ const int lda, \ T* B, \ const int ldb, \ CUDAContext* context) { \ if (M == 0 || N == 0) { \ return; \ } \ hipMemcpy2DAsync( \ B, \ sizeof(T) * ldb, \ A, \ sizeof(T) * lda, \ sizeof(T) * N, \ M, \ hipMemcpyDeviceToDevice, \ context->cuda_stream()); \ } CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(float) CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(double) CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(int) CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(int64_t) #undef CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX template <> CAFFE2_CUDA_EXPORT void CopyVector<float, CUDAContext>( const int N, const float* src, float* dst, CUDAContext* context) { if (src != dst && N > 0) { hipMemcpyAsync( dst, src, sizeof(float) * N, hipMemcpyDeviceToDevice, context->cuda_stream()); } } namespace { template <typename T> using BlockReduce = hipcub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS>; template <typename T, class Reducer> __global__ void RowwiseReduceKernel( const int rows, const int cols, const Reducer reducer, const T init, const T alpha, const T* X, T* Y) { __shared__ typename BlockReduce<T>::TempStorage temp_storage; for (int i = blockIdx.x; i < rows; i += gridDim.x) { T val = init; for (int j = threadIdx.x; j < cols; j += blockDim.x) { val = reducer(X[i * cols + j], val); } val = BlockReduce<T>(temp_storage).Reduce(val, reducer); if (threadIdx.x == 0) { Y[i] = val * alpha; } __syncthreads(); } } template <typename T, class Reducer> __global__ void ColwiseReduceKernel( const int rows, const int cols, const Reducer reducer, const T init, const T alpha, const T* X, T* Y) { __shared__ typename BlockReduce<T>::TempStorage temp_storage; for (int i = blockIdx.x; i < cols; i += gridDim.x) { T val = init; for (int j = threadIdx.x; j < rows; j += blockDim.x) { val = reducer(X[j * cols + i], val); } val = BlockReduce<T>(temp_storage).Reduce(val, reducer); if (threadIdx.x == 0) { Y[i] = val * alpha; } __syncthreads(); } } } // namespace #define CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX(T) \ template <> \ CAFFE2_CUDA_EXPORT void RowwiseMax<T, CUDAContext>( \ const int N, const int D, const T* x, T* y, CUDAContext* context) { \ hipLaunchKernelGGL(( RowwiseReduceKernel), \ ::min(N, CAFFE_MAXIMUM_NUM_BLOCKS), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream(), \ N, D, hipcub::Max(), std::numeric_limits<T>::lowest(), T(1), x, y); \ } CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX(float) #undef CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX #define CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX(T) \ template <> \ 
CAFFE2_CUDA_EXPORT void ColwiseMax<T, CUDAContext>( \ const int N, const int D, const T* x, T* y, CUDAContext* context) { \ hipLaunchKernelGGL(( ColwiseReduceKernel), \ ::min(D, CAFFE_MAXIMUM_NUM_BLOCKS), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream(), \ N, D, hipcub::Max(), std::numeric_limits<T>::lowest(), T(1), x, y); \ } CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX(float) #undef CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX namespace { __global__ void maximum_kernel(const int N, const float alpha, const float* x, float* y) { CUDA_1D_KERNEL_LOOP(i, N) { y[i] = fmaxf(x[i], alpha); } } } // namespace template <> CAFFE2_CUDA_EXPORT void Maximum( const int N, const float alpha, const float* x, float* y, CUDAContext* context) { hipLaunchKernelGGL(( maximum_kernel), dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), N, alpha, x, y); } namespace { template <typename T, class Reducer, int D> __global__ void ReduceTensorCUDAKernel( const int outer_size, const int inner_size, SimpleArray<int, D> X_strides, SimpleArray<FIXED_DIVISOR, D> Y_dims, const Reducer reducer, const T init, const T alpha, const T* X, T* Y) { __shared__ typename BlockReduce<T>::TempStorage temp_storage; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { T val = init; for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { int X_index = 0; int Y_index = i * inner_size + j; #pragma unroll for (int d = D - 1; d >= 0; --d) { int r; FIXED_DIVISOR_DIV_MOD(Y_dims.data[d], Y_index, &Y_index, &r); X_index += r * X_strides.data[d]; } #if __CUDA_ARCH__ >= 350 val = reducer(val, __ldg(X + X_index)); #else val = reducer(val, X[X_index]); #endif } val = BlockReduce<T>(temp_storage).Reduce(val, reducer); if (threadIdx.x == 0) { Y[i] = val * alpha; } __syncthreads(); } } template <typename T, class Reducer, int D> CAFFE2_CUDA_EXPORT void ReduceTensorCUDAImpl( const int outer_size, const int inner_size, const int* dims, const int* axes, const Reducer& reducer, const T init, const T alpha, const T* X, T* Y, CUDAContext* context) { SimpleArray<int, D> X_strides; SimpleArray<FIXED_DIVISOR, D> Y_dims; utils::ComputeTransposedStrides(D, dims, axes, X_strides.data); for (int i = 0; i < D; ++i) { Y_dims.data[i] = FIXED_DIVISOR(dims[axes[i]]); } hipLaunchKernelGGL(( ReduceTensorCUDAKernel<T, Reducer, D>) , dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), outer_size, inner_size, X_strides, Y_dims, reducer, init, alpha, X, Y); } template <typename T, class Reducer> CAFFE2_CUDA_EXPORT void ReduceTensorCUDA( const int num_dims, const int* dims, const int num_axes, const int* axes, const Reducer& reducer, const T init, const T alpha, const T* X, T* Y, CUDAContext* context) { CAFFE_ENFORCE_LE(num_axes, num_dims); std::vector<int> Y_dims_vector(dims, dims + num_dims); for (int i = 0; i < num_axes; ++i) { Y_dims_vector[axes[i]] = 1; } const int* X_dims = dims; const int* Y_dims = Y_dims_vector.data(); const int X_size = std::accumulate(X_dims, X_dims + num_dims, 1, std::multiplies<int>()); const int Y_size = std::accumulate(Y_dims, Y_dims + num_dims, 1, std::multiplies<int>()); if (X_size == 0) { Set<T, CUDAContext>(Y_size, alpha * init, Y, context); return; } if (alpha == T(0)) { Set<T, CUDAContext>(Y_size, T(0), Y, context); return; } if (std::equal(X_dims, X_dims + num_dims, Y_dims)) { Scale<T, T, CUDAContext>(X_size, alpha, X, Y, context); return; } int rows; int cols; if (utils::IsRowwiseReduce(num_dims, X_dims, Y_dims, &rows, &cols)) { 
hipLaunchKernelGGL(( RowwiseReduceKernel<T>) , dim3(::min(rows, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), rows, cols, reducer, init, alpha, X, Y); return; } if (utils::IsColwiseReduce(num_dims, X_dims, Y_dims, &rows, &cols)) { hipLaunchKernelGGL(( ColwiseReduceKernel<T>) , dim3(::min(rows, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), rows, cols, reducer, init, alpha, X, Y); return; } std::vector<int> transpose_axes(num_dims); utils::ComputeTransposeAxesForReduceOp( num_dims, num_axes, axes, transpose_axes.data()); const int outer_size = Y_size; const int inner_size = X_size / Y_size; DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_2( num_dims, ReduceTensorCUDAImpl, T, Reducer, outer_size, inner_size, dims, transpose_axes.data(), reducer, init, alpha, X, Y, context); } } // namespace #define CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(T) \ template <> \ CAFFE2_CUDA_EXPORT void ReduceMin<T, CUDAContext>( \ const int num_dims, \ const int* dims, \ const int num_axes, \ const int* axes, \ const T alpha, \ const T* X, \ T* Y, \ CUDAContext* context) { \ ReduceTensorCUDA( \ num_dims, \ dims, \ num_axes, \ axes, \ hipcub::Min(), \ std::numeric_limits<T>::max(), \ alpha, \ X, \ Y, \ context); \ } CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(std::int32_t) CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(std::int64_t) CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(float) CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(double) #undef CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN #define CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(T) \ template <> \ CAFFE2_CUDA_EXPORT void ReduceMax<T, CUDAContext>( \ const int num_dims, \ const int* dims, \ const int num_axes, \ const int* axes, \ const T alpha, \ const T* X, \ T* Y, \ CUDAContext* context) { \ ReduceTensorCUDA( \ num_dims, \ dims, \ num_axes, \ axes, \ hipcub::Max(), \ std::numeric_limits<T>::lowest(), \ alpha, \ X, \ Y, \ context); \ } CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(std::int32_t) CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(std::int64_t) CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(float) CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(double) #undef CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX #define CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(T) \ template <> \ CAFFE2_CUDA_EXPORT void ReduceSum<T, CUDAContext>( \ const int num_dims, \ const int* dims, \ const int num_axes, \ const int* axes, \ const T alpha, \ const T* X, \ T* Y, \ CUDAContext* context) { \ ReduceTensorCUDA( \ num_dims, \ dims, \ num_axes, \ axes, \ hipcub::Sum(), \ T(0), \ alpha, \ X, \ Y, \ context); \ } CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(std::int32_t) CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(std::int64_t) CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(float) CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(double) #undef CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM #define CAFFE2_SPECIALIZED_CUDA_REDUCE_MEAN(T) \ template <> \ CAFFE2_CUDA_EXPORT void ReduceMean<T, CUDAContext>( \ const int num_dims, \ const int* dims, \ const int num_axes, \ const int* axes, \ const T alpha, \ const T* X, \ T* Y, \ CUDAContext* context) { \ int scale = 1; \ for (int i = 0; i < num_axes; ++i) { \ scale *= dims[axes[i]]; \ } \ ReduceTensorCUDA( \ num_dims, \ dims, \ num_axes, \ axes, \ hipcub::Sum(), \ T(0), \ alpha / static_cast<T>(scale), \ X, \ Y, \ context); \ } CAFFE2_SPECIALIZED_CUDA_REDUCE_MEAN(float) #undef CAFFE2_SPECIALIZED_CUDA_REDUCE_MEAN namespace { template <typename T, int D> __global__ void BroadcastCUDAKernel( const int Y_size, const SimpleArray<int, D> X_strides, const SimpleArray<FIXED_DIVISOR, D> Y_dims, const T alpha, const T* X, T* Y) { CUDA_1D_KERNEL_LOOP(Y_index, 
Y_size) { int X_index = 0; int Y_index_val = Y_index; #pragma unroll for (int i = D - 1; i >= 0; --i) { int d; FIXED_DIVISOR_DIV_MOD(Y_dims.data[i], Y_index_val, &Y_index_val, &d); X_index += d * X_strides.data[i]; } #if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__) Y[Y_index] = __ldg(X + X_index) * alpha; #else Y[Y_index] = X[X_index] * alpha; #endif } } template <typename T, int D> CAFFE2_CUDA_EXPORT void BroadcastCUDAImpl( const int X_ndim, const int* X_dims, const int* Y_dims, const T alpha, const T* X, T* Y, CUDAContext* context) { SimpleArray<int, D> X_strides_array; SimpleArray<FIXED_DIVISOR, D> Y_dims_array; const int d = D - X_ndim; std::fill(X_strides_array.data, X_strides_array.data + d, 0); int cur_stride = 1; for (int i = D - 1; i >= d; --i) { CAFFE_ENFORCE(X_dims[i - d] == 1 || X_dims[i - d] == Y_dims[i]); X_strides_array.data[i] = X_dims[i - d] == 1 ? 0 : cur_stride; cur_stride *= X_dims[i - d]; } for (int i = 0; i < D; ++i) { if (Y_dims[i] == 0) { return; } Y_dims_array.data[i] = FIXED_DIVISOR(Y_dims[i]); } const int Y_size = std::accumulate(Y_dims, Y_dims + D, 1, std::multiplies<int>()); hipLaunchKernelGGL(( BroadcastCUDAKernel<T, D>) , dim3(CAFFE_GET_BLOCKS(Y_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), Y_size, X_strides_array, Y_dims_array, alpha, X, Y); } } // namespace #define CAFFE2_SPECIALIZED_CUDA_BROADCAST(T) \ template <> \ CAFFE2_CUDA_EXPORT void Broadcast<T, CUDAContext>( \ const int X_ndim, \ const int* X_dims, \ const int Y_ndim, \ const int* Y_dims, \ const T alpha, \ const T* X, \ T* Y, \ CUDAContext* context) { \ CAFFE_ENFORCE_LE(X_ndim, Y_ndim); \ DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1( \ Y_ndim, \ BroadcastCUDAImpl, \ T, \ X_ndim, \ X_dims, \ Y_dims, \ alpha, \ X, \ Y, \ context); \ } CAFFE2_SPECIALIZED_CUDA_BROADCAST(std::int32_t) CAFFE2_SPECIALIZED_CUDA_BROADCAST(std::int64_t) CAFFE2_SPECIALIZED_CUDA_BROADCAST(float) CAFFE2_SPECIALIZED_CUDA_BROADCAST(double) #undef CAFFE2_SPECIALIZED_CUDA_BROADCAST namespace { template <typename T> __global__ void RowwiseMomentsCUDAKernel( const int rows, const int cols, const T* X, T* mean, T* variance) { __shared__ typename BlockReduce<T>::TempStorage m_storage; __shared__ typename BlockReduce<T>::TempStorage v_storage; const T scale = T(1) / static_cast<T>(cols); for (int i = blockIdx.x; i < rows; i += gridDim.x) { T m_val = 0; T v_val = 0; for (int j = threadIdx.x; j < cols; j += blockDim.x) { const int X_index = i * cols + j; #if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__) m_val += __ldg(X + X_index); v_val += __ldg(X + X_index) * __ldg(X + X_index); #else m_val += X[X_index]; v_val += X[X_index] * X[X_index]; #endif } m_val = BlockReduce<T>(m_storage).Sum(m_val); v_val = BlockReduce<T>(v_storage).Sum(v_val); if (threadIdx.x == 0) { const T mu = m_val * scale; mean[i] = mu; variance[i] = v_val * scale - mu * mu; } __syncthreads(); } } template <typename T> __global__ void ColwiseMomentsCUDAKernel( const int rows, const int cols, const T* X, T* mean, T* variance) { __shared__ typename BlockReduce<T>::TempStorage m_storage; __shared__ typename BlockReduce<T>::TempStorage v_storage; const T scale = T(1) / static_cast<T>(rows); for (int i = blockIdx.x; i < cols; i += gridDim.x) { T m_val = 0; T v_val = 0; for (int j = threadIdx.x; j < rows; j += blockDim.x) { const int X_index = j * cols + i; #if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__) m_val += __ldg(X + X_index); v_val += __ldg(X + X_index) * __ldg(X + X_index); #else m_val += X[X_index]; v_val += X[X_index] * 
X[X_index]; #endif } m_val = BlockReduce<T>(m_storage).Sum(m_val); v_val = BlockReduce<T>(v_storage).Sum(v_val); if (threadIdx.x == 0) { const T mu = m_val * scale; mean[i] = mu; variance[i] = v_val * scale - mu * mu; } __syncthreads(); } } template <typename T, int D> __global__ void MomentsCUDAKernel( const int outer_size, const int inner_size, SimpleArray<int, D> X_strides, SimpleArray<FIXED_DIVISOR, D> Y_dims, const T* X, T* mean, T* variance) { __shared__ typename BlockReduce<T>::TempStorage m_storage; __shared__ typename BlockReduce<T>::TempStorage v_storage; const T scale = T(1) / static_cast<T>(inner_size); for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { T m_val = 0; T v_val = 0; for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { int X_index = 0; int Y_index = i * inner_size + j; #pragma unroll for (int d = D - 1; d >= 0; --d) { int r; FIXED_DIVISOR_DIV_MOD(Y_dims.data[d], Y_index, &Y_index, &r); X_index += r * X_strides.data[d]; } #if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__) m_val += __ldg(X + X_index); v_val += __ldg(X + X_index) * __ldg(X + X_index); #else m_val += X[X_index]; v_val += X[X_index] * X[X_index]; #endif } m_val = BlockReduce<T>(m_storage).Sum(m_val); v_val = BlockReduce<T>(v_storage).Sum(v_val); if (threadIdx.x == 0) { const T mu = m_val * scale; mean[i] = mu; variance[i] = v_val * scale - mu * mu; } __syncthreads(); } } template <typename T, int D> CAFFE2_CUDA_EXPORT void MomentsCUDAImpl( const int outer_size, const int inner_size, const int* dims, const int* axes, const T* X, T* mean, T* variance, CUDAContext* context) { SimpleArray<int, D> X_strides; SimpleArray<FIXED_DIVISOR, D> Y_dims; utils::ComputeTransposedStrides(D, dims, axes, X_strides.data); for (int i = 0; i < D; ++i) { Y_dims.data[i] = FIXED_DIVISOR(dims[axes[i]]); } hipLaunchKernelGGL(( MomentsCUDAKernel<T, D>) , dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), outer_size, inner_size, X_strides, Y_dims, X, mean, variance); } template <typename T> CAFFE2_CUDA_EXPORT void MomentsCUDA( const int num_dims, const int* dims, const int num_axes, const int* axes, const T* X, T* mean, T* variance, CUDAContext* context) { CAFFE_ENFORCE_LE(num_axes, num_dims); std::vector<int> Y_dims_vector(dims, dims + num_dims); for (int i = 0; i < num_axes; ++i) { Y_dims_vector[axes[i]] = 1; } const int* X_dims = dims; const int* Y_dims = Y_dims_vector.data(); const int X_size = std::accumulate(X_dims, X_dims + num_dims, 1, std::multiplies<int>()); const int Y_size = std::accumulate(Y_dims, Y_dims + num_dims, 1, std::multiplies<int>()); if (X_size == 0) { Set<T, CUDAContext>(Y_size, T(0), mean, context); Set<T, CUDAContext>(Y_size, T(0), variance, context); return; } if (std::equal(X_dims, X_dims + num_dims, Y_dims)) { hipMemcpyAsync( mean, X, sizeof(T) * X_size, hipMemcpyDeviceToDevice, context->cuda_stream()); Set<T, CUDAContext>(Y_size, T(0), variance, context); return; } int rows; int cols; if (utils::IsRowwiseReduce(num_dims, X_dims, Y_dims, &rows, &cols)) { hipLaunchKernelGGL(( RowwiseMomentsCUDAKernel<T>) , dim3(::min(rows, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), rows, cols, X, mean, variance); return; } if (utils::IsColwiseReduce(num_dims, X_dims, Y_dims, &rows, &cols)) { hipLaunchKernelGGL(( ColwiseMomentsCUDAKernel<T>) , dim3(::min(rows, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), rows, cols, X, mean, variance); return; } std::vector<int> 
transpose_axes(num_dims); utils::ComputeTransposeAxesForReduceOp( num_dims, num_axes, axes, transpose_axes.data()); const int pivot = num_dims - num_axes; int outer_size = 1; for (int i = 0; i < pivot; ++i) { outer_size *= dims[transpose_axes[i]]; } int inner_size = 1; for (int i = pivot; i < num_dims; ++i) { inner_size *= dims[transpose_axes[i]]; } DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1( num_dims, MomentsCUDAImpl, T, outer_size, inner_size, dims, transpose_axes.data(), X, mean, variance, context); } } // namespace #define CAFFE2_SPECIALIZED_CUDA_MOMENTS(T) \ template <> \ CAFFE2_CUDA_EXPORT void Moments<T, CUDAContext>( \ const int num_dims, \ const int* dims, \ const int num_axes, \ const int* axes, \ const T* X, \ T* mean, \ T* variance, \ CUDAContext* context) { \ MomentsCUDA<T>( \ num_dims, dims, num_axes, axes, X, mean, variance, context); \ } CAFFE2_SPECIALIZED_CUDA_MOMENTS(float) #undef CAFFE2_SPECIALIZED_CUDA_MOMENTS namespace { template <typename T> __global__ void InvStdCUDAKernel(const int N, const T epsilon, const T* var, T* inv_std); #define DELEGATE_INV_STD_KERNEL_FUNCTION(T, Func) \ template <> \ __global__ void InvStdCUDAKernel<T>( \ const int N, const T epsilon, const T* var, T* inv_std) { \ CUDA_1D_KERNEL_LOOP(i, N) { \ inv_std[i] = Func(var[i] + epsilon); \ } \ } DELEGATE_INV_STD_KERNEL_FUNCTION(float, rsqrtf) #undef DELEGATE_INV_STD_KERNEL_FUNCTION } // namespace #define CAFFE2_SPECIALIZED_CUDA_INV_STD(T) \ template <> \ CAFFE2_CUDA_EXPORT void InvStd<T, CUDAContext>( \ const int N, \ const T epsilon, \ const T* var, \ T* inv_std, \ CUDAContext* context) { \ hipLaunchKernelGGL(( InvStdCUDAKernel<T>) \ , dim3(CAFFE_GET_BLOCKS(N)), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream(), N, epsilon, var, inv_std); \ } CAFFE2_SPECIALIZED_CUDA_INV_STD(float) #undef CAFFE2_SPECIALIZED_CUDA_INV_STD namespace { constexpr int kTileDim = 32; constexpr int kBlockRows = 8; // Splits the original matrix into submatrices with size 32 * 32. // Each block transposes one submatrix by loading it into shared memory. 
// Reference https://devblogs.nvidia.com/efficient-matrix-transpose-cuda-cc/ template <typename T> __global__ void BatchTranspose2DCUDAKernel( const int N, const int H, const int W, const T* X, T* Y) { __shared__ T tile[kTileDim][kTileDim + 1]; const int h = (H + kTileDim - 1) / kTileDim; const int w = (W + kTileDim - 1) / kTileDim; const int outer_size = N * h * w; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { const int n = i / (h * w); const int k = i % (h * w); const int r = k / w; const int c = k % w; const int offset = n * H * W; int x = c * kTileDim + threadIdx.x; int y = r * kTileDim + threadIdx.y; if (x < W) { for (int j = 0; j < kTileDim && y + j < H; j += kBlockRows) { #if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__) tile[threadIdx.y + j][threadIdx.x] = __ldg(X + offset + (y + j) * W + x); #else tile[threadIdx.y + j][threadIdx.x] = X[offset + (y + j) * W + x]; #endif } } __syncthreads(); x = r * kTileDim + threadIdx.x; y = c * kTileDim + threadIdx.y; if (x < H) { for (int j = 0; j < kTileDim && y + j < W; j += kBlockRows) { Y[offset + (y + j) * H + x] = tile[threadIdx.x][threadIdx.y + j]; } } __syncthreads(); } } template <typename T, int D> __global__ void TransposeCUDAKernel( const int size, const SimpleArray<int, D> X_strides, const SimpleArray<FIXED_DIVISOR, D> Y_dims, const T* X, T* Y) { CUDA_1D_KERNEL_LOOP(Y_index, size) { int X_index = 0; int Y_index_val = Y_index; #pragma unroll for (int i = D - 1; i >= 0; --i) { int d; FIXED_DIVISOR_DIV_MOD(Y_dims.data[i], Y_index_val, &Y_index_val, &d); X_index += d * X_strides.data[i]; } #if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__) Y[Y_index] = __ldg(X + X_index); #else Y[Y_index] = X[X_index]; #endif } } template <typename T, int D> CAFFE2_CUDA_EXPORT void TransposeCUDAImpl( const int* dims, const int* axes, const T* X, T* Y, CUDAContext* context) { SimpleArray<int, D> X_strides; SimpleArray<FIXED_DIVISOR, D> Y_dims; utils::ComputeTransposedStrides(D, dims, axes, X_strides.data); int size = 1; for (int i = 0; i < D; ++i) { Y_dims.data[i] = FIXED_DIVISOR(dims[axes[i]]); size *= dims[i]; } hipLaunchKernelGGL(( TransposeCUDAKernel<T, D>) , dim3(CAFFE_GET_BLOCKS(size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), size, X_strides, Y_dims, X, Y); } } // namespace #define CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(T) \ template <> \ CAFFE2_CUDA_EXPORT void Transpose<T, CUDAContext>( \ const int ndim, \ const int* dims, \ const int* axes, \ const T* X, \ T* Y, \ CUDAContext* context) { \ if (utils::IsIdentityPermutation(ndim, axes)) { \ const int size = \ std::accumulate(dims, dims + ndim, 1, std::multiplies<int>()); \ context->template CopySameDevice<T>(size, X, Y); \ return; \ } \ if (utils::IsBatchTranspose2D(ndim, axes)) { \ const int N = \ std::accumulate(dims, dims + ndim - 2, 1, std::multiplies<int>()); \ const int H = dims[ndim - 2]; \ const int W = dims[ndim - 1]; \ const int h = (H + kTileDim - 1) / kTileDim; \ const int w = (W + kTileDim - 1) / kTileDim; \ const int outer_size = N * h * w; \ const dim3 dim_block(kTileDim, kBlockRows, 1); \ hipLaunchKernelGGL(( BatchTranspose2DCUDAKernel<T>) \ , dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)), \ dim_block, \ 0, \ context->cuda_stream(), N, H, W, X, Y); \ return; \ } \ DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1( \ ndim, TransposeCUDAImpl, T, dims, axes, X, Y, context); \ } CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(float) CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(double) CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(int) CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(int64_t) #undef 
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE #define CAFFE2_SPECIALIZED_CUDA_NCHW2NHWC(T) \ template <> \ CAFFE2_CUDA_EXPORT void NCHW2NHWC<T, CUDAContext>( \ const int N, \ const int C, \ const int HxW, \ const T* X, \ T* Y, \ CUDAContext* context) { \ const int h = (C + kTileDim - 1) / kTileDim; \ const int w = (HxW + kTileDim - 1) / kTileDim; \ const int outer_size = N * h * w; \ const dim3 dim_block(kTileDim, kBlockRows, 1); \ hipLaunchKernelGGL(( BatchTranspose2DCUDAKernel<T>) \ , dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)), \ dim_block, \ 0, \ context->cuda_stream(), N, C, HxW, X, Y); \ } CAFFE2_SPECIALIZED_CUDA_NCHW2NHWC(float) #undef CAFFE2_SPECIALIZED_CUDA_NCHW2NHWC #define CAFFE2_SPECIALIZED_CUDA_NHWC2NCHW(T) \ template <> \ CAFFE2_CUDA_EXPORT void NHWC2NCHW<T, CUDAContext>( \ const int N, \ const int C, \ const int HxW, \ const T* X, \ T* Y, \ CUDAContext* context) { \ const int h = (HxW + kTileDim - 1) / kTileDim; \ const int w = (C + kTileDim - 1) / kTileDim; \ const int outer_size = N * h * w; \ const dim3 dim_block(kTileDim, kBlockRows, 1); \ hipLaunchKernelGGL(( BatchTranspose2DCUDAKernel<T>) \ , dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)), \ dim_block, \ 0, \ context->cuda_stream(), N, HxW, C, X, Y); \ } CAFFE2_SPECIALIZED_CUDA_NHWC2NCHW(float) #undef CAFFE2_SPECIALIZED_CUDA_NHWC2NCHW } // namespace math } // namespace caffe2
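The NCHW2NHWC and NHWC2NCHW specializations above both route through BatchTranspose2DCUDAKernel, which moves each matrix in 32 x 32 tiles staged through shared memory (kTileDim = 32, kBlockRows = 8; the tile is declared [kTileDim][kTileDim + 1] so the padding column breaks shared-memory bank conflicts). Below is a minimal, self-contained sketch of that tiling pattern for a single H x W row-major matrix; the kernel name transpose_tile_demo and the launch values are illustrative assumptions, not part of the Caffe2 code.

#include <hip/hip_runtime.h>  // builds as plain CUDA too if <cuda_runtime.h> is used instead

constexpr int TILE = 32;  // tile is TILE x TILE elements
constexpr int ROWS = 8;   // each block runs TILE x ROWS threads

// Transpose a single H x W row-major matrix X into W x H row-major Y: Y[c][r] = X[r][c].
__global__ void transpose_tile_demo(int H, int W, const float* X, float* Y) {
  __shared__ float tile[TILE][TILE + 1];    // +1 column avoids bank conflicts
  int x = blockIdx.x * TILE + threadIdx.x;  // column index into X
  int y = blockIdx.y * TILE + threadIdx.y;  // row index into X
  for (int j = 0; j < TILE; j += ROWS) {
    if (x < W && y + j < H) {
      tile[threadIdx.y + j][threadIdx.x] = X[(y + j) * W + x];  // coalesced read
    }
  }
  __syncthreads();
  x = blockIdx.y * TILE + threadIdx.x;  // column index into Y (a row index of X)
  y = blockIdx.x * TILE + threadIdx.y;  // row index into Y (a column index of X)
  for (int j = 0; j < TILE; j += ROWS) {
    if (x < H && y + j < W) {
      Y[(y + j) * H + x] = tile[threadIdx.x][threadIdx.y + j];  // coalesced write
    }
  }
}

// Launch sketch (HIP spelling, matching the hipified file above):
//   dim3 block(TILE, ROWS, 1);
//   dim3 grid((W + TILE - 1) / TILE, (H + TILE - 1) / TILE, 1);
//   hipLaunchKernelGGL(transpose_tile_demo, grid, block, 0, 0, H, W, d_X, d_Y);

Staging through shared memory keeps both the global-memory reads of X and the writes of Y coalesced; without the tile, one of the two accesses would be strided by the matrix width.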
68da2eb9575c97061a28ba0c4ebb3f2dacc1c7eb.cu
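The CUDA original below is the counterpart of the hipified file above. Beyond the cublas/hipblas, cub/hipcub and CUDA_R_16F/HIP_R_16F renamings, the visible difference is the kernel launch syntax that hipify rewrites. A minimal sketch of that correspondence, using a hypothetical kernel my_kernel(int N, const float* x, float* y):

// CUDA spelling (used throughout the file below):
my_kernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(N, x, y);

// Hipified spelling (used throughout the file above); arguments are
// grid, block, dynamic shared-memory bytes, stream, then the kernel arguments:
hipLaunchKernelGGL(my_kernel, dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0,
                   context->cuda_stream(), N, x, y);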
// Implements the math functions for GPU. #include "caffe2/utils/math.h" #include <cstring> #include <limits> #include <numeric> #include <vector> #include <cub/block/block_reduce.cuh> #include <cub/cub.cuh> #include <thrust/device_vector.h> #include <thrust/functional.h> #include "caffe2/core/context_gpu.h" #include "caffe2/utils/conversions.h" #include "caffe2/utils/fixed_divisor.h" // TODO: Move this to fixed_divisor.h #ifdef __HIP_PLATFORM_HCC__ #define FIXED_DIVISOR int32_t #define FIXED_DIVISOR_DIV(d, n) (n / d) #define FIXED_DIVISOR_MOD(d, n) (n % d) #define FIXED_DIVISOR_DIV_MOD(d, n, q, r) \ do { \ const auto n_copy = n; \ *q = n_copy / d; \ *r = n_copy % d; \ } while (0) #else // __HIP_PLATFORM_HCC__ #define FIXED_DIVISOR FixedDivisor<int32_t> #define FIXED_DIVISOR_DIV(d, n) (d.Div(n)) #define FIXED_DIVISOR_MOD(d, n) (d.Mod(n)) #define FIXED_DIVISOR_DIV_MOD(d, n, q, r) (d.DivMod(n, q, r)) #endif // __HIP_PLATFORM_HCC__ #ifdef __HIP_PLATFORM_HCC__ using CUBLAS_HALF_TYPE = rocblas_half; #else // __HIP_PLATFORM_HCC using CUBLAS_HALF_TYPE = __half; #endif // __HIP_PLATFORM_HCC #include "caffe2/utils/math_utils.h" #if THRUST_VERSION >= 100800 #define THRUST_SUPPORTS_PER_THREAD #endif // THRUST_VERSION >= 100800 namespace caffe2 { namespace math { namespace { #define DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Func, expr) \ template <typename T> \ struct Func##Functor { \ inline __host__ __device__ T \ operator()(const T& lhs, const T& rhs) const { \ return lhs expr rhs; \ } \ }; \ template <> \ struct Func##Functor<at::Half> { \ inline __host__ __device__ at::Half operator()( \ const at::Half& lhs, \ const at::Half& rhs) const { \ return convert::To<float, at::Half>(convert::To<at::Half, float>( \ lhs) expr convert::To<at::Half, float>(rhs)); \ } \ }; DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Add, +) DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Sub, -) DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Mul, *) DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Div, /) #undef DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR template <typename T> __global__ void SinCosCUDAKernel(const int N, const T* X, T* S, T* C) { CUDA_1D_KERNEL_LOOP(i, N) { #if __CUDA_ARCH__ >= 350 c10::cuda::compat::sincos(__ldg(X + i), S + i, C + i); #else c10::cuda::compat::sincos(X[i], S + i, C + i); #endif } } template <typename TIn, typename TOut, class BinaryOperator> __global__ void SimpleBinaryOpCUDAKernel( const int N, const BinaryOperator op, const TIn* A, const TIn* B, TOut* C) { CUDA_1D_KERNEL_LOOP(i, N) { C[i] = op(A[i], B[i]); } } template <typename TIn, typename TOut, class BinaryOperator, bool broadcast_1st> __global__ void RowwiseBinaryOpCUDAKenel( const int size, const FIXED_DIVISOR cols, const BinaryOperator op, const TIn* A, const TIn* B, TOut* C) { CUDA_1D_KERNEL_LOOP(C_index, size) { const int j = FIXED_DIVISOR_MOD(cols, C_index); const int A_index = broadcast_1st ? j : C_index; const int B_index = broadcast_1st ? C_index : j; C[C_index] = op(A[A_index], B[B_index]); } } template <typename TIn, typename TOut, class BinaryOperator, bool broadcast_1st> __global__ void ColwiseBinaryOpCUDAKenel( const int size, const FIXED_DIVISOR cols, const BinaryOperator op, const TIn* A, const TIn* B, TOut* C) { CUDA_1D_KERNEL_LOOP(C_index, size) { const int i = FIXED_DIVISOR_DIV(cols, C_index); const int A_index = broadcast_1st ? i : C_index; const int B_index = broadcast_1st ? 
C_index : i; C[C_index] = op(A[A_index], B[B_index]); } } template <typename TIn, typename TOut, class BinaryOperator, int D> __global__ void BroadcastBinaryOpCUDAKernel( const int size, const SimpleArray<int, D> A_strides, const SimpleArray<int, D> B_strides, const SimpleArray<FIXED_DIVISOR, D> C_dims, const BinaryOperator op, const TIn* A, const TIn* B, TOut* C) { CUDA_1D_KERNEL_LOOP(C_index, size) { int A_index = 0; int B_index = 0; int C_index_val = C_index; #pragma unroll for (int i = D - 1; i >= 0; --i) { int d; FIXED_DIVISOR_DIV_MOD(C_dims.data[i], C_index_val, &C_index_val, &d); A_index += d * A_strides.data[i]; B_index += d * B_strides.data[i]; } C[C_index] = op(A[A_index], B[B_index]); } } template <typename TIn, typename TOut, class BinaryOperator> CAFFE2_CUDA_EXPORT void BinaryOpWith2DBroadcasting( const int rows, const int cols, const bool rowwise_broadcast, const bool broadcast_1st, const BinaryOperator& op, const TIn* A, const TIn* B, TOut* C, CUDAContext* context) { if (rows == 0 || cols == 0) { return; } const int size = rows * cols; const FIXED_DIVISOR cols_div(cols); if (rowwise_broadcast) { if (broadcast_1st) { RowwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, true> <<<CAFFE_GET_BLOCKS(size), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(size, cols_div, op, A, B, C); } else { RowwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, false> <<<CAFFE_GET_BLOCKS(size), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(size, cols_div, op, A, B, C); } } else { if (broadcast_1st) { ColwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, true> <<<CAFFE_GET_BLOCKS(size), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(size, cols_div, op, A, B, C); } else { ColwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, false> <<<CAFFE_GET_BLOCKS(size), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(size, cols_div, op, A, B, C); } } } template <typename TIn, typename TOut, class BinaryOperator, int D> CAFFE2_CUDA_EXPORT void BroadcastBinaryOpImpl( const int* A_dims, const int* B_dims, const int* C_dims, const BinaryOperator& op, const TIn* A, const TIn* B, TOut* C, CUDAContext* context) { SimpleArray<int, D> A_strides_array; SimpleArray<int, D> B_strides_array; SimpleArray<FIXED_DIVISOR, D> C_dims_array; int A_stride = 1; int B_stride = 1; for (int i = D - 1; i >= 0; --i) { if (C_dims[i] == 0) { return; } A_strides_array.data[i] = A_dims[i] == 1 ? 0 : A_stride; B_strides_array.data[i] = B_dims[i] == 1 ? 
0 : B_stride; A_stride *= A_dims[i]; B_stride *= B_dims[i]; C_dims_array.data[i] = FIXED_DIVISOR(C_dims[i]); } const int size = std::accumulate(C_dims, C_dims + D, 1, std::multiplies<int>()); BroadcastBinaryOpCUDAKernel<TIn, TOut, BinaryOperator, D> <<<CAFFE_GET_BLOCKS(size), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( size, A_strides_array, B_strides_array, C_dims_array, op, A, B, C); } template <typename TIn, typename TOut, class BinaryOperator> CAFFE2_CUDA_EXPORT void BroadcastBinaryOp( const int A_ndim, const int* A_dims, const int B_ndim, const int* B_dims, const BinaryOperator& op, const TIn* A, const TIn* B, TOut* C, CUDAContext* context) { const int ndim = std::max(A_ndim, B_ndim); std::vector<int> A_dims_array(ndim); std::vector<int> B_dims_array(ndim); std::vector<int> C_dims_array(ndim); utils::ComputeBroadcastBinaryOpDims( A_ndim, A_dims, B_ndim, B_dims, A_dims_array.data(), B_dims_array.data(), C_dims_array.data()); if (A_dims_array == B_dims_array) { const int size = std::accumulate( C_dims_array.cbegin(), C_dims_array.cend(), 1, std::multiplies<int>()); SimpleBinaryOpCUDAKernel<TIn, TOut, BinaryOperator> <<<CAFFE_GET_BLOCKS(size), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(size, op, A, B, C); return; } int rows; int cols; bool broadcast_1st; if (utils::IsRowwiseBroadcastBinaryOp( ndim, A_dims_array.data(), B_dims_array.data(), &rows, &cols, &broadcast_1st)) { BinaryOpWith2DBroadcasting<TIn, TOut, BinaryOperator>( rows, cols, true, broadcast_1st, op, A, B, C, context); return; } if (utils::IsColwiseBroadcastBinaryOp( ndim, A_dims_array.data(), B_dims_array.data(), &rows, &cols, &broadcast_1st)) { BinaryOpWith2DBroadcasting<TIn, TOut, BinaryOperator>( rows, cols, false, broadcast_1st, op, A, B, C, context); return; } DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_3( ndim, BroadcastBinaryOpImpl, TIn, TOut, BinaryOperator, A_dims_array.data(), B_dims_array.data(), C_dims_array.data(), op, A, B, C, context); } } // namespace #define DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(T, Func, op) \ __global__ void Func##CUDAKernel(const int N, const T* X, T* Y) { \ CUDA_1D_KERNEL_LOOP(i, N) { \ Y[i] = op(X[i]); \ } \ } \ template <> \ CAFFE2_CUDA_EXPORT void Func<T, CUDAContext>( \ const int N, const T* x, T* y, CUDAContext* context) { \ Func##CUDAKernel<<< \ CAFFE_GET_BLOCKS(N), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream()>>>(N, x, y); \ } DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Exp, expf) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Log, logf) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cos, cosf) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Acos, acosf) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sin, sinf) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Asin, asinf) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Tan, tanf) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Atan, atanf) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sinh, sinhf) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cosh, coshf) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Tanh, tanhf) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Abs, fabsf) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sqr, utils::Square<float>) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sqrt, sqrtf) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Rsqrt, rsqrtf) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cbrt, cbrtf) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Erf, erff) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Erf, erf) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cube, utils::Cube<float>) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Cube, utils::Cube<double>) 
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION( std::int32_t, Cube, utils::Cube<std::int32_t>) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION( std::int64_t, Cube, utils::Cube<std::int64_t>) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(bool, Not, utils::Not) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Neg, utils::Negate<float>) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Neg, utils::Negate<double>) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION( std::int32_t, Neg, utils::Negate<std::int32_t>) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION( std::int64_t, Neg, utils::Negate<std::int64_t>) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sign, utils::Sign<float>) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Sign, utils::Sign<double>) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION( std::int32_t, Sign, utils::Sign<std::int32_t>) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION( std::int64_t, Sign, utils::Sign<std::int64_t>) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Inv, utils::Inv<float>) DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Inv, utils::Inv<double>) #undef DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION #define CAFFE2_SPECIALIZED_CUDA_SINCOS(T) \ template <> \ CAFFE2_CUDA_EXPORT void SinCos<T, CUDAContext>( \ const int N, const T* x, T* ys, T* yc, CUDAContext* context) { \ SinCosCUDAKernel<<< \ CAFFE_GET_BLOCKS(N), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream()>>>(N, x, ys, yc); \ } CAFFE2_SPECIALIZED_CUDA_SINCOS(float) CAFFE2_SPECIALIZED_CUDA_SINCOS(double) #undef CAFFE2_SPECIALIZED_CUDA_SINCOS #define DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(TIn, TOut, Func, Op) \ template <> \ CAFFE2_CUDA_EXPORT void Func<TIn, CUDAContext>( \ const int N, \ const TIn* A, \ const TIn* B, \ TOut* C, \ CUDAContext* context) { \ SimpleBinaryOpCUDAKernel<TIn, TOut, Op<TIn>> \ <<<CAFFE_GET_BLOCKS(N), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream()>>>(N, Op<TIn>(), A, B, C); \ } #define DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(Func, Op) \ DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int32_t, bool, Func, Op) \ DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int64_t, bool, Func, Op) \ DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, bool, Func, Op) \ DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, bool, Func, Op) \ DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, bool, Func, Op) DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(EQ, thrust::equal_to) DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(NE, thrust::not_equal_to) DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(LT, thrust::less) DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(LE, thrust::less_equal) DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(GT, thrust::greater) DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(GE, thrust::greater_equal) #undef DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION #define DEFINE_SIMPLE_CUDA_BINARY_FUNCTION(Func, Op) \ DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int32_t, std::int32_t, Func, Op) \ DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int64_t, std::int64_t, Func, Op) \ DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, float, Func, Op) \ DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, double, Func, Op) \ DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(at::Half, at::Half, Func, Op) DEFINE_SIMPLE_CUDA_BINARY_FUNCTION(Add, AddFunctor) DEFINE_SIMPLE_CUDA_BINARY_FUNCTION(Sub, SubFunctor) DEFINE_SIMPLE_CUDA_BINARY_FUNCTION(Mul, MulFunctor) DEFINE_SIMPLE_CUDA_BINARY_FUNCTION(Div, DivFunctor) #undef DEFINE_SIMPLE_CUDA_BINARY_FUNCTION DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, bool, And, thrust::logical_and) DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, bool, Or, thrust::logical_or) DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, bool, Xor, thrust::bit_xor) #define DEFINE_SIMPLE_CUDA_BITWISE_BINARY_FUNCTION(Func, Op) \ 
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, bool, Func, Op) \ DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int32_t, std::int32_t, Func, Op) \ DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int64_t, std::int64_t, Func, Op) DEFINE_SIMPLE_CUDA_BITWISE_BINARY_FUNCTION(BitwiseAnd, thrust::bit_and) DEFINE_SIMPLE_CUDA_BITWISE_BINARY_FUNCTION(BitwiseOr, thrust::bit_or) DEFINE_SIMPLE_CUDA_BITWISE_BINARY_FUNCTION(BitwiseXor, thrust::bit_xor) #undef DEFINE_SIMPLE_CUDA_BITWISE_BINARY_FUNCTION DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION( float, float, ElemwiseMax, thrust::maximum); #undef DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION #define DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(TIn, TOut, Func, Op) \ template <> \ CAFFE2_CUDA_EXPORT void Rowwise##Func<TIn, CUDAContext, true>( \ const int rows, \ const int cols, \ const TIn* A, \ const TIn* B, \ TOut* C, \ CUDAContext* context) { \ if (rows == 0 || cols == 0) { \ return; \ } \ const int size = rows * cols; \ const FIXED_DIVISOR cols_div(cols); \ RowwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, true> \ <<<CAFFE_GET_BLOCKS(size), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream()>>>(size, cols_div, Op<TIn>(), A, B, C); \ } \ template <> \ CAFFE2_CUDA_EXPORT void Rowwise##Func<TIn, CUDAContext, false>( \ const int rows, \ const int cols, \ const TIn* A, \ const TIn* B, \ TOut* C, \ CUDAContext* context) { \ if (rows == 0 || cols == 0) { \ return; \ } \ const int size = rows * cols; \ const FIXED_DIVISOR cols_div(cols); \ RowwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, false> \ <<<CAFFE_GET_BLOCKS(size), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream()>>>(size, cols_div, Op<TIn>(), A, B, C); \ } \ template <> \ CAFFE2_CUDA_EXPORT void Colwise##Func<TIn, CUDAContext, true>( \ const int rows, \ const int cols, \ const TIn* A, \ const TIn* B, \ TOut* C, \ CUDAContext* context) { \ if (rows == 0 || cols == 0) { \ return; \ } \ const int size = rows * cols; \ const FIXED_DIVISOR cols_div(cols); \ ColwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, true> \ <<<CAFFE_GET_BLOCKS(size), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream()>>>(size, cols_div, Op<TIn>(), A, B, C); \ } \ template <> \ CAFFE2_CUDA_EXPORT void Colwise##Func<TIn, CUDAContext, false>( \ const int rows, \ const int cols, \ const TIn* A, \ const TIn* B, \ TOut* C, \ CUDAContext* context) { \ if (rows == 0 || cols == 0) { \ return; \ } \ const int size = rows * cols; \ const FIXED_DIVISOR cols_div(cols); \ ColwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, false> \ <<<CAFFE_GET_BLOCKS(size), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream()>>>(size, cols_div, Op<TIn>(), A, B, C); \ } #define DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(Func, Op) \ DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(std::int32_t, bool, Func, Op) \ DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(std::int64_t, bool, Func, Op) \ DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(float, bool, Func, Op) \ DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(double, bool, Func, Op) \ DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op) DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(EQ, thrust::equal_to) DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(NE, thrust::not_equal_to) DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(LT, thrust::less) DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(LE, thrust::less_equal) DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(GT, thrust::greater) DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(GE, thrust::greater_equal) #undef DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION #define DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Func, Op) \ 
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION( \ std::int32_t, std::int32_t, Func, Op) \ DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION( \ std::int64_t, std::int64_t, Func, Op) \ DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(float, float, Func, Op) \ DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(double, double, Func, Op) \ DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(at::Half, at::Half, Func, Op) DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Add, AddFunctor) DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Sub, SubFunctor) DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Mul, MulFunctor) DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Div, DivFunctor) #undef DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, And, thrust::logical_and) DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Or, thrust::logical_or) DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Xor, thrust::bit_xor) #define DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(Func, Op) \ DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op) \ DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION( \ std::int32_t, std::int32_t, Func, Op) \ DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION( \ std::int64_t, std::int64_t, Func, Op) DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseAnd, thrust::bit_and) DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseOr, thrust::bit_or) DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseXor, thrust::bit_xor) #undef DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION #undef DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION #define DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(TIn, TOut, Func, Op) \ template <> \ CAFFE2_CUDA_EXPORT void Func<TIn, CUDAContext>( \ const int A_ndim, \ const int* A_dims, \ const int B_ndim, \ const int* B_dims, \ const TIn* A, \ const TIn* B, \ TOut* C, \ CUDAContext* context) { \ BroadcastBinaryOp<TIn, TOut, Op<TIn>>( \ A_ndim, A_dims, B_ndim, B_dims, Op<TIn>(), A, B, C, context); \ } #define DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(Func, Op) \ DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(std::int32_t, bool, Func, Op) \ DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(std::int64_t, bool, Func, Op) \ DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(float, bool, Func, Op) \ DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(double, bool, Func, Op) \ DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op) DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(EQ, thrust::equal_to) DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(NE, thrust::not_equal_to) DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(LT, thrust::less) DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(LE, thrust::less_equal) DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(GT, thrust::greater) DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(GE, thrust::greater_equal) #undef DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION #define DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Func, Op) \ DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION( \ std::int32_t, std::int32_t, Func, Op) \ DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION( \ std::int64_t, std::int64_t, Func, Op) \ DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(float, float, Func, Op) \ DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(double, double, Func, Op) \ DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(at::Half, at::Half, Func, Op) DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Add, AddFunctor) DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Sub, SubFunctor) DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Mul, MulFunctor) DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Div, DivFunctor) #undef DEFINE_BROADCAST_CUDA_BINARY_FUNCTION DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, And, thrust::logical_and) 
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Or, thrust::logical_or) DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Xor, thrust::bit_xor) #define DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(Func, Op) \ DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op) \ DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION( \ std::int32_t, std::int32_t, Func, Op) \ DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(std::int64_t, std::int64_t, Func, Op) DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseAnd, thrust::bit_and) DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseOr, thrust::bit_or) DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseXor, thrust::bit_xor) #undef DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION #undef DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION #define DELEGATE_REDUCTION_FUNCTION(T, Funcname, func) \ template <> \ CAFFE2_CUDA_EXPORT void Funcname<T, CUDAContext>( \ const int N, \ const T* src, \ T* dst, \ Tensor* scratch_ptr, \ CUDAContext* context) { \ size_t memRequired = 0; \ cub::DeviceReduce::func( \ nullptr, memRequired, src, dst, N, context->cuda_stream()); \ auto buffer_size = \ static_cast<int64_t>((memRequired + sizeof(T) - 1) / sizeof(T)); \ scratch_ptr->Resize(std::vector<int64_t>{buffer_size}); \ cub::DeviceReduce::func( \ static_cast<void*>(scratch_ptr->mutable_data<T>()), \ memRequired, \ src, \ dst, \ N, \ context->cuda_stream()); \ } DELEGATE_REDUCTION_FUNCTION(float, ReduceMin, Min) DELEGATE_REDUCTION_FUNCTION(float, ReduceMax, Max) DELEGATE_REDUCTION_FUNCTION(int32_t, ReduceMax, Max) DELEGATE_REDUCTION_FUNCTION(int64_t, ReduceMax, Max) #undef DELEGATE_REDUCTION_FUNCTION // Caffe2 gemm provides a simpler interface to the gemm functions, with the // limitation that the data has to be contiguous in memory. template <> CAFFE2_CUDA_EXPORT void Gemm<float, CUDAContext>( const CBLAS_TRANSPOSE trans_A, const CBLAS_TRANSPOSE trans_B, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C, CUDAContext* context, TensorProto::DataType math_type) { // Note that cublas follows fortran order, so the order is different from // the cblas convention. const int lda = (trans_A == CblasNoTrans) ? K : M; const int ldb = (trans_B == CblasNoTrans) ? N : K; const cublasOperation_t cu_trans_A = (trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; const cublasOperation_t cu_trans_B = (trans_B == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_ENFORCE( cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE(cublasSgemm( context->cublas_handle(), cu_trans_B, cu_trans_A, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> CAFFE2_CUDA_EXPORT void Gemm<at::Half, CUDAContext>( const CBLAS_TRANSPOSE trans_A, const CBLAS_TRANSPOSE trans_B, const int M, const int N, const int K, const float alpha, const at::Half* A, const at::Half* B, const float beta, at::Half* C, CUDAContext* context, TensorProto::DataType math_type) { // Note that cublas follows fortran order, so the order is different from // the cblas convention. const int lda = (trans_A == CblasNoTrans) ? K : M; const int ldb = (trans_B == CblasNoTrans) ? N : K; const cublasOperation_t cu_trans_A = (trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; const cublasOperation_t cu_trans_B = (trans_B == CblasNoTrans) ? 
CUBLAS_OP_N : CUBLAS_OP_T; if (math_type == TensorProto_DataType_FLOAT) { CUBLAS_ENFORCE(cublasSetPointerMode( context->cublas_handle(), CUBLAS_POINTER_MODE_HOST)); #ifdef __HIP_PLATFORM_HCC__ // rocblas doesn't support cublasSgemmEx type API yet. // It has more general rocblas_gemm_ex API which is more close to // cublasGemmEx rocblas_gemm_ex does D = alpha*op( A )*op( B ) + beta*C, // whereas cublasgemmEx does C = alpha*op( A )*op( B ) + beta*C ROCBLAS_ENFORCE(rocblas_gemm_ex( context->rocblashandle(), cu_trans_B, cu_trans_A, N, M, K, &alpha, B, rocblas_datatype_f16_r, ldb, A, rocblas_datatype_f16_r, lda, &beta, C, rocblas_datatype_f16_r, N, C, // D rocblas_datatype_f16_r, // D type N, // ldd rocblas_datatype_f32_r, // compute type rocblas_gemm_algo_standard, // rocblas_gemm_algo 0, // solution index, reserved for future use 0, // flags, reserved for future use NULL, // size of workspace NULL)); // workspace #else CUBLAS_ENFORCE(cublasSgemmEx( context->cublas_handle(), cu_trans_B, cu_trans_A, N, M, K, &alpha, B, CUDA_R_16F, ldb, A, CUDA_R_16F, lda, &beta, C, CUDA_R_16F, N)); #endif // __HIP_PLATFORM_HCC__ } else if (math_type == TensorProto_DataType_FLOAT16) { // convert alpha, beta from float -> __half const __half alpha_fp16 = at::Half(alpha); const __half beta_fp16 = at::Half(beta); // call cublasHgemm CUBLAS_ENFORCE(cublasSetPointerMode( context->cublas_handle(), CUBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE(cublasHgemm( context->cublas_handle(), cu_trans_B, cu_trans_A, N, M, K, reinterpret_cast<const CUBLAS_HALF_TYPE*>(&alpha_fp16), reinterpret_cast<const CUBLAS_HALF_TYPE*>(B), ldb, reinterpret_cast<const CUBLAS_HALF_TYPE*>(A), lda, reinterpret_cast<const CUBLAS_HALF_TYPE*>(&beta_fp16), reinterpret_cast<CUBLAS_HALF_TYPE*>(C), N)); } else { // fail CAFFE_THROW("Unsupported math type"); } } template <> CAFFE2_CUDA_EXPORT void BiasCHW<float, CUDAContext>( const float* bias, const float* bias_multiplier, const int bias_channels, const int image_size, float* image, CUDAContext* context) { Gemm<float, CUDAContext>( CblasNoTrans, CblasNoTrans, bias_channels, image_size, 1, 1, bias, bias_multiplier, 1, image, context); } template <> CAFFE2_CUDA_EXPORT void GemmBatched<float, CUDAContext>( const CBLAS_TRANSPOSE trans_A, const CBLAS_TRANSPOSE trans_B, const int batch_size, const int M, const int N, const int K, const float alpha, const float** A, const float** B, const float beta, float** C, CUDAContext* context, TensorProto::DataType math_type) { #if __CUDACC_VER_MAJOR__ < 8 || defined(__HIP_PLATFORM_HCC__) // loop over matrices in the batch for (int i = 0; i < batch_size; ++i) { Gemm<float, CUDAContext>( trans_A, trans_B, M, N, K, alpha, A[i], B[i], beta, C[i], context, math_type); } #else // Note that cublas follows fortran order, so the order is different from // the cblas convention. const int lda = (trans_A == CblasNoTrans) ? K : M; const int ldb = (trans_B == CblasNoTrans) ? N : K; const int ldc = N; const cublasOperation_t cu_trans_A = (trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; const cublasOperation_t cu_trans_B = (trans_B == CblasNoTrans) ? 
CUBLAS_OP_N : CUBLAS_OP_T; thrust::device_vector<const float*> A_device(A, A + batch_size); thrust::device_vector<const float*> B_device(B, B + batch_size); thrust::device_vector<float*> C_device(C, C + batch_size); CUBLAS_ENFORCE( cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE(cublasSgemmBatched( context->cublas_handle(), cu_trans_B, cu_trans_A, N, M, K, &alpha, B_device.data().get(), ldb, A_device.data().get(), lda, &beta, C_device.data().get(), ldc, batch_size)); #endif } template <> CAFFE2_CUDA_EXPORT void GemmStridedBatched<float, CUDAContext>( const CBLAS_TRANSPOSE trans_A, const CBLAS_TRANSPOSE trans_B, const int batch_size, const int M, const int N, const int K, const float alpha, const float* A, const int A_stride, const float* B, const int B_stride, const float beta, float* C, const int C_stride, CUDAContext* context, TensorProto::DataType math_type) { #if __CUDACC_VER_MAJOR__ < 8 && !defined(__HIP_PLATFORM_HCC__) // loop over matrices in the batch for (int i = 0; i < batch_size; ++i) { Gemm<float, CUDAContext>( trans_A, trans_B, M, N, K, alpha, A, B, beta, C, context, math_type); A += A_stride; B += B_stride; C += C_stride; } #else // Note that cublas follows fortran order, so the order is different from // the cblas convention. const int lda = (trans_A == CblasNoTrans) ? K : M; const int ldb = (trans_B == CblasNoTrans) ? N : K; const int ldc = N; const cublasOperation_t cu_trans_A = (trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; const cublasOperation_t cu_trans_B = (trans_B == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_ENFORCE( cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE(cublasSgemmStridedBatched( context->cublas_handle(), cu_trans_B, cu_trans_A, N, M, K, &alpha, B, ldb, B_stride, A, lda, A_stride, &beta, C, ldc, C_stride, batch_size)); #endif } template <> CAFFE2_CUDA_EXPORT void GemmBatched<at::Half, CUDAContext>( const CBLAS_TRANSPOSE trans_A, const CBLAS_TRANSPOSE trans_B, const int batch_size, const int M, const int N, const int K, const float alpha, const at::Half** A, const at::Half** B, const float beta, at::Half** C, CUDAContext* context, TensorProto::DataType math_type) { #if __CUDACC_VER_MAJOR__ < 9 // loop over matrices in the batch for (int i = 0; i < batch_size; ++i) { Gemm<at::Half, CUDAContext>( trans_A, trans_B, M, N, K, alpha, A[i], B[i], beta, C[i], context, math_type); } #else // Note that cublas follows fortran order, so the order is different from // the cblas convention. const int lda = (trans_A == CblasNoTrans) ? K : M; const int ldb = (trans_B == CblasNoTrans) ? N : K; const int ldc = N; const cublasOperation_t cu_trans_A = (trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; const cublasOperation_t cu_trans_B = (trans_B == CblasNoTrans) ? 
CUBLAS_OP_N : CUBLAS_OP_T; if (math_type == TensorProto_DataType_FLOAT) { #if CUDA_VERSION < 9010 // loop over matrices in the batch for (int i = 0; i < batch_size; ++i) { Gemm<at::Half, CUDAContext>( trans_A, trans_B, M, N, K, alpha, A[i], B[i], beta, C[i], context, math_type); } #else thrust::device_vector<const void*> A_device(A, A + batch_size); thrust::device_vector<const void*> B_device(B, B + batch_size); thrust::device_vector<void*> C_device(C, C + batch_size); CUBLAS_ENFORCE(cublasSetPointerMode( context->cublas_handle(), CUBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE(cublasGemmBatchedEx( context->cublas_handle(), cu_trans_B, cu_trans_A, N, M, K, &alpha, B_device.data().get(), CUDA_R_16F, ldb, A_device.data().get(), CUDA_R_16F, lda, &beta, C_device.data().get(), CUDA_R_16F, ldc, batch_size, CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); #endif } else if (math_type == TensorProto_DataType_FLOAT16) { // Convert alpha, beta from float -> __half const __half alpha_fp16 = at::Half(alpha); const __half beta_fp16 = at::Half(beta); std::vector<const __half*> A_array(batch_size); std::vector<const __half*> B_array(batch_size); std::vector<__half*> C_array(batch_size); for (int i = 0; i < batch_size; ++i) { A_array[i] = reinterpret_cast<const __half*>(A[i]); B_array[i] = reinterpret_cast<const __half*>(B[i]); C_array[i] = reinterpret_cast<__half*>(C[i]); } thrust::device_vector<const __half*> A_device( A_array.cbegin(), A_array.cend()); thrust::device_vector<const __half*> B_device( B_array.cbegin(), B_array.cend()); thrust::device_vector<__half*> C_device(C_array.cbegin(), C_array.cend()); CUBLAS_ENFORCE(cublasSetPointerMode( context->cublas_handle(), CUBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE(cublasHgemmBatched( context->cublas_handle(), cu_trans_B, cu_trans_A, N, M, K, &alpha_fp16, B_device.data().get(), ldb, A_device.data().get(), lda, &beta_fp16, C_device.data().get(), ldc, batch_size)); } else { CAFFE_THROW("Unsupported math type"); } #endif } template <> CAFFE2_CUDA_EXPORT void GemmStridedBatched<at::Half, CUDAContext>( const CBLAS_TRANSPOSE trans_A, const CBLAS_TRANSPOSE trans_B, const int batch_size, const int M, const int N, const int K, const float alpha, const at::Half* A, const int A_stride, const at::Half* B, const int B_stride, const float beta, at::Half* C, const int C_stride, CUDAContext* context, TensorProto::DataType math_type) { #if __CUDACC_VER_MAJOR__ < 8 && !defined(__HIP_PLATFORM_HCC__) // loop over matrices in the batch for (int i = 0; i < batch_size; ++i) { Gemm<at::Half, CUDAContext>( trans_A, trans_B, M, N, K, alpha, A, B, beta, C, context, math_type); A += A_stride; B += B_stride; C += C_stride; } #else // Note that cublas follows fortran order, so the order is different from // the cblas convention. const int lda = (trans_A == CblasNoTrans) ? K : M; const int ldb = (trans_B == CblasNoTrans) ? N : K; const int ldc = N; const cublasOperation_t cu_trans_A = (trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; const cublasOperation_t cu_trans_B = (trans_B == CblasNoTrans) ? 
CUBLAS_OP_N : CUBLAS_OP_T; if (math_type == TensorProto_DataType_FLOAT) { #if CUDA_VERSION < 9010 && !defined(__HIP_PLATFORM_HCC__) // loop over matrices in the batch for (int i = 0; i < batch_size; ++i) { Gemm<at::Half, CUDAContext>( trans_A, trans_B, M, N, K, alpha, A, B, beta, C, context, math_type); A += A_stride; B += B_stride; C += C_stride; } #else CUBLAS_ENFORCE(cublasSetPointerMode( context->cublas_handle(), CUBLAS_POINTER_MODE_HOST)); #ifdef __HIP_PLATFORM_HCC__ // D[i*stride_d] = alpha*op(A[i*stride_a])*op(B[i*stride_b]) + // beta*C[i*stride_c], for i in [0,batch_count-1] ROCBLAS_ENFORCE(rocblas_gemm_strided_batched_ex( context->rocblashandle(), cu_trans_B, cu_trans_A, N, M, K, &alpha, B, rocblas_datatype_f16_r, ldb, B_stride, A, rocblas_datatype_f16_r, lda, A_stride, &beta, C, rocblas_datatype_f16_r, ldc, C_stride, C, // D rocblas_datatype_f16_r, // D type ldc, // ldd C_stride, // D stride batch_size, rocblas_datatype_f32_r, // compute type rocblas_gemm_algo_standard, // rocblas_gemm_algo 0, // solution index, reserved for future use 0, // flags, reserved for future use NULL, // size of workspace NULL)); // workspace #else CUBLAS_ENFORCE(cublasGemmStridedBatchedEx( context->cublas_handle(), cu_trans_B, cu_trans_A, N, M, K, &alpha, B, CUDA_R_16F, ldb, B_stride, A, CUDA_R_16F, lda, A_stride, &beta, C, CUDA_R_16F, ldc, C_stride, batch_size, CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); #endif // __HIP_PLATFORM_HCC__ #endif } else if (math_type == TensorProto_DataType_FLOAT16) { // Convert alpha, beta from float -> __half const __half alpha_fp16 = at::Half(alpha); const __half beta_fp16 = at::Half(beta); CUBLAS_ENFORCE(cublasSetPointerMode( context->cublas_handle(), CUBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE(cublasHgemmStridedBatched( context->cublas_handle(), cu_trans_B, cu_trans_A, N, M, K, reinterpret_cast<const CUBLAS_HALF_TYPE*>(&alpha_fp16), reinterpret_cast<const CUBLAS_HALF_TYPE*>(B), ldb, B_stride, reinterpret_cast<const CUBLAS_HALF_TYPE*>(A), lda, A_stride, reinterpret_cast<const CUBLAS_HALF_TYPE*>(&beta_fp16), reinterpret_cast<CUBLAS_HALF_TYPE*>(C), ldc, C_stride, batch_size)); } else { CAFFE_THROW("Unsupported math type"); } #endif } #if CUDA_VERSION >= 9000 // No change, but required. Defer to default CUDA engine template <> CAFFE2_CUDA_EXPORT void Gemm<float, CUDAContext, TensorCoreEngine>( const CBLAS_TRANSPOSE trans_A, const CBLAS_TRANSPOSE trans_B, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C, CUDAContext* context, TensorProto::DataType math_type) { return Gemm<float, CUDAContext>( trans_A, trans_B, M, N, K, alpha, A, B, beta, C, context, math_type); } template <> CAFFE2_CUDA_EXPORT void Gemm<at::Half, CUDAContext, TensorCoreEngine>( const CBLAS_TRANSPOSE trans_A, const CBLAS_TRANSPOSE trans_B, const int M, const int N, const int K, const float alpha, const at::Half* A, const at::Half* B, const float beta, at::Half* C, CUDAContext* context, TensorProto::DataType math_type) { // Note that cublas follows fortran order, so the order is different from // the cblas convention. const int lda = (trans_A == CblasNoTrans) ? K : M; const int ldb = (trans_B == CblasNoTrans) ? N : K; const cublasOperation_t cu_trans_A = (trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; const cublasOperation_t cu_trans_B = (trans_B == CblasNoTrans) ? 
CUBLAS_OP_N : CUBLAS_OP_T; // enable TensorCore for this call on this handle if (TensorCoreAvailable()) { CUBLAS_ENFORCE( cublasSetMathMode(context->cublas_handle(), CUBLAS_TENSOR_OP_MATH)); } CUBLAS_ENFORCE( cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE(cublasGemmEx( context->cublas_handle(), cu_trans_B, cu_trans_A, N, M, K, &alpha, B, CUDA_R_16F, ldb, A, CUDA_R_16F, lda, &beta, C, CUDA_R_16F, N, CUDA_R_32F, CUBLAS_GEMM_DFALT_TENSOR_OP)); // Now disable TensorCore math for subsequent calls to this handle if (TensorCoreAvailable()) { CUBLAS_ENFORCE( cublasSetMathMode(context->cublas_handle(), CUBLAS_DEFAULT_MATH)); } } template <> CAFFE2_CUDA_EXPORT void GemmStridedBatched<float, CUDAContext, TensorCoreEngine>( const CBLAS_TRANSPOSE trans_A, const CBLAS_TRANSPOSE trans_B, const int batch_size, const int M, const int N, const int K, const float alpha, const float* A, const int A_stride, const float* B, const int B_stride, const float beta, float* C, const int C_stride, CUDAContext* context, TensorProto::DataType math_type) { return GemmStridedBatched<float, CUDAContext, DefaultEngine>( trans_A, trans_B, batch_size, M, N, K, alpha, A, A_stride, B, B_stride, beta, C, C_stride, context, math_type); } template <> CAFFE2_CUDA_EXPORT void GemmStridedBatched<at::Half, CUDAContext, TensorCoreEngine>( const CBLAS_TRANSPOSE trans_A, const CBLAS_TRANSPOSE trans_B, const int batch_size, const int M, const int N, const int K, const float alpha, const at::Half* A, const int A_stride, const at::Half* B, const int B_stride, const float beta, at::Half* C, const int C_stride, CUDAContext* context, TensorProto::DataType math_type) { return GemmStridedBatched<at::Half, CUDAContext, DefaultEngine>( trans_A, trans_B, batch_size, M, N, K, alpha, A, A_stride, B, B_stride, beta, C, C_stride, context, math_type); } #endif // CUDA_VERSION >= 9000 template <> CAFFE2_CUDA_EXPORT void GemmEx<float, CUDAContext>( const CBLAS_TRANSPOSE trans_A, const CBLAS_TRANSPOSE trans_B, const int M, const int N, const int K, const float alpha, const float* A, const int lda, const float* B, const int ldb, const float beta, float* C, const int ldc, CUDAContext* context) { // Note that cublas follows fortran order, so the order is different from // the cblas convention. const cublasOperation_t cu_trans_A = (trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; const cublasOperation_t cu_trans_B = (trans_B == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_ENFORCE( cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE(cublasSgemm( context->cublas_handle(), cu_trans_B, cu_trans_A, N, M, K, &alpha, B, ldb, A, lda, &beta, C, ldc)); } template <> CAFFE2_CUDA_EXPORT void Gemv<float, CUDAContext>( const CBLAS_TRANSPOSE trans_A, const int M, const int N, const float alpha, const float* A, const float* x, const float beta, float* y, CUDAContext* context, TensorProto::DataType math_type) { const cublasOperation_t cu_trans_A = (trans_A == CblasNoTrans) ? 
CUBLAS_OP_T : CUBLAS_OP_N; CUBLAS_ENFORCE( cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE(cublasSgemv( context->cublas_handle(), cu_trans_A, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } // Batched Add variants namespace { template <typename T> __global__ void AddStripedBatchKernel( const int N, const T* first, T* Y, const int stripe, const int batch) { for (int j = 0; j < batch; j++) { const T* x = first + j * stripe; CUDA_1D_KERNEL_LOOP(i, N) { float tmpY = convert::To<T, float>(Y[i]); tmpY += convert::To<T, float>(x[i]); Y[i] = convert::To<float, T>(tmpY); } } } } // namespace #define CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(T) \ template <> \ CAFFE2_CUDA_EXPORT void AddStripedBatch<T, CUDAContext>( \ const int N, \ const T* first, \ T* Y, \ const int stripe, \ const int batch, \ CUDAContext* context) { \ AddStripedBatchKernel<T> \ <<<CAFFE_GET_BLOCKS(N), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream()>>>(N, first, Y, stripe, batch); \ } CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(float); CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(at::Half); #undef CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH template <> CAFFE2_CUDA_EXPORT void Gemv<at::Half, CUDAContext>( const CBLAS_TRANSPOSE trans_A, const int M, const int N, const float alpha, const at::Half* A, const at::Half* x, const float beta, at::Half* y, CUDAContext* context, TensorProto::DataType math_type) { const cublasOperation_t cu_trans_A = (trans_A == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N; // sort out what we need to call cublasSgemmEx / cublasHgemm const int m = (cu_trans_A == CUBLAS_OP_N) ? N : M; const int k = (cu_trans_A == CUBLAS_OP_N) ? M : N; const int lda = (cu_trans_A == CUBLAS_OP_N) ? m : k; const int ldc = m; if (math_type == TensorProto_DataType_FLOAT) { CUBLAS_ENFORCE(cublasSetPointerMode( context->cublas_handle(), CUBLAS_POINTER_MODE_HOST)); #ifdef __HIP_PLATFORM_HCC__ // rocblas doesn't support cublasSgemmEx type API yet. 
// It has more general rocblas_gemm_ex API which is more close to // cublasGemmEx rocblas_gemm_ex does D = alpha*op( A )*op( B ) + beta*C, // whereas cublasgemmEx does C = alpha*op( A )*op( B ) + beta*C ROCBLAS_ENFORCE(rocblas_gemm_ex( context->rocblashandle(), cu_trans_A, rocblas_operation_none, m, 1, k, &alpha, A, rocblas_datatype_f16_r, lda, x, rocblas_datatype_f16_r, k, &beta, y, rocblas_datatype_f16_r, ldc, y, // D rocblas_datatype_f16_r, // D type ldc, // ldd rocblas_datatype_f32_r, // compute type rocblas_gemm_algo_standard, // rocblas_gemm_algo 0, // solution index, reserved for future use 0, // flags, reserved for future use NULL, // size of workspace NULL)); // workspace #else CUBLAS_ENFORCE(cublasSgemmEx( context->cublas_handle(), cu_trans_A, CUBLAS_OP_N, m, 1, k, &alpha, A, CUDA_R_16F, lda, x, CUDA_R_16F, k, &beta, y, CUDA_R_16F, ldc)); #endif // __HIP_PLATFORM_HCC__ } else if (math_type == TensorProto_DataType_FLOAT16) { const __half alpha_fp16 = at::Half(alpha); const __half beta_fp16 = at::Half(beta); CUBLAS_ENFORCE(cublasSetPointerMode( context->cublas_handle(), CUBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE(cublasHgemm( context->cublas_handle(), cu_trans_A, CUBLAS_OP_N, m, 1, k, reinterpret_cast<const CUBLAS_HALF_TYPE*>(&alpha_fp16), reinterpret_cast<const CUBLAS_HALF_TYPE*>(A), lda, reinterpret_cast<const CUBLAS_HALF_TYPE*>(x), k, reinterpret_cast<const CUBLAS_HALF_TYPE*>(&beta_fp16), reinterpret_cast<CUBLAS_HALF_TYPE*>(y), ldc)); } else { // fail CAFFE_THROW("Unsupported math type"); } } namespace { template <typename T> __global__ void SetKernel(const int N, const T alpha, T* Y) { CUDA_1D_KERNEL_LOOP(i, N) { Y[i] = alpha; } } } // namespace #define CAFFE2_SPECIALIZED_CUDA_SET(T) \ template <> \ CAFFE2_CUDA_API void Set<T, CUDAContext>( \ const size_t N, const T alpha, T* Y, CUDAContext* context) { \ if (N == 0) { \ return; \ } \ if (alpha == T(0)) { \ cudaMemsetAsync(Y, 0, sizeof(T) * N, context->cuda_stream()); \ } else { \ SetKernel<T> \ <<<CAFFE_GET_BLOCKS(N), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream()>>>(N, alpha, Y); \ } \ } CAFFE2_SPECIALIZED_CUDA_SET(float); CAFFE2_SPECIALIZED_CUDA_SET(double); CAFFE2_SPECIALIZED_CUDA_SET(bool); CAFFE2_SPECIALIZED_CUDA_SET(int8_t); CAFFE2_SPECIALIZED_CUDA_SET(int16_t); CAFFE2_SPECIALIZED_CUDA_SET(int); CAFFE2_SPECIALIZED_CUDA_SET(int64_t); CAFFE2_SPECIALIZED_CUDA_SET(char); CAFFE2_SPECIALIZED_CUDA_SET(uint8_t); CAFFE2_SPECIALIZED_CUDA_SET(uint16_t); #undef CAFFE2_SPECIALIZED_CUDA_SET template <> CAFFE2_CUDA_EXPORT void Set<at::Half, CUDAContext>( const size_t N, const at::Half alpha, at::Half* Y, CUDAContext* context) { if (N > 0) { SetKernel<at::Half> <<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(N, alpha, Y); } } namespace { template <typename T> __global__ void UniformShift(const size_t N, const float min, const float max, T* x) { float scale = max - min; CUDA_1D_KERNEL_LOOP(i, N) { x[i] = convert::To<float, T>(convert::To<T, float>(x[i]) * scale + min); } } __global__ void UniformIntFit(const size_t N, const int min, const int max, unsigned int* x) { int* x_int = reinterpret_cast<int*>(x); int range = (max - min + 1); CUDA_1D_KERNEL_LOOP(i, N) { x_int[i] = min + static_cast<int>(x[i] % range); } } } // namespace template <> CAFFE2_CUDA_EXPORT void RandUniform<float, CUDAContext>( const size_t n, const float min, const float max, float* r, CUDAContext* context) { CURAND_ENFORCE(curandGenerateUniform(context->curand_generator(), r, n)); UniformShift<float> <<<CAFFE_GET_BLOCKS(n), 
CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(n, min, max, r); } template <> CAFFE2_CUDA_EXPORT void RandUniform<double, CUDAContext>( const size_t n, const double min, const double max, double* r, CUDAContext* context) { CURAND_ENFORCE( curandGenerateUniformDouble(context->curand_generator(), r, n)); UniformShift<double> <<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(n, min, max, r); } template <> CAFFE2_CUDA_EXPORT void RandUniform<int, CUDAContext>( const size_t n, const int min, const int max, int* r, CUDAContext* context) { CURAND_ENFORCE(curandGenerate( context->curand_generator(), reinterpret_cast<unsigned int*>(r), n)); UniformIntFit<<< CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( n, min, max, reinterpret_cast<unsigned int*>(r)); } template <typename T> size_t HandleOddLengthRandGaussian( const size_t n, const T mean, const T std, T* r, CUDAContext* context) { if (n % 2 == 1) { std::default_random_engine generator; std::normal_distribution<T> distribution(mean, std); const T random_value = distribution(generator); Set<T, CUDAContext>(1, random_value, r + (n - 1), context); return n - 1; } return n; } template <> CAFFE2_CUDA_EXPORT void RandGaussian<float, CUDAContext>( const size_t n, const float mean, const float std, float* r, CUDAContext* context) { // If n is odd, we add a random Gaussian value at the end manually // and generate n-1 random values using curandGenerateNormal. // curandGenerateNormal requires n to be even. const size_t even_n = HandleOddLengthRandGaussian<float>(n, mean, std, r, context); CURAND_ENFORCE( curandGenerateNormal(context->curand_generator(), r, even_n, mean, std)); } template <> CAFFE2_CUDA_EXPORT void RandGaussian<double, CUDAContext>( const size_t n, const double mean, const double std, double* r, CUDAContext* context) { const size_t even_n = HandleOddLengthRandGaussian<double>(n, mean, std, r, context); CURAND_ENFORCE(curandGenerateNormalDouble( context->curand_generator(), r, even_n, mean, std)); } template <> CAFFE2_CUDA_EXPORT void Dot<float, CUDAContext>( const int n, const float* a, const float* b, float* y, CUDAContext* context) { CUBLAS_ENFORCE(cublasSetPointerMode( context->cublas_handle(), CUBLAS_POINTER_MODE_DEVICE)); CUBLAS_ENFORCE(cublasSdot(context->cublas_handle(), n, a, 1, b, 1, y)); } template <> CAFFE2_CUDA_EXPORT void Dot<at::Half, CUDAContext>( const int n, const at::Half* a, const at::Half* b, at::Half* y, CUDAContext* context) { #if defined(__HIP_PLATFORM_HCC__) CAFFE_THROW("HIP currently does not support FP16 completely yet."); #else // execute with 32-bit math CUBLAS_ENFORCE(cublasSetPointerMode( context->cublas_handle(), CUBLAS_POINTER_MODE_DEVICE)); CUBLAS_ENFORCE(cublasDotEx( context->cublas_handle(), n, a, CUDA_R_16F, 1, b, CUDA_R_16F, 1, y, CUDA_R_16F, CUDA_R_32F)); #endif } // A previous version of caffe2 used Thrust but it turns out that thrust // reduction has an implicit scratch space allocation and deallocation, which // may interfere with NCCL and create a deadlock. Hence we are using a custom // reduction here. #define SUM_KERNEL_NTHREADS 128 template <typename T> __global__ void SumKernel(const int N, const T* X, T* Y, bool square) { const int idx = threadIdx.x; __shared__ float reduction_buffer[SUM_KERNEL_NTHREADS]; reduction_buffer[idx] = 0; // A multilevel reduction. 
// N -> 128 if (!square) { for (int i = idx; i < N; i += SUM_KERNEL_NTHREADS) { reduction_buffer[idx] += convert::To<T, float>(X[i]); } } else { for (int i = idx; i < N; i += SUM_KERNEL_NTHREADS) { float Xi = convert::To<T, float>(X[i]); reduction_buffer[idx] += Xi * Xi; } } __syncthreads(); // 128 -> 32 if (idx < 32) { reduction_buffer[idx] += reduction_buffer[idx + 32] + reduction_buffer[idx + 64] + reduction_buffer[idx + 96]; } __syncthreads(); // 32 -> 1 if (idx == 0) { float tmp = 0; for (int i = 0; i < 32; ++i) { tmp += reduction_buffer[i]; } *Y = convert::To<float, T>(tmp); } } // According to the benchmarks script // caffe2/caffe2/experiments/python/device_reduce_sum_bench.py, // device reduce is slower for N <= 10000. #define DEVICE_REDUCE_SIZE_THRESHOLD 10000 namespace { template <typename T> __global__ void SumConvertKernel(float* sum, T* dest) { *dest = convert::To<float, T>(*sum); } template <typename T, typename IterT> CAFFE2_CUDA_EXPORT void SumGenericIter( const int N, IterT it, T*& dest, CUDAContext* context, Tensor* scratch_ptr) { size_t memRequired = 0; cub::DeviceReduce::Sum( nullptr, memRequired, it, dest, N, context->cuda_stream()); auto buffer_size = static_cast<int64_t>((memRequired + sizeof(T) - 1) / sizeof(T)); if (!dest) { // allocate one more T at the end of scratch for dest scratch_ptr->Resize(std::vector<int64_t>{buffer_size + 1}); dest = scratch_ptr->template mutable_data<T>() + buffer_size; } else { scratch_ptr->Resize(std::vector<int64_t>{buffer_size}); } cub::DeviceReduce::Sum( static_cast<void*>(scratch_ptr->template mutable_data<T>()), memRequired, it, dest, N, context->cuda_stream()); } } // namespace template <> CAFFE2_CUDA_EXPORT void Sum<float, CUDAContext>( const int N, const float* x, float* y, CUDAContext* context, Tensor* scratch_ptr) { if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { SumGenericIter<float>(N, x, y, context, scratch_ptr); } else { SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>( N, x, y, false); } } template <> CAFFE2_CUDA_EXPORT void Sum<int32_t, CUDAContext>( const int N, const int32_t* x, int32_t* y, CUDAContext* context, Tensor* scratch_ptr) { if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { SumGenericIter<int32_t>(N, x, y, context, scratch_ptr); } else { SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>( N, x, y, false); } } namespace { template <typename T> struct FloatTransform { inline __host__ __device__ float operator()(const T v) const { return convert::To<T, float>(v); } }; } // namespace #define CAFFE2_MATH_SUM_FUNC(T) \ template <> \ CAFFE2_CUDA_EXPORT void Sum<T, CUDAContext>( \ const int N, \ const T* x, \ T* y, \ CUDAContext* context, \ Tensor* scratch_ptr) { \ if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { \ FloatTransform<T> transform; \ cub::TransformInputIterator<float, FloatTransform<T>, const T*> it( \ x, transform); \ float* sum = nullptr; \ SumGenericIter<float>(N, it, sum, context, scratch_ptr); \ SumConvertKernel<<<1, 1, 0, context->cuda_stream()>>>(sum, y); \ } else { \ SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>( \ N, x, y, false); \ } \ } CAFFE2_MATH_SUM_FUNC(at::Half) #undef CAFFE2_MATH_SUM_FUNC namespace { template <typename T> struct SqrTransform { inline __host__ __device__ T operator()(const T v) const { return v * v; } }; } // namespace template <> CAFFE2_CUDA_EXPORT void SumSqr<float, CUDAContext>( const int N, const float* x, float* y, CUDAContext* context, Tensor* scratch_ptr) { if (scratch_ptr && N > 
DEVICE_REDUCE_SIZE_THRESHOLD) { SqrTransform<float> transform; cub::TransformInputIterator<float, SqrTransform<float>, const float*> it( x, transform); SumGenericIter<float>(N, it, y, context, scratch_ptr); } else { SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>( N, x, y, true); } } #define CAFFE2_MATH_SUMSQR_FUNC(T) \ template <> \ CAFFE2_CUDA_EXPORT void SumSqr<T, CUDAContext>( \ const int N, \ const T* x, \ T* y, \ CUDAContext* context, \ Tensor* scratch_ptr) { \ if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { \ FloatTransform<T> float_transform; \ cub::TransformInputIterator<float, FloatTransform<T>, const T*> \ float_it(x, float_transform); \ SqrTransform<float> sqr_transform; \ cub::TransformInputIterator< \ float, \ SqrTransform<float>, \ decltype(float_it)> \ it(float_it, sqr_transform); \ float* sum = nullptr; \ SumGenericIter<float>(N, it, sum, context, scratch_ptr); \ SumConvertKernel<<<1, 1, 0, context->cuda_stream()>>>(sum, y); \ } else { \ SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>( \ N, x, y, true); \ } \ } CAFFE2_MATH_SUMSQR_FUNC(at::Half) #undef CAFFE2_MATH_SUMSQR_FUNC #undef DEVICE_REDUCE_SIZE_THRESHOLD namespace { template <typename T> __global__ void SelectKernel(const int N, const int D, const T* x, const int* idx, T* y) { CUDA_1D_KERNEL_LOOP(i, N) { y[i] = x[i * D + idx[i]]; } } } // namespace template <> CAFFE2_CUDA_EXPORT void Select<float, CUDAContext>( const int N, const int D, const float* x, const int* idx, float* y, CUDAContext* context) { SelectKernel<float> <<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(N, D, x, idx, y); } template <> CAFFE2_CUDA_EXPORT void Select<at::Half, CUDAContext>( const int N, const int D, const at::Half* x, const int* idx, at::Half* y, CUDAContext* context) { SelectKernel<at::Half> <<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(N, D, x, idx, y); } namespace { template <typename TAlpha, typename TData> __global__ void ScaleCUDAKernel(const int n, const TAlpha alpha, const TData* x, TData* y) { CUDA_1D_KERNEL_LOOP(i, n) { #if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__) y[i] = __ldg(x + i) * static_cast<TData>(alpha); #else y[i] = x[i] * static_cast<TData>(alpha); #endif } } template <typename TAlpha, typename TData> __global__ void ScaleCUDAKernel(const int n, const TAlpha* alpha, const TData* x, TData* y) { CUDA_1D_KERNEL_LOOP(i, n) { #if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__) y[i] = __ldg(x + i) * static_cast<TData>(__ldg(alpha)); #else y[i] = x[i] * static_cast<TData>(*alpha); #endif } } template <typename T> __global__ void PowKernel(const int n, const T* x, const T exponent, T* y) { CUDA_1D_KERNEL_LOOP(i, n) { y[i] = powf(x[i], exponent); } } } // namespace template <> CAFFE2_CUDA_EXPORT void Powx<float, CUDAContext>( const int N, const float* a, const float b, float* y, CUDAContext* context) { PowKernel<<< CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(N, a, b, y); } #define DELEGATE_CUBLAS_SCALE_FUNCTION(TAlpha, TData, CuBLASFunc) \ template <> \ CAFFE2_CUDA_EXPORT void Scale<TAlpha, TData, CUDAContext>( \ const int N, \ const TAlpha alpha, \ const TData* x, \ TData* y, \ CUDAContext* context) { \ if (N == 0) { \ return; \ } \ if (x != y) { \ cudaMemcpyAsync( \ y, \ x, \ sizeof(TData) * N, \ cudaMemcpyDeviceToDevice, \ context->cuda_stream()); \ } \ if (alpha != TAlpha(1)) { \ CUBLAS_ENFORCE(cublasSetPointerMode( \ context->cublas_handle(), CUBLAS_POINTER_MODE_HOST)); \ 
CUBLAS_ENFORCE(CuBLASFunc(context->cublas_handle(), N, &alpha, y, 1)); \ } \ } \ template <> \ CAFFE2_CUDA_EXPORT void Scale<TAlpha, TData, CUDAContext>( \ const int N, \ const TAlpha* alpha, \ const TData* x, \ TData* y, \ CUDAContext* context) { \ if (N == 0) { \ return; \ } \ if (x != y) { \ cudaMemcpyAsync( \ y, \ x, \ sizeof(TData) * N, \ cudaMemcpyDeviceToDevice, \ context->cuda_stream()); \ } \ CUBLAS_ENFORCE(cublasSetPointerMode( \ context->cublas_handle(), CUBLAS_POINTER_MODE_DEVICE)); \ CUBLAS_ENFORCE(CuBLASFunc(context->cublas_handle(), N, alpha, y, 1)); \ } DELEGATE_CUBLAS_SCALE_FUNCTION(float, float, cublasSscal) DELEGATE_CUBLAS_SCALE_FUNCTION(double, double, cublasDscal) #undef DELEGATE_CUBLAS_SCALE_FUNCTION #define CAFFE2_SPECIALIZED_CUDA_SCALE(TAlpha, TData) \ template <> \ CAFFE2_CUDA_EXPORT void Scale<TAlpha, TData, CUDAContext>( \ const int N, \ const TAlpha alpha, \ const TData* x, \ TData* y, \ CUDAContext* context) { \ if (N == 0) { \ return; \ } \ if (alpha == TAlpha(1)) { \ if (x != y) { \ cudaMemcpyAsync( \ y, \ x, \ sizeof(TData) * N, \ cudaMemcpyDeviceToDevice, \ context->cuda_stream()); \ } \ return; \ } \ ScaleCUDAKernel<TAlpha, TData> \ <<<CAFFE_GET_BLOCKS(N), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream()>>>(N, alpha, x, y); \ } \ template <> \ CAFFE2_CUDA_EXPORT void Scale<TAlpha, TData, CUDAContext>( \ const int N, \ const TAlpha* alpha, \ const TData* x, \ TData* y, \ CUDAContext* context) { \ if (N == 0) { \ return; \ } \ ScaleCUDAKernel<TAlpha, TData> \ <<<CAFFE_GET_BLOCKS(N), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream()>>>(N, alpha, x, y); \ } CAFFE2_SPECIALIZED_CUDA_SCALE(std::int32_t, std::int32_t) CAFFE2_SPECIALIZED_CUDA_SCALE(std::int64_t, std::int64_t) #ifndef __HIP_PLATFORM_HCC__ template <> CAFFE2_CUDA_EXPORT void Scale<at::Half, at::Half, CUDAContext>( const int N, const at::Half alpha, const at::Half* x, at::Half* y, CUDAContext* context) { if (N == 0) { return; } if (x != y) { cudaMemcpyAsync( y, x, sizeof(at::Half) * N, cudaMemcpyDeviceToDevice, context->cuda_stream()); } CUBLAS_ENFORCE( cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE(cublasScalEx( context->cublas_handle(), N, &alpha, CUDA_R_16F, y, CUDA_R_16F, 1, CUDA_R_32F)); } template <> CAFFE2_CUDA_EXPORT void Scale<at::Half, at::Half, CUDAContext>( const int N, const at::Half* alpha, const at::Half* x, at::Half* y, CUDAContext* context) { if (N == 0) { return; } if (x != y) { cudaMemcpyAsync( y, x, sizeof(at::Half) * N, cudaMemcpyDeviceToDevice, context->cuda_stream()); } CUBLAS_ENFORCE(cublasSetPointerMode( context->cublas_handle(), CUBLAS_POINTER_MODE_DEVICE)); CUBLAS_ENFORCE(cublasScalEx( context->cublas_handle(), N, alpha, CUDA_R_16F, y, CUDA_R_16F, 1, CUDA_R_32F)); } template <> CAFFE2_CUDA_EXPORT void Scale<float, at::Half, CUDAContext>( const int N, const float alpha, const at::Half* x, at::Half* y, CUDAContext* context) { if (N == 0) { return; } if (x != y) { cudaMemcpyAsync( y, x, sizeof(at::Half) * N, cudaMemcpyDeviceToDevice, context->cuda_stream()); } if (alpha != 1.0f) { CUBLAS_ENFORCE(cublasSetPointerMode( context->cublas_handle(), CUBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE(cublasScalEx( context->cublas_handle(), N, &alpha, CUDA_R_32F, y, CUDA_R_16F, 1, CUDA_R_32F)); } } template <> CAFFE2_CUDA_EXPORT void Scale<float, at::Half, CUDAContext>( const int N, const float* alpha, const at::Half* x, at::Half* y, CUDAContext* context) { if (N == 0) { return; } if (x != y) { cudaMemcpyAsync( y, x, sizeof(at::Half) * N, 
cudaMemcpyDeviceToDevice, context->cuda_stream()); } CUBLAS_ENFORCE(cublasSetPointerMode( context->cublas_handle(), CUBLAS_POINTER_MODE_DEVICE)); CUBLAS_ENFORCE(cublasScalEx( context->cublas_handle(), N, alpha, CUDA_R_32F, y, CUDA_R_16F, 1, CUDA_R_32F)); } #else // __HIP_PLATFORM_HCC__ namespace { template <> __global__ void ScaleCUDAKernel<at::Half, at::Half>( const int n, const at::Half alpha, const at::Half* x, at::Half* y) { CUDA_1D_KERNEL_LOOP(i, n) { y[i] = convert::To<float, at::Half>( convert::To<at::Half, float>(x[i]) * convert::To<at::Half, float>(alpha)); } } template <> __global__ void ScaleCUDAKernel<at::Half, at::Half>( const int n, const at::Half* alpha, const at::Half* x, at::Half* y) { CUDA_1D_KERNEL_LOOP(i, n) { y[i] = convert::To<float, at::Half>( convert::To<at::Half, float>(x[i]) * convert::To<at::Half, float>(*alpha)); } } template <> __global__ void ScaleCUDAKernel<float, at::Half>( const int n, const float alpha, const at::Half* x, at::Half* y) { CUDA_1D_KERNEL_LOOP(i, n) { y[i] = convert::To<float, at::Half>( convert::To<at::Half, float>(x[i]) * alpha); } } template <> __global__ void ScaleCUDAKernel<float, at::Half>( const int n, const float* alpha, const at::Half* x, at::Half* y) { CUDA_1D_KERNEL_LOOP(i, n) { y[i] = convert::To<float, at::Half>( convert::To<at::Half, float>(x[i]) * (*alpha)); } } } // namespace CAFFE2_SPECIALIZED_HIP_SCALE(at::Half, at::Half) CAFFE2_SPECIALIZED_HIP_SCALE(float, at::Half) #endif // __HIP_PLATFORM_HCC__ #undef CAFFE2_SPECIALIZED_CUDA_SCALE template <> CAFFE2_CUDA_EXPORT void Axpy<float, CUDAContext>( const int N, const float alpha, const float* X, float* Y, CUDAContext* context) { CUBLAS_ENFORCE( cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE(cublasSaxpy(context->cublas_handle(), N, &alpha, X, 1, Y, 1)); } template <> CAFFE2_CUDA_EXPORT void Axpy<double, CUDAContext>( const int N, const float alpha, const double* X, double* Y, CUDAContext* context) { double alpha_d{alpha}; CUBLAS_ENFORCE( cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE( cublasDaxpy(context->cublas_handle(), N, &alpha_d, X, 1, Y, 1)); } template <> CAFFE2_CUDA_EXPORT void Axpy<at::Half, CUDAContext>( const int N, const float alpha, const at::Half* X, at::Half* Y, CUDAContext* context) { #if defined(__HIP_PLATFORM_HCC__) CAFFE_THROW("HIP currently does not support FP16 completely yet."); #else CUBLAS_ENFORCE( cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE(cublasAxpyEx( context->cublas_handle(), N, &alpha, CUDA_R_32F, X, CUDA_R_16F, 1, Y, CUDA_R_16F, 1, CUDA_R_32F)); #endif } template <> CAFFE2_CUDA_EXPORT void Axpy<float, CUDAContext>( const int N, const float* alpha, const float* X, float* Y, CUDAContext* context) { CUBLAS_ENFORCE(cublasSetPointerMode( context->cublas_handle(), CUBLAS_POINTER_MODE_DEVICE)); CUBLAS_ENFORCE(cublasSaxpy(context->cublas_handle(), N, alpha, X, 1, Y, 1)); } template <> CAFFE2_CUDA_EXPORT void Axpy<at::Half, CUDAContext>( const int N, const float* alpha, const at::Half* X, at::Half* Y, CUDAContext* context) { #if defined(__HIP_PLATFORM_HCC__) CAFFE_THROW("HIP currently does not support FP16 completely yet."); #else CUBLAS_ENFORCE(cublasSetPointerMode( context->cublas_handle(), CUBLAS_POINTER_MODE_DEVICE)); CUBLAS_ENFORCE(cublasAxpyEx( context->cublas_handle(), N, alpha, CUDA_R_32F, X, CUDA_R_16F, 1, Y, CUDA_R_16F, 1, CUDA_R_32F)); #endif } namespace { template <typename TCoeff, typename TData> __global__ void 
AxpbyCUDAKernel( const int N, const TCoeff a, const TData* x, const TCoeff b, TData* y) { CUDA_1D_KERNEL_LOOP(i, N) { #if __CUDA_ARCH__ >= 350 y[i] = __ldg(x + i) * a + y[i] * b; #else y[i] = x[i] * a + y[i] * b; #endif } } template <> __global__ void AxpbyCUDAKernel<float, at::Half>( const int N, const float a, const at::Half* x, const float b, at::Half* y) { CUDA_1D_KERNEL_LOOP(i, N) { y[i] = convert::To<float, at::Half>( convert::To<at::Half, float>(x[i]) * a + convert::To<at::Half, float>(y[i]) * b); } } template <typename TCoeff, typename TData> __global__ void AxpbyCUDAKernel( const int N, const TCoeff* a, const TData* x, const TCoeff* b, TData* y) { CUDA_1D_KERNEL_LOOP(i, N) { #if __CUDA_ARCH__ >= 350 y[i] = __ldg(x + i) * __ldg(a) + y[i] * __ldg(b); #else y[i] = x[i] * *a + y[i] * *b; #endif } } template <> __global__ void AxpbyCUDAKernel<float, at::Half>( const int N, const float* a, const at::Half* x, const float* b, at::Half* y) { CUDA_1D_KERNEL_LOOP(i, N) { #if __CUDA_ARCH__ >= 350 y[i] = convert::To<float, at::Half>( convert::To<at::Half, float>(x[i]) * __ldg(a) + convert::To<at::Half, float>(y[i]) * __ldg(b)); #else y[i] = convert::To<float, at::Half>( convert::To<at::Half, float>(x[i]) * *a + convert::To<at::Half, float>(y[i]) * *b); #endif } } } // namespace #define CAFFE2_SPECIALIZED_CUDA_AXPBY(TCoeff, TData) \ template <> \ CAFFE2_CUDA_EXPORT void Axpby<TCoeff, TData, CUDAContext>( \ const int n, \ const TCoeff a, \ const TData* x, \ const TCoeff b, \ TData* y, \ CUDAContext* context) { \ AxpbyCUDAKernel<TCoeff, TData> \ <<<CAFFE_GET_BLOCKS(n), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream()>>>(n, a, x, b, y); \ } \ template <> \ CAFFE2_CUDA_EXPORT void Axpby<TCoeff, TData, CUDAContext>( \ const int n, \ const TCoeff* a, \ const TData* x, \ const TCoeff* b, \ TData* y, \ CUDAContext* context) { \ AxpbyCUDAKernel<TCoeff, TData> \ <<<CAFFE_GET_BLOCKS(n), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream()>>>(n, a, x, b, y); \ } CAFFE2_SPECIALIZED_CUDA_AXPBY(float, float) CAFFE2_SPECIALIZED_CUDA_AXPBY(float, at::Half) #undef CAFFE2_SPECIALIZED_CUDA_AXPBY namespace { template <typename T> __global__ void Im2ColNCHWCUDAKernel( const int n, const int input_h, const int input_w, const int kernel_h, const int kernel_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int stride_h, const int stride_w, const int output_h, const int output_w, const T* img_data, T* col_data) { CUDA_1D_KERNEL_LOOP(index, n) { const int w_out = index % output_w; const int h_index = index / output_w; const int h_out = h_index % output_h; const int channel_in = h_index / output_h; const int channel_out = channel_in * kernel_h * kernel_w; const int h_in = h_out * stride_h - pad_t; const int w_in = w_out * stride_w - pad_l; const int output_size = output_h * output_w; T* col_data_ptr = col_data + (channel_out * output_h + h_out) * output_w + w_out; const T* img_data_ptr = img_data + (channel_in * input_h + h_in) * input_w + w_in; int dh = 0; for (int i = 0; i < kernel_h; ++i) { int dw = 0; for (int j = 0; j < kernel_w; ++j) { const int h = h_in + dh; const int w = w_in + dw; #if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__) *col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) && utils::IsAGeZeroAndALtB(w, input_w) ? __ldg(img_data_ptr + dh * input_w + dw) : 0; #else *col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) && utils::IsAGeZeroAndALtB(w, input_w) ? 
img_data_ptr[dh * input_w + dw] : 0; #endif col_data_ptr += output_size; dw += dilation_w; } dh += dilation_h; } } } template <typename T> __global__ void Im2ColNHWCCUDAKernel( const int n, const int input_h, const int input_w, const int kernel_h, const int kernel_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int stride_h, const int stride_w, const int output_w, const int channels, const T* img_data, T* col_data) { CUDA_1D_KERNEL_LOOP(index, n) { const int channel_in = index % channels; const int w_out = index / channels % output_w; const int h_out = index / channels / output_w; const int h_in = h_out * stride_h - pad_t; const int w_in = w_out * stride_w - pad_l; T* col_data_ptr = col_data + (h_out * output_w + w_out) * channels * kernel_h * kernel_w + channel_in; int dh = 0; for (int i = 0; i < kernel_h; ++i) { int dw = 0; for (int j = 0; j < kernel_w; ++j) { const int h = h_in + dh; const int w = w_in + dw; #if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__) *col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) && utils::IsAGeZeroAndALtB(w, input_w) ? __ldg(img_data + (h * input_w + w) * channels + channel_in) : 0; #else *col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) && utils::IsAGeZeroAndALtB(w, input_w) ? img_data[(h * input_w + w) * channels + channel_in] : 0; #endif col_data_ptr += channels; dw += dilation_w; } dh += dilation_h; } } } template <typename T> __global__ void Col2ImNCHWCUDAKernel( const int n, const int input_h, const int input_w, const int patch_h, const int patch_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int stride_h, const int stride_w, const int output_h, const int output_w, const T* col_data, T* img_data) { const int dpatch_h = dilation_h * (patch_h - 1) + 1; const int dpatch_w = dilation_w * (patch_w - 1) + 1; CUDA_1D_KERNEL_LOOP(index, n) { T val = 0; const int w = index % input_w + pad_l; const int h = index / input_w % input_h + pad_t; const int c = index / (input_h * input_w); // compute the start and end of the output const int w_col_start = (w < dpatch_w) ? 0 : (w - dpatch_w) / stride_w + 1; const int w_col_end = min(w / stride_w + 1, output_w); const int h_col_start = (h < dpatch_h) ? 
0 : (h - dpatch_h) / stride_h + 1; const int h_col_end = min(h / stride_h + 1, output_h); for (int h_col = h_col_start; h_col < h_col_end; ++h_col) { for (int w_col = w_col_start; w_col < w_col_end; ++w_col) { int h_k = (h - h_col * stride_h); int w_k = (w - w_col * stride_w); if (h_k % dilation_h == 0 && w_k % dilation_w == 0) { h_k /= dilation_h; w_k /= dilation_w; const int col_data_index = (((c * patch_h + h_k) * patch_w + w_k) * output_h + h_col) * output_w + w_col; #if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__) val += __ldg(col_data + col_data_index); #else val += col_data[col_data_index]; #endif } } } img_data[index] = val; } } template <typename T> __global__ void Col2ImNHWCCUDAKernel( const int n, const int input_w, const int channels, const int patch_h, const int patch_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int stride_h, const int stride_w, const int output_h, const int output_w, const T* col_data, T* img_data) { const int dpatch_h = dilation_h * (patch_h - 1) + 1; const int dpatch_w = dilation_w * (patch_w - 1) + 1; CUDA_1D_KERNEL_LOOP(index, n) { T val = 0; const int c = index % channels; const int w = index / channels % input_w + pad_l; const int h = index / channels / input_w + pad_t; // compute the start and end of the output const int w_col_start = (w < dpatch_w) ? 0 : (w - dpatch_w) / stride_w + 1; const int w_col_end = min(w / stride_w + 1, output_w); const int h_col_start = (h < dpatch_h) ? 0 : (h - dpatch_h) / stride_h + 1; const int h_col_end = min(h / stride_h + 1, output_h); const int channels_col = patch_h * patch_w * channels; for (int h_col = h_col_start; h_col < h_col_end; ++h_col) { for (int w_col = w_col_start; w_col < w_col_end; ++w_col) { int h_k = h - h_col * stride_h; int w_k = w - w_col * stride_w; if (h_k % dilation_h == 0 && w_k % dilation_w == 0) { h_k /= dilation_h; w_k /= dilation_w; const int c_col = (h_k * patch_w + w_k) * channels + c; #if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__) val += __ldg( col_data + (h_col * output_w + w_col) * channels_col + c_col); #else val += col_data[(h_col * output_w + w_col) * channels_col + c_col]; #endif } } } img_data[index] = val; } } template <typename T, int N, bool kCol2Im> __global__ void Im2ColNdNCHWCUDAKernel( const int outer_size, const int inner_size, const int kernel_size, SimpleArray<int, N + 1> img_shape, SimpleArray<int, N + 1> col_shape, SimpleArray<int, N> kernel_shape, SimpleArray<int, N> stride, SimpleArray<int, N> dilation, SimpleArray<int, N> pad, const T* X_data, T* Y_data) { int d_offset[N]; int d_iter[N]; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { int offset_i = i; #pragma unroll for (int d_i = N - 1; d_i >= 0; --d_i) { d_offset[d_i] = offset_i % kernel_shape.data[d_i]; offset_i /= kernel_shape.data[d_i]; } for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { int offset_j = j; #pragma unroll for (int d_i = N - 1; d_i >= 0; --d_i) { d_iter[d_i] = offset_j % col_shape.data[d_i + 1]; offset_j /= col_shape.data[d_i + 1]; } const int col_index = i * inner_size + j; int img_index = i / kernel_size; bool is_padding = false; #pragma unroll for (int d_i = 0; d_i < N; ++d_i) { const int d_img = d_iter[d_i] * stride.data[d_i] - pad.data[d_i] + d_offset[d_i] * dilation.data[d_i]; is_padding |= !utils::IsAGeZeroAndALtB(d_img, img_shape.data[d_i + 1]); img_index = img_index * img_shape.data[d_i + 1] + d_img; } #if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__) if (!kCol2Im) { Y_data[col_index] = is_padding ? 
0 : __ldg(X_data + img_index); } else if (!is_padding) { atomicAdd(Y_data + img_index, __ldg(X_data + col_index)); } #else if (!kCol2Im) { Y_data[col_index] = is_padding ? 0 : X_data[img_index]; } else if (!is_padding) { atomicAdd(Y_data + img_index, X_data[col_index]); } #endif } } } template <typename T, int N> CAFFE2_CUDA_EXPORT void Im2ColNdNCHWCUDAImpl( const int img_size, const int col_size, const int* img_shape, const int* col_shape, const int* kernel_shape, const int* stride, const int* dilation, const int* pad, const float* img_data, float* col_data, CUDAContext* context) { const int outer_size = col_shape[0]; const int inner_size = col_size / outer_size; const int kernel_size = std::accumulate( kernel_shape, kernel_shape + N, 1, std::multiplies<int>()); SimpleArray<int, N + 1> img_shape_array; SimpleArray<int, N + 1> col_shape_array; SimpleArray<int, N> kernel_shape_array; SimpleArray<int, N> stride_array; SimpleArray<int, N> dilation_array; SimpleArray<int, N> pad_array; std::memcpy(img_shape_array.data, img_shape, (N + 1) * sizeof(int)); std::memcpy(col_shape_array.data, col_shape, (N + 1) * sizeof(int)); std::memcpy(kernel_shape_array.data, kernel_shape, N * sizeof(int)); std::memcpy(stride_array.data, stride, N * sizeof(int)); std::memcpy(dilation_array.data, dilation, N * sizeof(int)); std::memcpy(pad_array.data, pad, N * sizeof(int)); Im2ColNdNCHWCUDAKernel<T, N, false> <<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( outer_size, inner_size, kernel_size, img_shape_array, col_shape_array, kernel_shape_array, stride_array, dilation_array, pad_array, img_data, col_data); } template <typename T, int N> CAFFE2_CUDA_EXPORT void Col2ImNdNCHWCUDAImpl( const int img_size, const int col_size, const int* img_shape, const int* col_shape, const int* kernel_shape, const int* stride, const int* dilation, const int* pad, const float* col_data, float* img_data, CUDAContext* context) { const int outer_size = col_shape[0]; const int inner_size = col_size / outer_size; const int kernel_size = std::accumulate( kernel_shape, kernel_shape + N, 1, std::multiplies<int>()); SimpleArray<int, N + 1> img_shape_array; SimpleArray<int, N + 1> col_shape_array; SimpleArray<int, N> kernel_shape_array; SimpleArray<int, N> stride_array; SimpleArray<int, N> dilation_array; SimpleArray<int, N> pad_array; std::memcpy(img_shape_array.data, img_shape, (N + 1) * sizeof(int)); std::memcpy(col_shape_array.data, col_shape, (N + 1) * sizeof(int)); std::memcpy(kernel_shape_array.data, kernel_shape, N * sizeof(int)); std::memcpy(stride_array.data, stride, N * sizeof(int)); std::memcpy(dilation_array.data, dilation, N * sizeof(int)); std::memcpy(pad_array.data, pad, N * sizeof(int)); Set<T, CUDAContext>(img_size, 0, img_data, context); Im2ColNdNCHWCUDAKernel<T, N, true> <<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( outer_size, inner_size, kernel_size, img_shape_array, col_shape_array, kernel_shape_array, stride_array, dilation_array, pad_array, col_data, img_data); } } // namespace template <> CAFFE2_CUDA_EXPORT void Im2Col<float, CUDAContext, StorageOrder::NCHW>( const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int pad_b, const int pad_r, const int stride_h, const int stride_w, const float* img_data, float* col_data, CUDAContext* context, const int /* groups */) { const int 
dkernel_h = dilation_h * (kernel_h - 1) + 1; const int dkernel_w = dilation_w * (kernel_w - 1) + 1; const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1; const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1; const int num_kernels = channels * output_h * output_w; Im2ColNCHWCUDAKernel<float> <<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( num_kernels, height, width, kernel_h, kernel_w, dilation_h, dilation_w, pad_t, pad_l, stride_h, stride_w, output_h, output_w, img_data, col_data); } template <> CAFFE2_CUDA_EXPORT void Im2Col<float, CUDAContext, StorageOrder::NHWC>( const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int pad_b, const int pad_r, const int stride_h, const int stride_w, const float* img_data, float* col_data, CUDAContext* context, const int groups) { CAFFE_ENFORCE_EQ(groups, 1, "groups must be 1 for GPU NHWC Im2Col"); const int dkernel_h = dilation_h * (kernel_h - 1) + 1; const int dkernel_w = dilation_w * (kernel_w - 1) + 1; const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1; const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1; const int num_kernels = output_h * output_w * channels; Im2ColNHWCCUDAKernel<float> <<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( num_kernels, height, width, kernel_h, kernel_w, dilation_h, dilation_w, pad_t, pad_l, stride_h, stride_w, output_w, channels, img_data, col_data); } template <> CAFFE2_CUDA_EXPORT void Col2Im<float, CUDAContext, StorageOrder::NCHW>( const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int pad_b, const int pad_r, const int stride_h, const int stride_w, const float* col_data, float* img_data, CUDAContext* context, const int /* groups */) { // In NCHW, the number of groups doesn't affect Col2Im. 
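// Added explanatory note: the effective (dilated) kernel extent computed below
// is dilation * (kernel - 1) + 1, e.g. a 3x3 kernel with dilation 2 spans a
// 5x5 input window, and the spatial output size then follows as
// (height + pad_t + pad_b - dkernel_h) / stride_h + 1 per dimension.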
const int dkernel_h = dilation_h * (kernel_h - 1) + 1; const int dkernel_w = dilation_w * (kernel_w - 1) + 1; const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1; const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1; const int num_kernels = channels * height * width; Col2ImNCHWCUDAKernel<float> <<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( num_kernels, height, width, kernel_h, kernel_w, dilation_h, dilation_w, pad_t, pad_l, stride_h, stride_w, output_h, output_w, col_data, img_data); } template <> CAFFE2_CUDA_EXPORT void Col2Im<float, CUDAContext, StorageOrder::NHWC>( const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int pad_b, const int pad_r, const int stride_h, const int stride_w, const float* col_data, float* img_data, CUDAContext* context, const int groups) { CAFFE_ENFORCE_EQ(groups, 1, "groups must be 1 for GPU NHWC Col2Im"); const int dkernel_h = dilation_h * (kernel_h - 1) + 1; const int dkernel_w = dilation_w * (kernel_w - 1) + 1; const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1; const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1; const int num_kernels = height * width * channels; Col2ImNHWCCUDAKernel<float> <<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( num_kernels, width, channels, kernel_h, kernel_w, dilation_h, dilation_w, pad_t, pad_l, stride_h, stride_w, output_h, output_w, col_data, img_data); } template <> CAFFE2_CUDA_EXPORT void Im2ColNd<float, CUDAContext, StorageOrder::NCHW>( const int N, const int img_size, const int col_size, const int* img_shape, const int* col_shape, const int* kernel_shape, const int* stride, const int* dilation, const int* pad, const float* img_data, float* col_data, CUDAContext* context, const int /* groups */) { // In NCHW, the number of groups doesn't affect Im2Col. DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1( N, Im2ColNdNCHWCUDAImpl, float, img_size, col_size, img_shape, col_shape, kernel_shape, stride, dilation, pad, img_data, col_data, context); } template <> CAFFE2_CUDA_EXPORT void Im2ColNd<float, CUDAContext, StorageOrder::NHWC>( const int N, const int img_size, const int col_size, const int* img_shape, const int* col_shape, const int* kernel_shape, const int* stride, const int* dilation, const int* pad, const float* img_data, float* col_data, CUDAContext* context, const int groups) { CAFFE_NOT_IMPLEMENTED; } template <> CAFFE2_CUDA_EXPORT void Col2ImNd<float, CUDAContext, StorageOrder::NCHW>( const int N, const int img_size, const int col_size, const int* img_shape, const int* col_shape, const int* kernel_shape, const int* stride, const int* dilation, const int* pad, const float* col_data, float* img_data, CUDAContext* context, int /* groups */) { // In NCHW, the number of groups doesn't affect Col2Im. 
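// Added explanatory note: DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1 is expected
// to switch on the runtime spatial-dimension count N and invoke the matching
// Col2ImNdNCHWCUDAImpl<float, N> instantiation defined above.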
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1( N, Col2ImNdNCHWCUDAImpl, float, img_size, col_size, img_shape, col_shape, kernel_shape, stride, dilation, pad, col_data, img_data, context); } template <> CAFFE2_CUDA_EXPORT void Col2ImNd<float, CUDAContext, StorageOrder::NHWC>( const int N, const int img_size, const int col_size, const int* img_shape, const int* col_shape, const int* kernel_shape, const int* stride, const int* dilation, const int* pad, const float* col_data, float* img_data, CUDAContext* context, int groups) { CAFFE_NOT_IMPLEMENTED; } template <> CAFFE2_CUDA_EXPORT void CopyMatrix<CUDAContext>( const size_t itemsize, const int M, const int N, const void* A, const int lda, void* B, const int ldb, CUDAContext* context, TypeMeta::Copy copy) { CAFFE_ENFORCE(!copy, "Copy constructor is not supported in CUDA context"); cudaMemcpy2DAsync( B, ldb * itemsize, A, lda * itemsize, N * itemsize, M, cudaMemcpyDeviceToDevice, context->cuda_stream()); } #define CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(T) \ template <> \ void CopyMatrix<T, CUDAContext>( \ const int M, \ const int N, \ const T* A, \ const int lda, \ T* B, \ const int ldb, \ CUDAContext* context) { \ if (M == 0 || N == 0) { \ return; \ } \ cudaMemcpy2DAsync( \ B, \ sizeof(T) * ldb, \ A, \ sizeof(T) * lda, \ sizeof(T) * N, \ M, \ cudaMemcpyDeviceToDevice, \ context->cuda_stream()); \ } CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(float) CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(double) CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(int) CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(int64_t) #undef CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX template <> CAFFE2_CUDA_EXPORT void CopyVector<float, CUDAContext>( const int N, const float* src, float* dst, CUDAContext* context) { if (src != dst && N > 0) { cudaMemcpyAsync( dst, src, sizeof(float) * N, cudaMemcpyDeviceToDevice, context->cuda_stream()); } } namespace { template <typename T> using BlockReduce = cub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS>; template <typename T, class Reducer> __global__ void RowwiseReduceKernel( const int rows, const int cols, const Reducer reducer, const T init, const T alpha, const T* X, T* Y) { __shared__ typename BlockReduce<T>::TempStorage temp_storage; for (int i = blockIdx.x; i < rows; i += gridDim.x) { T val = init; for (int j = threadIdx.x; j < cols; j += blockDim.x) { val = reducer(X[i * cols + j], val); } val = BlockReduce<T>(temp_storage).Reduce(val, reducer); if (threadIdx.x == 0) { Y[i] = val * alpha; } __syncthreads(); } } template <typename T, class Reducer> __global__ void ColwiseReduceKernel( const int rows, const int cols, const Reducer reducer, const T init, const T alpha, const T* X, T* Y) { __shared__ typename BlockReduce<T>::TempStorage temp_storage; for (int i = blockIdx.x; i < cols; i += gridDim.x) { T val = init; for (int j = threadIdx.x; j < rows; j += blockDim.x) { val = reducer(X[j * cols + i], val); } val = BlockReduce<T>(temp_storage).Reduce(val, reducer); if (threadIdx.x == 0) { Y[i] = val * alpha; } __syncthreads(); } } } // namespace #define CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX(T) \ template <> \ CAFFE2_CUDA_EXPORT void RowwiseMax<T, CUDAContext>( \ const int N, const int D, const T* x, T* y, CUDAContext* context) { \ RowwiseReduceKernel<<< \ std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream()>>>( \ N, D, cub::Max(), std::numeric_limits<T>::lowest(), T(1), x, y); \ } CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX(float) #undef CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX #define CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX(T) \ template <> \ CAFFE2_CUDA_EXPORT void 
ColwiseMax<T, CUDAContext>( \ const int N, const int D, const T* x, T* y, CUDAContext* context) { \ ColwiseReduceKernel<<< \ std::min(D, CAFFE_MAXIMUM_NUM_BLOCKS), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream()>>>( \ N, D, cub::Max(), std::numeric_limits<T>::lowest(), T(1), x, y); \ } CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX(float) #undef CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX namespace { __global__ void maximum_kernel(const int N, const float alpha, const float* x, float* y) { CUDA_1D_KERNEL_LOOP(i, N) { y[i] = fmaxf(x[i], alpha); } } } // namespace template <> CAFFE2_CUDA_EXPORT void Maximum( const int N, const float alpha, const float* x, float* y, CUDAContext* context) { maximum_kernel<<< std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(N, alpha, x, y); } namespace { template <typename T, class Reducer, int D> __global__ void ReduceTensorCUDAKernel( const int outer_size, const int inner_size, SimpleArray<int, D> X_strides, SimpleArray<FIXED_DIVISOR, D> Y_dims, const Reducer reducer, const T init, const T alpha, const T* X, T* Y) { __shared__ typename BlockReduce<T>::TempStorage temp_storage; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { T val = init; for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { int X_index = 0; int Y_index = i * inner_size + j; #pragma unroll for (int d = D - 1; d >= 0; --d) { int r; FIXED_DIVISOR_DIV_MOD(Y_dims.data[d], Y_index, &Y_index, &r); X_index += r * X_strides.data[d]; } #if __CUDA_ARCH__ >= 350 val = reducer(val, __ldg(X + X_index)); #else val = reducer(val, X[X_index]); #endif } val = BlockReduce<T>(temp_storage).Reduce(val, reducer); if (threadIdx.x == 0) { Y[i] = val * alpha; } __syncthreads(); } } template <typename T, class Reducer, int D> CAFFE2_CUDA_EXPORT void ReduceTensorCUDAImpl( const int outer_size, const int inner_size, const int* dims, const int* axes, const Reducer& reducer, const T init, const T alpha, const T* X, T* Y, CUDAContext* context) { SimpleArray<int, D> X_strides; SimpleArray<FIXED_DIVISOR, D> Y_dims; utils::ComputeTransposedStrides(D, dims, axes, X_strides.data); for (int i = 0; i < D; ++i) { Y_dims.data[i] = FIXED_DIVISOR(dims[axes[i]]); } ReduceTensorCUDAKernel<T, Reducer, D> <<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( outer_size, inner_size, X_strides, Y_dims, reducer, init, alpha, X, Y); } template <typename T, class Reducer> CAFFE2_CUDA_EXPORT void ReduceTensorCUDA( const int num_dims, const int* dims, const int num_axes, const int* axes, const Reducer& reducer, const T init, const T alpha, const T* X, T* Y, CUDAContext* context) { CAFFE_ENFORCE_LE(num_axes, num_dims); std::vector<int> Y_dims_vector(dims, dims + num_dims); for (int i = 0; i < num_axes; ++i) { Y_dims_vector[axes[i]] = 1; } const int* X_dims = dims; const int* Y_dims = Y_dims_vector.data(); const int X_size = std::accumulate(X_dims, X_dims + num_dims, 1, std::multiplies<int>()); const int Y_size = std::accumulate(Y_dims, Y_dims + num_dims, 1, std::multiplies<int>()); if (X_size == 0) { Set<T, CUDAContext>(Y_size, alpha * init, Y, context); return; } if (alpha == T(0)) { Set<T, CUDAContext>(Y_size, T(0), Y, context); return; } if (std::equal(X_dims, X_dims + num_dims, Y_dims)) { Scale<T, T, CUDAContext>(X_size, alpha, X, Y, context); return; } int rows; int cols; if (utils::IsRowwiseReduce(num_dims, X_dims, Y_dims, &rows, &cols)) { RowwiseReduceKernel<T> <<<std::min(rows, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, 
context->cuda_stream()>>>(rows, cols, reducer, init, alpha, X, Y); return; } if (utils::IsColwiseReduce(num_dims, X_dims, Y_dims, &rows, &cols)) { ColwiseReduceKernel<T> <<<std::min(rows, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(rows, cols, reducer, init, alpha, X, Y); return; } std::vector<int> transpose_axes(num_dims); utils::ComputeTransposeAxesForReduceOp( num_dims, num_axes, axes, transpose_axes.data()); const int outer_size = Y_size; const int inner_size = X_size / Y_size; DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_2( num_dims, ReduceTensorCUDAImpl, T, Reducer, outer_size, inner_size, dims, transpose_axes.data(), reducer, init, alpha, X, Y, context); } } // namespace #define CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(T) \ template <> \ CAFFE2_CUDA_EXPORT void ReduceMin<T, CUDAContext>( \ const int num_dims, \ const int* dims, \ const int num_axes, \ const int* axes, \ const T alpha, \ const T* X, \ T* Y, \ CUDAContext* context) { \ ReduceTensorCUDA( \ num_dims, \ dims, \ num_axes, \ axes, \ cub::Min(), \ std::numeric_limits<T>::max(), \ alpha, \ X, \ Y, \ context); \ } CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(std::int32_t) CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(std::int64_t) CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(float) CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(double) #undef CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN #define CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(T) \ template <> \ CAFFE2_CUDA_EXPORT void ReduceMax<T, CUDAContext>( \ const int num_dims, \ const int* dims, \ const int num_axes, \ const int* axes, \ const T alpha, \ const T* X, \ T* Y, \ CUDAContext* context) { \ ReduceTensorCUDA( \ num_dims, \ dims, \ num_axes, \ axes, \ cub::Max(), \ std::numeric_limits<T>::lowest(), \ alpha, \ X, \ Y, \ context); \ } CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(std::int32_t) CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(std::int64_t) CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(float) CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(double) #undef CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX #define CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(T) \ template <> \ CAFFE2_CUDA_EXPORT void ReduceSum<T, CUDAContext>( \ const int num_dims, \ const int* dims, \ const int num_axes, \ const int* axes, \ const T alpha, \ const T* X, \ T* Y, \ CUDAContext* context) { \ ReduceTensorCUDA( \ num_dims, \ dims, \ num_axes, \ axes, \ cub::Sum(), \ T(0), \ alpha, \ X, \ Y, \ context); \ } CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(std::int32_t) CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(std::int64_t) CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(float) CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(double) #undef CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM #define CAFFE2_SPECIALIZED_CUDA_REDUCE_MEAN(T) \ template <> \ CAFFE2_CUDA_EXPORT void ReduceMean<T, CUDAContext>( \ const int num_dims, \ const int* dims, \ const int num_axes, \ const int* axes, \ const T alpha, \ const T* X, \ T* Y, \ CUDAContext* context) { \ int scale = 1; \ for (int i = 0; i < num_axes; ++i) { \ scale *= dims[axes[i]]; \ } \ ReduceTensorCUDA( \ num_dims, \ dims, \ num_axes, \ axes, \ cub::Sum(), \ T(0), \ alpha / static_cast<T>(scale), \ X, \ Y, \ context); \ } CAFFE2_SPECIALIZED_CUDA_REDUCE_MEAN(float) #undef CAFFE2_SPECIALIZED_CUDA_REDUCE_MEAN namespace { template <typename T, int D> __global__ void BroadcastCUDAKernel( const int Y_size, const SimpleArray<int, D> X_strides, const SimpleArray<FIXED_DIVISOR, D> Y_dims, const T alpha, const T* X, T* Y) { CUDA_1D_KERNEL_LOOP(Y_index, Y_size) { int X_index = 0; int Y_index_val = Y_index; #pragma unroll for (int i = D - 1; i >= 0; --i) { int d; FIXED_DIVISOR_DIV_MOD(Y_dims.data[i], Y_index_val, 
&Y_index_val, &d); X_index += d * X_strides.data[i]; } #if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__) Y[Y_index] = __ldg(X + X_index) * alpha; #else Y[Y_index] = X[X_index] * alpha; #endif } } template <typename T, int D> CAFFE2_CUDA_EXPORT void BroadcastCUDAImpl( const int X_ndim, const int* X_dims, const int* Y_dims, const T alpha, const T* X, T* Y, CUDAContext* context) { SimpleArray<int, D> X_strides_array; SimpleArray<FIXED_DIVISOR, D> Y_dims_array; const int d = D - X_ndim; std::fill(X_strides_array.data, X_strides_array.data + d, 0); int cur_stride = 1; for (int i = D - 1; i >= d; --i) { CAFFE_ENFORCE(X_dims[i - d] == 1 || X_dims[i - d] == Y_dims[i]); X_strides_array.data[i] = X_dims[i - d] == 1 ? 0 : cur_stride; cur_stride *= X_dims[i - d]; } for (int i = 0; i < D; ++i) { if (Y_dims[i] == 0) { return; } Y_dims_array.data[i] = FIXED_DIVISOR(Y_dims[i]); } const int Y_size = std::accumulate(Y_dims, Y_dims + D, 1, std::multiplies<int>()); BroadcastCUDAKernel<T, D> <<<CAFFE_GET_BLOCKS(Y_size), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( Y_size, X_strides_array, Y_dims_array, alpha, X, Y); } } // namespace #define CAFFE2_SPECIALIZED_CUDA_BROADCAST(T) \ template <> \ CAFFE2_CUDA_EXPORT void Broadcast<T, CUDAContext>( \ const int X_ndim, \ const int* X_dims, \ const int Y_ndim, \ const int* Y_dims, \ const T alpha, \ const T* X, \ T* Y, \ CUDAContext* context) { \ CAFFE_ENFORCE_LE(X_ndim, Y_ndim); \ DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1( \ Y_ndim, \ BroadcastCUDAImpl, \ T, \ X_ndim, \ X_dims, \ Y_dims, \ alpha, \ X, \ Y, \ context); \ } CAFFE2_SPECIALIZED_CUDA_BROADCAST(std::int32_t) CAFFE2_SPECIALIZED_CUDA_BROADCAST(std::int64_t) CAFFE2_SPECIALIZED_CUDA_BROADCAST(float) CAFFE2_SPECIALIZED_CUDA_BROADCAST(double) #undef CAFFE2_SPECIALIZED_CUDA_BROADCAST namespace { template <typename T> __global__ void RowwiseMomentsCUDAKernel( const int rows, const int cols, const T* X, T* mean, T* variance) { __shared__ typename BlockReduce<T>::TempStorage m_storage; __shared__ typename BlockReduce<T>::TempStorage v_storage; const T scale = T(1) / static_cast<T>(cols); for (int i = blockIdx.x; i < rows; i += gridDim.x) { T m_val = 0; T v_val = 0; for (int j = threadIdx.x; j < cols; j += blockDim.x) { const int X_index = i * cols + j; #if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__) m_val += __ldg(X + X_index); v_val += __ldg(X + X_index) * __ldg(X + X_index); #else m_val += X[X_index]; v_val += X[X_index] * X[X_index]; #endif } m_val = BlockReduce<T>(m_storage).Sum(m_val); v_val = BlockReduce<T>(v_storage).Sum(v_val); if (threadIdx.x == 0) { const T mu = m_val * scale; mean[i] = mu; variance[i] = v_val * scale - mu * mu; } __syncthreads(); } } template <typename T> __global__ void ColwiseMomentsCUDAKernel( const int rows, const int cols, const T* X, T* mean, T* variance) { __shared__ typename BlockReduce<T>::TempStorage m_storage; __shared__ typename BlockReduce<T>::TempStorage v_storage; const T scale = T(1) / static_cast<T>(rows); for (int i = blockIdx.x; i < cols; i += gridDim.x) { T m_val = 0; T v_val = 0; for (int j = threadIdx.x; j < rows; j += blockDim.x) { const int X_index = j * cols + i; #if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__) m_val += __ldg(X + X_index); v_val += __ldg(X + X_index) * __ldg(X + X_index); #else m_val += X[X_index]; v_val += X[X_index] * X[X_index]; #endif } m_val = BlockReduce<T>(m_storage).Sum(m_val); v_val = BlockReduce<T>(v_storage).Sum(v_val); if (threadIdx.x == 0) { const T mu = m_val * scale; mean[i] = mu; variance[i] = v_val 
* scale - mu * mu; } __syncthreads(); } } template <typename T, int D> __global__ void MomentsCUDAKernel( const int outer_size, const int inner_size, SimpleArray<int, D> X_strides, SimpleArray<FIXED_DIVISOR, D> Y_dims, const T* X, T* mean, T* variance) { __shared__ typename BlockReduce<T>::TempStorage m_storage; __shared__ typename BlockReduce<T>::TempStorage v_storage; const T scale = T(1) / static_cast<T>(inner_size); for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { T m_val = 0; T v_val = 0; for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { int X_index = 0; int Y_index = i * inner_size + j; #pragma unroll for (int d = D - 1; d >= 0; --d) { int r; FIXED_DIVISOR_DIV_MOD(Y_dims.data[d], Y_index, &Y_index, &r); X_index += r * X_strides.data[d]; } #if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__) m_val += __ldg(X + X_index); v_val += __ldg(X + X_index) * __ldg(X + X_index); #else m_val += X[X_index]; v_val += X[X_index] * X[X_index]; #endif } m_val = BlockReduce<T>(m_storage).Sum(m_val); v_val = BlockReduce<T>(v_storage).Sum(v_val); if (threadIdx.x == 0) { const T mu = m_val * scale; mean[i] = mu; variance[i] = v_val * scale - mu * mu; } __syncthreads(); } } template <typename T, int D> CAFFE2_CUDA_EXPORT void MomentsCUDAImpl( const int outer_size, const int inner_size, const int* dims, const int* axes, const T* X, T* mean, T* variance, CUDAContext* context) { SimpleArray<int, D> X_strides; SimpleArray<FIXED_DIVISOR, D> Y_dims; utils::ComputeTransposedStrides(D, dims, axes, X_strides.data); for (int i = 0; i < D; ++i) { Y_dims.data[i] = FIXED_DIVISOR(dims[axes[i]]); } MomentsCUDAKernel<T, D> <<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( outer_size, inner_size, X_strides, Y_dims, X, mean, variance); } template <typename T> CAFFE2_CUDA_EXPORT void MomentsCUDA( const int num_dims, const int* dims, const int num_axes, const int* axes, const T* X, T* mean, T* variance, CUDAContext* context) { CAFFE_ENFORCE_LE(num_axes, num_dims); std::vector<int> Y_dims_vector(dims, dims + num_dims); for (int i = 0; i < num_axes; ++i) { Y_dims_vector[axes[i]] = 1; } const int* X_dims = dims; const int* Y_dims = Y_dims_vector.data(); const int X_size = std::accumulate(X_dims, X_dims + num_dims, 1, std::multiplies<int>()); const int Y_size = std::accumulate(Y_dims, Y_dims + num_dims, 1, std::multiplies<int>()); if (X_size == 0) { Set<T, CUDAContext>(Y_size, T(0), mean, context); Set<T, CUDAContext>(Y_size, T(0), variance, context); return; } if (std::equal(X_dims, X_dims + num_dims, Y_dims)) { cudaMemcpyAsync( mean, X, sizeof(T) * X_size, cudaMemcpyDeviceToDevice, context->cuda_stream()); Set<T, CUDAContext>(Y_size, T(0), variance, context); return; } int rows; int cols; if (utils::IsRowwiseReduce(num_dims, X_dims, Y_dims, &rows, &cols)) { RowwiseMomentsCUDAKernel<T> <<<std::min(rows, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(rows, cols, X, mean, variance); return; } if (utils::IsColwiseReduce(num_dims, X_dims, Y_dims, &rows, &cols)) { ColwiseMomentsCUDAKernel<T> <<<std::min(rows, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(rows, cols, X, mean, variance); return; } std::vector<int> transpose_axes(num_dims); utils::ComputeTransposeAxesForReduceOp( num_dims, num_axes, axes, transpose_axes.data()); const int pivot = num_dims - num_axes; int outer_size = 1; for (int i = 0; i < pivot; ++i) { outer_size *= dims[transpose_axes[i]]; } int inner_size = 1; for (int i 
= pivot; i < num_dims; ++i) { inner_size *= dims[transpose_axes[i]]; } DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1( num_dims, MomentsCUDAImpl, T, outer_size, inner_size, dims, transpose_axes.data(), X, mean, variance, context); } } // namespace #define CAFFE2_SPECIALIZED_CUDA_MOMENTS(T) \ template <> \ CAFFE2_CUDA_EXPORT void Moments<T, CUDAContext>( \ const int num_dims, \ const int* dims, \ const int num_axes, \ const int* axes, \ const T* X, \ T* mean, \ T* variance, \ CUDAContext* context) { \ MomentsCUDA<T>( \ num_dims, dims, num_axes, axes, X, mean, variance, context); \ } CAFFE2_SPECIALIZED_CUDA_MOMENTS(float) #undef CAFFE2_SPECIALIZED_CUDA_MOMENTS namespace { template <typename T> __global__ void InvStdCUDAKernel(const int N, const T epsilon, const T* var, T* inv_std); #define DELEGATE_INV_STD_KERNEL_FUNCTION(T, Func) \ template <> \ __global__ void InvStdCUDAKernel<T>( \ const int N, const T epsilon, const T* var, T* inv_std) { \ CUDA_1D_KERNEL_LOOP(i, N) { \ inv_std[i] = Func(var[i] + epsilon); \ } \ } DELEGATE_INV_STD_KERNEL_FUNCTION(float, rsqrtf) #undef DELEGATE_INV_STD_KERNEL_FUNCTION } // namespace #define CAFFE2_SPECIALIZED_CUDA_INV_STD(T) \ template <> \ CAFFE2_CUDA_EXPORT void InvStd<T, CUDAContext>( \ const int N, \ const T epsilon, \ const T* var, \ T* inv_std, \ CUDAContext* context) { \ InvStdCUDAKernel<T> \ <<<CAFFE_GET_BLOCKS(N), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream()>>>(N, epsilon, var, inv_std); \ } CAFFE2_SPECIALIZED_CUDA_INV_STD(float) #undef CAFFE2_SPECIALIZED_CUDA_INV_STD namespace { constexpr int kTileDim = 32; constexpr int kBlockRows = 8; // Splits the original matrix into submatrices with size 32 * 32. // Each block transposes one submatrix by loading it into shared memory. // Reference https://devblogs.nvidia.com/efficient-matrix-transpose-cuda-cc/ template <typename T> __global__ void BatchTranspose2DCUDAKernel( const int N, const int H, const int W, const T* X, T* Y) { __shared__ T tile[kTileDim][kTileDim + 1]; const int h = (H + kTileDim - 1) / kTileDim; const int w = (W + kTileDim - 1) / kTileDim; const int outer_size = N * h * w; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { const int n = i / (h * w); const int k = i % (h * w); const int r = k / w; const int c = k % w; const int offset = n * H * W; int x = c * kTileDim + threadIdx.x; int y = r * kTileDim + threadIdx.y; if (x < W) { for (int j = 0; j < kTileDim && y + j < H; j += kBlockRows) { #if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__) tile[threadIdx.y + j][threadIdx.x] = __ldg(X + offset + (y + j) * W + x); #else tile[threadIdx.y + j][threadIdx.x] = X[offset + (y + j) * W + x]; #endif } } __syncthreads(); x = r * kTileDim + threadIdx.x; y = c * kTileDim + threadIdx.y; if (x < H) { for (int j = 0; j < kTileDim && y + j < W; j += kBlockRows) { Y[offset + (y + j) * H + x] = tile[threadIdx.x][threadIdx.y + j]; } } __syncthreads(); } } template <typename T, int D> __global__ void TransposeCUDAKernel( const int size, const SimpleArray<int, D> X_strides, const SimpleArray<FIXED_DIVISOR, D> Y_dims, const T* X, T* Y) { CUDA_1D_KERNEL_LOOP(Y_index, size) { int X_index = 0; int Y_index_val = Y_index; #pragma unroll for (int i = D - 1; i >= 0; --i) { int d; FIXED_DIVISOR_DIV_MOD(Y_dims.data[i], Y_index_val, &Y_index_val, &d); X_index += d * X_strides.data[i]; } #if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__) Y[Y_index] = __ldg(X + X_index); #else Y[Y_index] = X[X_index]; #endif } } template <typename T, int D> CAFFE2_CUDA_EXPORT void TransposeCUDAImpl( const int* 
dims, const int* axes, const T* X, T* Y, CUDAContext* context) { SimpleArray<int, D> X_strides; SimpleArray<FIXED_DIVISOR, D> Y_dims; utils::ComputeTransposedStrides(D, dims, axes, X_strides.data); int size = 1; for (int i = 0; i < D; ++i) { Y_dims.data[i] = FIXED_DIVISOR(dims[axes[i]]); size *= dims[i]; } TransposeCUDAKernel<T, D> <<<CAFFE_GET_BLOCKS(size), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(size, X_strides, Y_dims, X, Y); } } // namespace #define CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(T) \ template <> \ CAFFE2_CUDA_EXPORT void Transpose<T, CUDAContext>( \ const int ndim, \ const int* dims, \ const int* axes, \ const T* X, \ T* Y, \ CUDAContext* context) { \ if (utils::IsIdentityPermutation(ndim, axes)) { \ const int size = \ std::accumulate(dims, dims + ndim, 1, std::multiplies<int>()); \ context->template CopySameDevice<T>(size, X, Y); \ return; \ } \ if (utils::IsBatchTranspose2D(ndim, axes)) { \ const int N = \ std::accumulate(dims, dims + ndim - 2, 1, std::multiplies<int>()); \ const int H = dims[ndim - 2]; \ const int W = dims[ndim - 1]; \ const int h = (H + kTileDim - 1) / kTileDim; \ const int w = (W + kTileDim - 1) / kTileDim; \ const int outer_size = N * h * w; \ const dim3 dim_block(kTileDim, kBlockRows, 1); \ BatchTranspose2DCUDAKernel<T> \ <<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS), \ dim_block, \ 0, \ context->cuda_stream()>>>(N, H, W, X, Y); \ return; \ } \ DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1( \ ndim, TransposeCUDAImpl, T, dims, axes, X, Y, context); \ } CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(float) CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(double) CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(int) CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(int64_t) #undef CAFFE2_SPECIALIZED_CUDA_TRANSPOSE #define CAFFE2_SPECIALIZED_CUDA_NCHW2NHWC(T) \ template <> \ CAFFE2_CUDA_EXPORT void NCHW2NHWC<T, CUDAContext>( \ const int N, \ const int C, \ const int HxW, \ const T* X, \ T* Y, \ CUDAContext* context) { \ const int h = (C + kTileDim - 1) / kTileDim; \ const int w = (HxW + kTileDim - 1) / kTileDim; \ const int outer_size = N * h * w; \ const dim3 dim_block(kTileDim, kBlockRows, 1); \ BatchTranspose2DCUDAKernel<T> \ <<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS), \ dim_block, \ 0, \ context->cuda_stream()>>>(N, C, HxW, X, Y); \ } CAFFE2_SPECIALIZED_CUDA_NCHW2NHWC(float) #undef CAFFE2_SPECIALIZED_CUDA_NCHW2NHWC #define CAFFE2_SPECIALIZED_CUDA_NHWC2NCHW(T) \ template <> \ CAFFE2_CUDA_EXPORT void NHWC2NCHW<T, CUDAContext>( \ const int N, \ const int C, \ const int HxW, \ const T* X, \ T* Y, \ CUDAContext* context) { \ const int h = (HxW + kTileDim - 1) / kTileDim; \ const int w = (C + kTileDim - 1) / kTileDim; \ const int outer_size = N * h * w; \ const dim3 dim_block(kTileDim, kBlockRows, 1); \ BatchTranspose2DCUDAKernel<T> \ <<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS), \ dim_block, \ 0, \ context->cuda_stream()>>>(N, HxW, C, X, Y); \ } CAFFE2_SPECIALIZED_CUDA_NHWC2NCHW(float) #undef CAFFE2_SPECIALIZED_CUDA_NHWC2NCHW } // namespace math } // namespace caffe2
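The BatchTranspose2DCUDAKernel in the file above uses the standard shared-memory tiled transpose (32x32 tiles, 8 block rows, one padding column to avoid bank conflicts). Below is a minimal standalone sketch of that tiling for a single H x W matrix; it is a simplified illustration of the pattern, not the Caffe2 kernel itself, and the names kTile/kRows and the sizes in main are invented for the example.

#include <cuda_runtime.h>
#include <cstdio>

constexpr int kTile = 32;   // tile edge (one warp wide)
constexpr int kRows = 8;    // rows handled per loop iteration of a thread

__global__ void TransposeTiled(const float* X, float* Y, int H, int W) {
  // +1 column of padding avoids shared-memory bank conflicts on the
  // column-wise reads done when writing the transposed tile.
  __shared__ float tile[kTile][kTile + 1];
  int x = blockIdx.x * kTile + threadIdx.x;  // column in X
  int y = blockIdx.y * kTile + threadIdx.y;  // row in X
  for (int j = 0; j < kTile; j += kRows) {
    if (x < W && y + j < H) {
      tile[threadIdx.y + j][threadIdx.x] = X[(y + j) * W + x];
    }
  }
  __syncthreads();
  // Swap block coordinates for the output so the writes stay coalesced.
  x = blockIdx.y * kTile + threadIdx.x;      // column in Y (== row in X)
  y = blockIdx.x * kTile + threadIdx.y;      // row in Y (== column in X)
  for (int j = 0; j < kTile; j += kRows) {
    if (x < H && y + j < W) {
      Y[(y + j) * H + x] = tile[threadIdx.x][threadIdx.y + j];
    }
  }
}

int main() {
  const int H = 1000, W = 777;
  float *dX, *dY;
  cudaMalloc(&dX, H * W * sizeof(float));
  cudaMalloc(&dY, H * W * sizeof(float));
  dim3 block(kTile, kRows, 1);
  dim3 grid((W + kTile - 1) / kTile, (H + kTile - 1) / kTile, 1);
  TransposeTiled<<<grid, block>>>(dX, dY, H, W);
  printf("launch: %s\n", cudaGetErrorString(cudaGetLastError()));
  cudaDeviceSynchronize();
  cudaFree(dX);
  cudaFree(dY);
  return 0;
}

The Caffe2 version differs mainly in batching: it flattens (batch, tile row, tile column) into a single grid-stride loop over blockIdx.x instead of using a 2D grid per matrix.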
0a880196844586636f6b585235c41516da708288.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "glog/logging.h" #include "paddle/phi/core/flags.h" #include "paddle/phi/kernels/fusion/gpu/fused_bias_act_utils.h" PHI_DECLARE_bool(use_fast_math); namespace phi { namespace fusion { #ifndef PADDLE_WITH_HIP template <typename T, typename Functor, int VecSize, typename LoadFunc, typename StoreFunc> __global__ void ActFFNGlu(const T *bias, Functor act_functor, const int token_num, const int hid_dim, const int elem_num, LoadFunc load_func, StoreFunc store_func) { using LoadT = phi::AlignedVector<T, VecSize>; LoadT src_vec1; LoadT src_vec2; LoadT bias_vec1; LoadT bias_vec2; const int global_tid = blockIdx.x * blockDim.x + threadIdx.x; for (int i = global_tid * VecSize; i < elem_num; i += gridDim.x * blockDim.x * VecSize) { int bi = i / hid_dim; int idx = i % hid_dim; load_func.template load<VecSize>(&src_vec1, bi * hid_dim * 2 + idx); load_func.template load<VecSize>(&src_vec2, bi * hid_dim * 2 + idx + hid_dim); if (bias) { phi::Load<T, VecSize>(&bias[idx], &bias_vec1); phi::Load<T, VecSize>(&bias[idx + hid_dim], &bias_vec2); } #pragma unroll for (int j = 0; j < VecSize; j++) { if (bias) { src_vec1[j] += bias_vec1[j]; src_vec2[j] += bias_vec2[j]; } src_vec1[j] = act_functor(src_vec1[j]); src_vec1[j] *= src_vec2[j]; } store_func.template store<VecSize>(src_vec1, bi * hid_dim + idx); } } template <typename T, typename Context, typename Functor, typename LoadFunc, typename StoreFunc, typename LoadT = T> void LaunchActFFNGlu(const Context &dev_ctx, const T *bias, const int token_num, const int hid_dim, LoadFunc load_func, StoreFunc store_func) { constexpr int VecSize = 16; constexpr int PackSize = VecSize / sizeof(LoadT); const int elem_cnt = token_num * hid_dim; const int blocksize = 128; int grid_size = 1; Functor functor; switch (hid_dim % PackSize) { case 0: GetNumBlocks(elem_cnt / PackSize, &grid_size); hipLaunchKernelGGL(( ActFFNGlu<T, Functor, PackSize>) , dim3(grid_size), dim3(blocksize), 0, dev_ctx.stream(), bias, functor, token_num, hid_dim, elem_cnt, load_func, store_func); break; default: GetNumBlocks(elem_cnt, &grid_size); hipLaunchKernelGGL(( ActFFNGlu<T, Functor, 1>), dim3(grid_size), dim3(blocksize), 0, dev_ctx.stream(), bias, functor, token_num, hid_dim, elem_cnt, load_func, store_func); break; } } template <typename T, typename Functor, int VecSize, typename LoadFunc, typename StoreFunc> __global__ void BiasAct(const T *bias, Functor act_functor, const int rows, const int cols, const int elem_num, LoadFunc load_func, StoreFunc store_func) { using LoadT = phi::AlignedVector<T, VecSize>; LoadT src_vec; LoadT bias_vec; // Zero Initialize BiasVec. 
#pragma unroll for (int unroll_idx = 0; unroll_idx < VecSize; unroll_idx++) { bias_vec[unroll_idx] = 0; } const int global_tid = blockIdx.x * blockDim.x + threadIdx.x; for (int i = global_tid * VecSize; i < elem_num; i += gridDim.x * blockDim.x * VecSize) { int row_idx = i / cols; int col_idx = i % cols; int linear_idx = row_idx * cols + col_idx; load_func.template load<VecSize>(&src_vec, linear_idx); if (bias) { phi::Load<T, VecSize>(&bias[col_idx], &bias_vec); } #pragma unroll for (int j = 0; j < VecSize; j++) { if (bias) { src_vec[j] += bias_vec[j]; } src_vec[j] = act_functor(src_vec[j]); } store_func.template store<VecSize>(src_vec, linear_idx); } } template <typename T, typename Context, typename Functor, typename LoadFunc, typename StoreFunc, typename LoadT = T> void LaunchBiasAct(const Context &dev_ctx, const T *bias, const int token_num, const int hid_dim, LoadFunc load_func, StoreFunc store_func) { constexpr int VecSize = 16; constexpr int PackSize = VecSize / sizeof(LoadT); const int elem_cnt = token_num * hid_dim; const int blocksize = 128; int grid_size = 1; Functor functor; switch (hid_dim % PackSize) { case 0: GetNumBlocks(elem_cnt / PackSize, &grid_size); hipLaunchKernelGGL(( BiasAct<T, Functor, PackSize>) , dim3(grid_size), dim3(blocksize), 0, dev_ctx.stream(), bias, functor, token_num, hid_dim, elem_cnt, load_func, store_func); break; default: GetNumBlocks(elem_cnt, &grid_size); hipLaunchKernelGGL(( BiasAct<T, Functor, 1>), dim3(grid_size), dim3(blocksize), 0, dev_ctx.stream(), bias, functor, token_num, hid_dim, elem_cnt, load_func, store_func); break; } } template <typename T, typename Context, typename LoadFunc, typename StoreFunc, typename LoadT = T> void ComputeImpl(const Context &dev_ctx, const T *bias_data, const std::string &act_method, int rows, int cols, LoadFunc load_func, StoreFunc store_func) { if (act_method == "geglu") { // Note(Zhengzekang): For GLU structure, we need divide the cols by 2. VLOG(8) << "Doing geglu"; LaunchActFFNGlu<T, Context, GeluFunctor<T>, LoadFunc, StoreFunc, LoadT>( dev_ctx, bias_data, rows, cols / 2, load_func, store_func); } else if (act_method == "swiglu") { VLOG(8) << "Doing swiglu"; LaunchActFFNGlu<T, Context, CudaSwishFunctor<T>, LoadFunc, StoreFunc, LoadT>( dev_ctx, bias_data, rows, cols / 2, load_func, store_func); } else if (act_method == "gelu") { if (FLAGS_use_fast_math) { VLOG(8) << "Doing Fast GELU"; LaunchBiasAct<T, Context, FastGeluFunctor<T>, LoadFunc, StoreFunc, LoadT>( dev_ctx, bias_data, rows, cols, load_func, store_func); } else { VLOG(8) << "Doing GELU"; LaunchBiasAct<T, Context, GeluFunctor<T>, LoadFunc, StoreFunc, LoadT>( dev_ctx, bias_data, rows, cols, load_func, store_func); } } else { PADDLE_THROW(phi::errors::Unimplemented( "Currently Only Support GeGLU, SwiGLU, GeLU")); } } template <typename T, typename Context> void DispatchComputeImpl(const Context &dev_ctx, const DenseTensor &x, const DenseTensor *bias, const DenseTensor *dequant_scales, const std::string &act_method, int rows, int cols, const float quant_scale, const int quant_round_type, const float quant_max_bound, const float quant_min_bound, DenseTensor *out) { const T *bias_data = bias == nullptr ? 
nullptr : bias->data<T>(); if (dequant_scales != nullptr && quant_scale > 0) { DequantLoad<T> load_func( x.data<int32_t>(), dequant_scales->data<float>(), cols); QuantStore<T> store_func(dev_ctx.template Alloc<int8_t>(out), quant_round_type, quant_scale, quant_max_bound, quant_min_bound); ComputeImpl<T, Context, DequantLoad<T>, QuantStore<T>, int32_t>( dev_ctx, bias_data, act_method, rows, cols, load_func, store_func); } else if (dequant_scales == nullptr && quant_scale > 0) { Load<T> load_func(x.data<T>()); QuantStore<T> store_func(dev_ctx.template Alloc<int8_t>(out), quant_round_type, quant_scale, quant_max_bound, quant_min_bound); ComputeImpl<T>( dev_ctx, bias_data, act_method, rows, cols, load_func, store_func); } else if (dequant_scales != nullptr && quant_scale <= 0) { DequantLoad<T> load_func( x.data<int32_t>(), dequant_scales->data<float>(), cols); Store<T> store_func(dev_ctx.template Alloc<T>(out)); ComputeImpl<T, Context, DequantLoad<T>, Store<T>, int32_t>( dev_ctx, bias_data, act_method, rows, cols, load_func, store_func); } else { Load<T> load_func(x.data<T>()); Store<T> store_func(dev_ctx.template Alloc<T>(out)); ComputeImpl<T>( dev_ctx, bias_data, act_method, rows, cols, load_func, store_func); } } template <typename T, typename Context> void DispatchComputeImpl(const Context &dev_ctx, const DenseTensor &x, const DenseTensor *bias, const DenseTensor *dequant_scales, const DenseTensor *shift, const DenseTensor *smooth, const std::string &act_method, int rows, int cols, const float quant_scale, const int quant_round_type, const float quant_max_bound, const float quant_min_bound, DenseTensor *out) { bool use_glu = (act_method == "geglu" || act_method == "swiglu"); const T *bias_data = bias == nullptr ? nullptr : bias->data<T>(); if (dequant_scales != nullptr && quant_scale > 0) { int8_t *out_data = dev_ctx.template Alloc<int8_t>(out); DequantLoad<T> load_func( x.data<int32_t>(), dequant_scales->data<float>(), cols); QuantStore<T, true> store_func(dev_ctx.template Alloc<int8_t>(out), shift->data<T>(), smooth->data<T>(), use_glu ? cols / 2 : cols, quant_round_type, quant_scale, quant_max_bound, quant_min_bound); ComputeImpl<T, Context, DequantLoad<T>, QuantStore<T, true>, int32_t>( dev_ctx, bias_data, act_method, rows, cols, load_func, store_func); } else if (dequant_scales == nullptr && quant_scale > 0) { Load<T> load_func(x.data<T>()); QuantStore<T, true> store_func(dev_ctx.template Alloc<int8_t>(out), shift->data<T>(), smooth->data<T>(), use_glu ? cols / 2 : cols, quant_round_type, quant_scale, quant_max_bound, quant_min_bound); ComputeImpl<T>( dev_ctx, bias_data, act_method, rows, cols, load_func, store_func); } else if (dequant_scales != nullptr && quant_scale <= 0) { DequantLoad<T> load_func( x.data<int32_t>(), dequant_scales->data<float>(), cols); Store<T, true> store_func(dev_ctx.template Alloc<T>(out), shift->data<T>(), smooth->data<T>(), use_glu ? cols / 2 : cols); ComputeImpl<T, Context, DequantLoad<T>, Store<T, true>, int32_t>( dev_ctx, bias_data, act_method, rows, cols, load_func, store_func); } else { Load<T> load_func(x.data<T>()); Store<T, true> store_func(dev_ctx.template Alloc<T>(out), shift->data<T>(), smooth->data<T>(), use_glu ? 
cols / 2 : cols); ComputeImpl<T>( dev_ctx, bias_data, act_method, rows, cols, load_func, store_func); } } struct NormalVersion {}; struct UnusedVersion {}; template <typename T> struct DispatchDtypeTrait { using FuncVersion = NormalVersion; }; template <> struct DispatchDtypeTrait<int32_t> { using FuncVersion = UnusedVersion; }; template <typename T, typename Context> void DispatchWithDtype(const Context &dev_ctx, const DenseTensor &x, const paddle::optional<DenseTensor> &bias, const paddle::optional<DenseTensor> &dequant_scales, const paddle::optional<DenseTensor> &shift, const paddle::optional<DenseTensor> &smooth, const std::string &act_method, int rows, int cols, float quant_scale, int quant_round_type, float quant_max_bound, float quant_min_bound, DenseTensor *out, NormalVersion) { auto *bias_p = bias.get_ptr(); auto *dequant_scales_p = dequant_scales.get_ptr(); auto *shift_p = shift.get_ptr(); auto *smooth_p = smooth.get_ptr(); if (dequant_scales_p != nullptr) { if (shift_p != nullptr) { DispatchComputeImpl<T>(dev_ctx, x, bias_p, dequant_scales_p, shift_p, smooth_p, act_method, rows, cols, quant_scale, quant_round_type, quant_max_bound, quant_min_bound, out); } else { DispatchComputeImpl<T>(dev_ctx, x, bias_p, dequant_scales_p, act_method, rows, cols, quant_scale, quant_round_type, quant_max_bound, quant_min_bound, out); } } else { const T *bias_data = bias_p == nullptr ? nullptr : bias_p->data<T>(); Load<T> load_func(x.data<T>()); Store<T> store_func(dev_ctx.template Alloc<T>(out)); ComputeImpl<T>( dev_ctx, bias_data, act_method, rows, cols, load_func, store_func); } } // (not use) only for registering int32_t template <typename T, typename Context> void DispatchWithDtype(const Context &dev_ctx, const DenseTensor &x, const paddle::optional<DenseTensor> &bias, const paddle::optional<DenseTensor> &dequant_scales, const paddle::optional<DenseTensor> &shift, const paddle::optional<DenseTensor> &smooth, const std::string &act_method, int rows, int cols, float quant_scale, int quant_round_type, float quant_max_bound, float quant_min_bound, DenseTensor *out, UnusedVersion) {} #endif template <typename T, typename Context> void FusedBiasActKernel(const Context &dev_ctx, const DenseTensor &x, const paddle::optional<DenseTensor> &bias, const paddle::optional<DenseTensor> &dequant_scales, const paddle::optional<DenseTensor> &shift, const paddle::optional<DenseTensor> &smooth, const std::string &act_method, const std::string &compute_dtype, float quant_scale, int quant_round_type, float quant_max_bound, float quant_min_bound, DenseTensor *out) { #ifndef PADDLE_WITH_HIP int rows = x.dims()[0]; int cols = x.dims()[1]; if (x.dtype() == phi::DataType::INT32) { if (compute_dtype == "bf16") { DispatchWithDtype<phi::dtype::bfloat16, Context>( dev_ctx, x, bias, dequant_scales, shift, smooth, act_method, rows, cols, quant_scale, quant_round_type, quant_max_bound, quant_min_bound, out, typename DispatchDtypeTrait<phi::dtype::bfloat16>::FuncVersion{}); } else if (compute_dtype == "fp16") { DispatchWithDtype<phi::dtype::float16, Context>( dev_ctx, x, bias, dequant_scales, shift, smooth, act_method, rows, cols, quant_scale, quant_round_type, quant_max_bound, quant_min_bound, out, typename DispatchDtypeTrait<phi::dtype::float16>::FuncVersion{}); } else if (compute_dtype == "fp32") { DispatchWithDtype<float, Context>( dev_ctx, x, bias, dequant_scales, shift, smooth, act_method, rows, cols, quant_scale, quant_round_type, quant_max_bound, quant_min_bound, out, typename DispatchDtypeTrait<float>::FuncVersion{}); 
} else { PADDLE_THROW(phi::errors::InvalidArgument( "In the case of quantization enabled with Input(x) INT32, " "Attr(compute_dtype) must be set in (bf16, fp16, fp32), " "but get compute_dtype (%s)", compute_dtype)); } } else { DispatchWithDtype<T, Context>( dev_ctx, x, bias, dequant_scales, shift, smooth, act_method, rows, cols, quant_scale, quant_round_type, quant_max_bound, quant_min_bound, out, typename DispatchDtypeTrait<T>::FuncVersion{}); } #endif } } // namespace fusion } // namespace phi PD_REGISTER_KERNEL(fused_bias_act, GPU, ALL_LAYOUT, phi::fusion::FusedBiasActKernel, float, phi::dtype::bfloat16, phi::dtype::float16, int32_t) {}
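The ActFFNGlu kernel in this file implements the GLU-style gating behind geglu/swiglu: each output element is act(x1 + b1) * (x2 + b2), where x1 and x2 come from the two halves of a row of width 2*hid_dim. The sketch below shows only that indexing, with scalar loads and a tanh-based GELU; it drops the AlignedVector packing and the quantization load/store functors of the real kernel, and the kernel name and shapes are illustrative only.

#include <cuda_runtime.h>
#include <math.h>

// out[row, col] = gelu(x[row, col] + bias[col]) * (x[row, col + hid] + bias[col + hid]),
// with the two halves of each row stored contiguously (row width is 2 * hid).
__global__ void BiasGeluGlu(const float* x, const float* bias,
                            float* out, int tokens, int hid) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  int total = tokens * hid;
  for (int i = idx; i < total; i += gridDim.x * blockDim.x) {
    int row = i / hid;
    int col = i % hid;
    float v1 = x[row * 2 * hid + col];
    float v2 = x[row * 2 * hid + col + hid];
    if (bias != nullptr) {
      v1 += bias[col];
      v2 += bias[col + hid];
    }
    // tanh approximation of GELU
    float g = 0.5f * v1 *
              (1.0f + tanhf(0.7978845608f * (v1 + 0.044715f * v1 * v1 * v1)));
    out[row * hid + col] = g * v2;
  }
}

A launch such as BiasGeluGlu<<<108, 256>>>(x, bias, out, tokens, hid) works for any tokens*hid because of the grid-stride loop; the grid size here is illustrative.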
0a880196844586636f6b585235c41516da708288.cu
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "glog/logging.h" #include "paddle/phi/core/flags.h" #include "paddle/phi/kernels/fusion/gpu/fused_bias_act_utils.h" PHI_DECLARE_bool(use_fast_math); namespace phi { namespace fusion { #ifndef PADDLE_WITH_HIP template <typename T, typename Functor, int VecSize, typename LoadFunc, typename StoreFunc> __global__ void ActFFNGlu(const T *bias, Functor act_functor, const int token_num, const int hid_dim, const int elem_num, LoadFunc load_func, StoreFunc store_func) { using LoadT = phi::AlignedVector<T, VecSize>; LoadT src_vec1; LoadT src_vec2; LoadT bias_vec1; LoadT bias_vec2; const int global_tid = blockIdx.x * blockDim.x + threadIdx.x; for (int i = global_tid * VecSize; i < elem_num; i += gridDim.x * blockDim.x * VecSize) { int bi = i / hid_dim; int idx = i % hid_dim; load_func.template load<VecSize>(&src_vec1, bi * hid_dim * 2 + idx); load_func.template load<VecSize>(&src_vec2, bi * hid_dim * 2 + idx + hid_dim); if (bias) { phi::Load<T, VecSize>(&bias[idx], &bias_vec1); phi::Load<T, VecSize>(&bias[idx + hid_dim], &bias_vec2); } #pragma unroll for (int j = 0; j < VecSize; j++) { if (bias) { src_vec1[j] += bias_vec1[j]; src_vec2[j] += bias_vec2[j]; } src_vec1[j] = act_functor(src_vec1[j]); src_vec1[j] *= src_vec2[j]; } store_func.template store<VecSize>(src_vec1, bi * hid_dim + idx); } } template <typename T, typename Context, typename Functor, typename LoadFunc, typename StoreFunc, typename LoadT = T> void LaunchActFFNGlu(const Context &dev_ctx, const T *bias, const int token_num, const int hid_dim, LoadFunc load_func, StoreFunc store_func) { constexpr int VecSize = 16; constexpr int PackSize = VecSize / sizeof(LoadT); const int elem_cnt = token_num * hid_dim; const int blocksize = 128; int grid_size = 1; Functor functor; switch (hid_dim % PackSize) { case 0: GetNumBlocks(elem_cnt / PackSize, &grid_size); ActFFNGlu<T, Functor, PackSize> <<<grid_size, blocksize, 0, dev_ctx.stream()>>>(bias, functor, token_num, hid_dim, elem_cnt, load_func, store_func); break; default: GetNumBlocks(elem_cnt, &grid_size); ActFFNGlu<T, Functor, 1><<<grid_size, blocksize, 0, dev_ctx.stream()>>>( bias, functor, token_num, hid_dim, elem_cnt, load_func, store_func); break; } } template <typename T, typename Functor, int VecSize, typename LoadFunc, typename StoreFunc> __global__ void BiasAct(const T *bias, Functor act_functor, const int rows, const int cols, const int elem_num, LoadFunc load_func, StoreFunc store_func) { using LoadT = phi::AlignedVector<T, VecSize>; LoadT src_vec; LoadT bias_vec; // Zero Initialize BiasVec. 
#pragma unroll for (int unroll_idx = 0; unroll_idx < VecSize; unroll_idx++) { bias_vec[unroll_idx] = 0; } const int global_tid = blockIdx.x * blockDim.x + threadIdx.x; for (int i = global_tid * VecSize; i < elem_num; i += gridDim.x * blockDim.x * VecSize) { int row_idx = i / cols; int col_idx = i % cols; int linear_idx = row_idx * cols + col_idx; load_func.template load<VecSize>(&src_vec, linear_idx); if (bias) { phi::Load<T, VecSize>(&bias[col_idx], &bias_vec); } #pragma unroll for (int j = 0; j < VecSize; j++) { if (bias) { src_vec[j] += bias_vec[j]; } src_vec[j] = act_functor(src_vec[j]); } store_func.template store<VecSize>(src_vec, linear_idx); } } template <typename T, typename Context, typename Functor, typename LoadFunc, typename StoreFunc, typename LoadT = T> void LaunchBiasAct(const Context &dev_ctx, const T *bias, const int token_num, const int hid_dim, LoadFunc load_func, StoreFunc store_func) { constexpr int VecSize = 16; constexpr int PackSize = VecSize / sizeof(LoadT); const int elem_cnt = token_num * hid_dim; const int blocksize = 128; int grid_size = 1; Functor functor; switch (hid_dim % PackSize) { case 0: GetNumBlocks(elem_cnt / PackSize, &grid_size); BiasAct<T, Functor, PackSize> <<<grid_size, blocksize, 0, dev_ctx.stream()>>>(bias, functor, token_num, hid_dim, elem_cnt, load_func, store_func); break; default: GetNumBlocks(elem_cnt, &grid_size); BiasAct<T, Functor, 1><<<grid_size, blocksize, 0, dev_ctx.stream()>>>( bias, functor, token_num, hid_dim, elem_cnt, load_func, store_func); break; } } template <typename T, typename Context, typename LoadFunc, typename StoreFunc, typename LoadT = T> void ComputeImpl(const Context &dev_ctx, const T *bias_data, const std::string &act_method, int rows, int cols, LoadFunc load_func, StoreFunc store_func) { if (act_method == "geglu") { // Note(Zhengzekang): For GLU structure, we need divide the cols by 2. VLOG(8) << "Doing geglu"; LaunchActFFNGlu<T, Context, GeluFunctor<T>, LoadFunc, StoreFunc, LoadT>( dev_ctx, bias_data, rows, cols / 2, load_func, store_func); } else if (act_method == "swiglu") { VLOG(8) << "Doing swiglu"; LaunchActFFNGlu<T, Context, CudaSwishFunctor<T>, LoadFunc, StoreFunc, LoadT>( dev_ctx, bias_data, rows, cols / 2, load_func, store_func); } else if (act_method == "gelu") { if (FLAGS_use_fast_math) { VLOG(8) << "Doing Fast GELU"; LaunchBiasAct<T, Context, FastGeluFunctor<T>, LoadFunc, StoreFunc, LoadT>( dev_ctx, bias_data, rows, cols, load_func, store_func); } else { VLOG(8) << "Doing GELU"; LaunchBiasAct<T, Context, GeluFunctor<T>, LoadFunc, StoreFunc, LoadT>( dev_ctx, bias_data, rows, cols, load_func, store_func); } } else { PADDLE_THROW(phi::errors::Unimplemented( "Currently Only Support GeGLU, SwiGLU, GeLU")); } } template <typename T, typename Context> void DispatchComputeImpl(const Context &dev_ctx, const DenseTensor &x, const DenseTensor *bias, const DenseTensor *dequant_scales, const std::string &act_method, int rows, int cols, const float quant_scale, const int quant_round_type, const float quant_max_bound, const float quant_min_bound, DenseTensor *out) { const T *bias_data = bias == nullptr ? 
nullptr : bias->data<T>(); if (dequant_scales != nullptr && quant_scale > 0) { DequantLoad<T> load_func( x.data<int32_t>(), dequant_scales->data<float>(), cols); QuantStore<T> store_func(dev_ctx.template Alloc<int8_t>(out), quant_round_type, quant_scale, quant_max_bound, quant_min_bound); ComputeImpl<T, Context, DequantLoad<T>, QuantStore<T>, int32_t>( dev_ctx, bias_data, act_method, rows, cols, load_func, store_func); } else if (dequant_scales == nullptr && quant_scale > 0) { Load<T> load_func(x.data<T>()); QuantStore<T> store_func(dev_ctx.template Alloc<int8_t>(out), quant_round_type, quant_scale, quant_max_bound, quant_min_bound); ComputeImpl<T>( dev_ctx, bias_data, act_method, rows, cols, load_func, store_func); } else if (dequant_scales != nullptr && quant_scale <= 0) { DequantLoad<T> load_func( x.data<int32_t>(), dequant_scales->data<float>(), cols); Store<T> store_func(dev_ctx.template Alloc<T>(out)); ComputeImpl<T, Context, DequantLoad<T>, Store<T>, int32_t>( dev_ctx, bias_data, act_method, rows, cols, load_func, store_func); } else { Load<T> load_func(x.data<T>()); Store<T> store_func(dev_ctx.template Alloc<T>(out)); ComputeImpl<T>( dev_ctx, bias_data, act_method, rows, cols, load_func, store_func); } } template <typename T, typename Context> void DispatchComputeImpl(const Context &dev_ctx, const DenseTensor &x, const DenseTensor *bias, const DenseTensor *dequant_scales, const DenseTensor *shift, const DenseTensor *smooth, const std::string &act_method, int rows, int cols, const float quant_scale, const int quant_round_type, const float quant_max_bound, const float quant_min_bound, DenseTensor *out) { bool use_glu = (act_method == "geglu" || act_method == "swiglu"); const T *bias_data = bias == nullptr ? nullptr : bias->data<T>(); if (dequant_scales != nullptr && quant_scale > 0) { int8_t *out_data = dev_ctx.template Alloc<int8_t>(out); DequantLoad<T> load_func( x.data<int32_t>(), dequant_scales->data<float>(), cols); QuantStore<T, true> store_func(dev_ctx.template Alloc<int8_t>(out), shift->data<T>(), smooth->data<T>(), use_glu ? cols / 2 : cols, quant_round_type, quant_scale, quant_max_bound, quant_min_bound); ComputeImpl<T, Context, DequantLoad<T>, QuantStore<T, true>, int32_t>( dev_ctx, bias_data, act_method, rows, cols, load_func, store_func); } else if (dequant_scales == nullptr && quant_scale > 0) { Load<T> load_func(x.data<T>()); QuantStore<T, true> store_func(dev_ctx.template Alloc<int8_t>(out), shift->data<T>(), smooth->data<T>(), use_glu ? cols / 2 : cols, quant_round_type, quant_scale, quant_max_bound, quant_min_bound); ComputeImpl<T>( dev_ctx, bias_data, act_method, rows, cols, load_func, store_func); } else if (dequant_scales != nullptr && quant_scale <= 0) { DequantLoad<T> load_func( x.data<int32_t>(), dequant_scales->data<float>(), cols); Store<T, true> store_func(dev_ctx.template Alloc<T>(out), shift->data<T>(), smooth->data<T>(), use_glu ? cols / 2 : cols); ComputeImpl<T, Context, DequantLoad<T>, Store<T, true>, int32_t>( dev_ctx, bias_data, act_method, rows, cols, load_func, store_func); } else { Load<T> load_func(x.data<T>()); Store<T, true> store_func(dev_ctx.template Alloc<T>(out), shift->data<T>(), smooth->data<T>(), use_glu ? 
cols / 2 : cols); ComputeImpl<T>( dev_ctx, bias_data, act_method, rows, cols, load_func, store_func); } } struct NormalVersion {}; struct UnusedVersion {}; template <typename T> struct DispatchDtypeTrait { using FuncVersion = NormalVersion; }; template <> struct DispatchDtypeTrait<int32_t> { using FuncVersion = UnusedVersion; }; template <typename T, typename Context> void DispatchWithDtype(const Context &dev_ctx, const DenseTensor &x, const paddle::optional<DenseTensor> &bias, const paddle::optional<DenseTensor> &dequant_scales, const paddle::optional<DenseTensor> &shift, const paddle::optional<DenseTensor> &smooth, const std::string &act_method, int rows, int cols, float quant_scale, int quant_round_type, float quant_max_bound, float quant_min_bound, DenseTensor *out, NormalVersion) { auto *bias_p = bias.get_ptr(); auto *dequant_scales_p = dequant_scales.get_ptr(); auto *shift_p = shift.get_ptr(); auto *smooth_p = smooth.get_ptr(); if (dequant_scales_p != nullptr) { if (shift_p != nullptr) { DispatchComputeImpl<T>(dev_ctx, x, bias_p, dequant_scales_p, shift_p, smooth_p, act_method, rows, cols, quant_scale, quant_round_type, quant_max_bound, quant_min_bound, out); } else { DispatchComputeImpl<T>(dev_ctx, x, bias_p, dequant_scales_p, act_method, rows, cols, quant_scale, quant_round_type, quant_max_bound, quant_min_bound, out); } } else { const T *bias_data = bias_p == nullptr ? nullptr : bias_p->data<T>(); Load<T> load_func(x.data<T>()); Store<T> store_func(dev_ctx.template Alloc<T>(out)); ComputeImpl<T>( dev_ctx, bias_data, act_method, rows, cols, load_func, store_func); } } // (not use) only for registering int32_t template <typename T, typename Context> void DispatchWithDtype(const Context &dev_ctx, const DenseTensor &x, const paddle::optional<DenseTensor> &bias, const paddle::optional<DenseTensor> &dequant_scales, const paddle::optional<DenseTensor> &shift, const paddle::optional<DenseTensor> &smooth, const std::string &act_method, int rows, int cols, float quant_scale, int quant_round_type, float quant_max_bound, float quant_min_bound, DenseTensor *out, UnusedVersion) {} #endif template <typename T, typename Context> void FusedBiasActKernel(const Context &dev_ctx, const DenseTensor &x, const paddle::optional<DenseTensor> &bias, const paddle::optional<DenseTensor> &dequant_scales, const paddle::optional<DenseTensor> &shift, const paddle::optional<DenseTensor> &smooth, const std::string &act_method, const std::string &compute_dtype, float quant_scale, int quant_round_type, float quant_max_bound, float quant_min_bound, DenseTensor *out) { #ifndef PADDLE_WITH_HIP int rows = x.dims()[0]; int cols = x.dims()[1]; if (x.dtype() == phi::DataType::INT32) { if (compute_dtype == "bf16") { DispatchWithDtype<phi::dtype::bfloat16, Context>( dev_ctx, x, bias, dequant_scales, shift, smooth, act_method, rows, cols, quant_scale, quant_round_type, quant_max_bound, quant_min_bound, out, typename DispatchDtypeTrait<phi::dtype::bfloat16>::FuncVersion{}); } else if (compute_dtype == "fp16") { DispatchWithDtype<phi::dtype::float16, Context>( dev_ctx, x, bias, dequant_scales, shift, smooth, act_method, rows, cols, quant_scale, quant_round_type, quant_max_bound, quant_min_bound, out, typename DispatchDtypeTrait<phi::dtype::float16>::FuncVersion{}); } else if (compute_dtype == "fp32") { DispatchWithDtype<float, Context>( dev_ctx, x, bias, dequant_scales, shift, smooth, act_method, rows, cols, quant_scale, quant_round_type, quant_max_bound, quant_min_bound, out, typename DispatchDtypeTrait<float>::FuncVersion{}); 
} else { PADDLE_THROW(phi::errors::InvalidArgument( "In the case of quantization enabled with Input(x) INT32, " "Attr(compute_dtype) must be set in (bf16, fp16, fp32), " "but get compute_dtype (%s)", compute_dtype)); } } else { DispatchWithDtype<T, Context>( dev_ctx, x, bias, dequant_scales, shift, smooth, act_method, rows, cols, quant_scale, quant_round_type, quant_max_bound, quant_min_bound, out, typename DispatchDtypeTrait<T>::FuncVersion{}); } #endif } } // namespace fusion } // namespace phi PD_REGISTER_KERNEL(fused_bias_act, GPU, ALL_LAYOUT, phi::fusion::FusedBiasActKernel, float, phi::dtype::bfloat16, phi::dtype::float16, int32_t) {}
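This .cu file and the .hip file above it differ mainly in the mechanical rewrites applied by hipify: the hip_runtime.h include, hip-prefixed runtime calls, and hipLaunchKernelGGL in place of the triple-chevron launch. The toy program below (a hypothetical ScaleKernel, not part of the phi sources) shows the CUDA form with the corresponding HIP spellings noted in comments.

#include <cuda_runtime.h>   // hipify rewrites this include to "hip/hip_runtime.h"
#include <cstdio>

__global__ void ScaleKernel(float* y, float a, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) y[i] *= a;
}

int main() {
  const int n = 1 << 10;
  float* d = nullptr;
  cudaMalloc(&d, n * sizeof(float));            // -> hipMalloc
  cudaMemset(d, 0, n * sizeof(float));          // -> hipMemset
  dim3 block(256), grid((n + 255) / 256);
  // CUDA launch, as it appears in the .cu files of this dataset:
  ScaleKernel<<<grid, block, 0, 0>>>(d, 2.0f, n);
  // The .hip files express the same launch as:
  //   hipLaunchKernelGGL((ScaleKernel), dim3(grid), dim3(block), 0, 0, d, 2.0f, n);
  cudaDeviceSynchronize();                      // -> hipDeviceSynchronize
  printf("last error: %s\n", cudaGetErrorString(cudaGetLastError()));
  cudaFree(d);                                  // -> hipFree
  return 0;
}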
b1a2539d89dbf47a831e7c9e17eb141e680bd3e5.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Simple CUDA example by Ingemar Ragnemalm 2009. Simplest possible?
// Assigns every element in an array with its index.
// nvcc simple.cu -L /usr/local/cuda/lib -lcudart -o simple
#include <stdio.h>
#include <math.h>

const int N = 16;
const int blocksize = 16;

__global__ void simple(float *c)
{
    c[threadIdx.x] = threadIdx.x;
}

__global__ void simple_sqrt(float *c)
{
    c[threadIdx.x] = sqrtf(threadIdx.x); // Calculate the square root of the thread index as floats
}

int main()
{
    float *c = new float[N];
    float *cd;
    const int size = N*sizeof(float); // Allocate memory for an array of floats of size N

    hipMalloc( (void**)&cd, size );
    dim3 dimBlock( blocksize, 1 ); // QUESTION 1: Here we create a block with 16 threads --> We use 16 threads/cores on 1 SM
    dim3 dimGrid( 1, 1 ); // QUESTION 1: Here we see that the grid is 1 x 1 --> We have one block
    hipLaunchKernelGGL(( simple_sqrt), dim3(dimGrid), dim3(dimBlock), 0, 0, cd);
    hipDeviceSynchronize();
    hipMemcpy( c, cd, size, hipMemcpyDeviceToHost );
    hipFree( cd );

    printf("\n\n*--------SQUARE ROOT USING GPU: --------*\n");
    for (int i = 0; i < N; i++)
        printf("%.10f ", c[i]);
    printf("\n*---------------------------------------*");

    printf("\n\n*--------SQUARE ROOT USING CPU: --------*\n");
    for(int i = 0; i < N; i++)
    {
        c[i] = sqrtf(i);
        printf("%.10f ", c[i]);
    }
    printf("\n*---------------------------------------*");

    delete[] c;
    printf("\ndone\n");
    return EXIT_SUCCESS;
}
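None of the runtime calls in this example are checked for errors. A common pattern, assumed here rather than taken from this file (the CUDA_CHECK_ERROR used by a later file in this dump comes from an unshown cuda_utils.h), is a wrapper macro that reports the failing call with file and line:

#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>

// Abort with file/line context if a CUDA runtime call fails.
#define CUDA_CHECK(call)                                              \
  do {                                                                \
    cudaError_t err_ = (call);                                        \
    if (err_ != cudaSuccess) {                                        \
      fprintf(stderr, "CUDA error %s at %s:%d\n",                     \
              cudaGetErrorString(err_), __FILE__, __LINE__);          \
      exit(EXIT_FAILURE);                                             \
    }                                                                 \
  } while (0)

__global__ void fill(float* c) { c[threadIdx.x] = (float)threadIdx.x; }

int main() {
  float* cd = NULL;
  CUDA_CHECK(cudaMalloc((void**)&cd, 16 * sizeof(float)));
  fill<<<1, 16>>>(cd);
  CUDA_CHECK(cudaGetLastError());        // catches launch-configuration errors
  CUDA_CHECK(cudaDeviceSynchronize());   // catches errors raised during execution
  CUDA_CHECK(cudaFree(cd));
  printf("ok\n");
  return 0;
}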
b1a2539d89dbf47a831e7c9e17eb141e680bd3e5.cu
// Simple CUDA example by Ingemar Ragnemalm 2009. Simplest possible?
// Assigns every element in an array with its index.
// nvcc simple.cu -L /usr/local/cuda/lib -lcudart -o simple
#include <stdio.h>
#include <math.h>

const int N = 16;
const int blocksize = 16;

__global__ void simple(float *c)
{
    c[threadIdx.x] = threadIdx.x;
}

__global__ void simple_sqrt(float *c)
{
    c[threadIdx.x] = sqrtf(threadIdx.x); // Calculate the square root of the thread index as floats
}

int main()
{
    float *c = new float[N];
    float *cd;
    const int size = N*sizeof(float); // Allocate memory for an array of floats of size N

    cudaMalloc( (void**)&cd, size );
    dim3 dimBlock( blocksize, 1 ); // QUESTION 1: Here we create a block with 16 threads --> We use 16 threads/cores on 1 SM
    dim3 dimGrid( 1, 1 ); // QUESTION 1: Here we see that the grid is 1 x 1 --> We have one block
    simple_sqrt<<<dimGrid, dimBlock>>>(cd);
    cudaThreadSynchronize();
    cudaMemcpy( c, cd, size, cudaMemcpyDeviceToHost );
    cudaFree( cd );

    printf("\n\n*--------SQUARE ROOT USING GPU: --------*\n");
    for (int i = 0; i < N; i++)
        printf("%.10f ", c[i]);
    printf("\n*---------------------------------------*");

    printf("\n\n*--------SQUARE ROOT USING CPU: --------*\n");
    for(int i = 0; i < N; i++)
    {
        c[i] = sqrtf(i);
        printf("%.10f ", c[i]);
    }
    printf("\n*---------------------------------------*");

    delete[] c;
    printf("\ndone\n");
    return EXIT_SUCCESS;
}
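The QUESTION 1 comments point out that this program uses a single block of 16 threads, which only works because N equals blocksize. Below is a sketch of the usual generalization, sizing the grid by ceiling division and guarding the tail block; the kernel name and sizes are illustrative, not part of the original exercise.

#include <cuda_runtime.h>
#include <stdio.h>
#include <math.h>

__global__ void sqrt_kernel(float *c, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)                 // guard: the last block may be only partially full
        c[i] = sqrtf((float)i);
}

int main()
{
    const int n = 1000;        // no longer needs to match the block size
    const int blocksize = 256;
    float *cd;
    cudaMalloc((void**)&cd, n * sizeof(float));
    dim3 dimBlock(blocksize, 1);
    dim3 dimGrid((n + blocksize - 1) / blocksize, 1);  // ceil(n / blocksize) blocks
    sqrt_kernel<<<dimGrid, dimBlock>>>(cd, n);
    cudaDeviceSynchronize();
    float *c = new float[n];
    cudaMemcpy(c, cd, n * sizeof(float), cudaMemcpyDeviceToHost);
    printf("sqrt(%d) = %.6f\n", n - 1, c[n - 1]);
    cudaFree(cd);
    delete[] c;
    return 0;
}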
820ddb9f5c62232cda2fafb6e2cb8ec90d815dec.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include "timer.h"
#include "cuda_utils.h"

typedef float dtype;

#define N_ (8 * 1024 * 1024)
#define MAX_THREADS 256
#define MAX_BLOCKS 64
#define MIN(x,y) ((x < y) ? x : y)

/* return the next power of 2 number that is larger than x */
unsigned int nextPow2( unsigned int x ) {
  --x;
  x |= x >> 1;
  x |= x >> 2;
  x |= x >> 4;
  x |= x >> 8;
  x |= x >> 16;
  return ++x;
}

/* find out # of threads and # thread blocks for a particular kernel */
void getNumBlocksAndThreads(int whichKernel, int n, int maxBlocks,
                            int maxThreads, int &blocks, int &threads)
{
  if (whichKernel < 3) {
    /* 1 thread per element */
    threads = (n < maxThreads) ? nextPow2(n) : maxThreads;
    blocks = (n + threads - 1) / threads;
  } else {
    /* 1 thread per 2 elements */
    threads = (n < maxThreads*2) ? nextPow2((n + 1)/ 2) : maxThreads;
    blocks = (n + (threads * 2 - 1)) / (threads * 2);
  }
  /* limit the total number of threads */
  if (whichKernel == 5)
    blocks = MIN(maxBlocks, blocks);
}

/* special type of reduction to account for floating point error */
dtype reduce_cpu(dtype *data, int n) {
  dtype sum = data[0];
  dtype c = (dtype)0.0;
  for (int i = 1; i < n; i++) {
    dtype y = data[i] - c;
    dtype t = sum + y;
    c = (t - sum) - y;
    sum = t;
  }
  return sum;
}

__global__ void
kernel3(dtype *g_idata, dtype *g_odata, unsigned int n)
{
  __shared__ dtype scratch[MAX_THREADS];

  unsigned int bid = gridDim.x * blockIdx.y + blockIdx.x;
  unsigned int i = bid * blockDim.x + threadIdx.x;

  if(i < (n>>1)) {
    scratch[threadIdx.x] = g_idata[i]+g_idata[i+(n>>1)];
  } else {
    scratch[threadIdx.x] = 0;
  }
  __syncthreads ();

  for(unsigned int s = blockDim.x >> 1; s >= 1; s = s >> 1) {
    if(threadIdx.x < s) {
      scratch[threadIdx.x] += scratch[threadIdx.x + s];
    }
    __syncthreads ();
  }

  if(threadIdx.x == 0) {
    g_odata[bid] = scratch[0]; // the blocks overwrite the first "numOfBlocks" elements in the output array. each block writes at the block idx location.
  }
}

int main(int argc, char** argv)
{
  int i;

  /* data structure */
  dtype *h_idata, h_odata, h_cpu;
  dtype *d_idata, *d_odata;

  /* timer */
  struct stopwatch_t* timer = NULL;
  long double t_kernel_3, t_cpu;

  /* which kernel are we running */
  int whichKernel;

  /* number of threads and thread blocks */
  int threads, blocks;

  int N;
  if(argc > 1) {
    N = atoi (argv[1]);
    printf("N: %d\n", N);
  } else {
    N = N_;
    printf("N: %d\n", N);
  }

  /* naive kernel */
  whichKernel = 3;

  getNumBlocksAndThreads (whichKernel, N, MAX_BLOCKS, MAX_THREADS, blocks, threads);

  /* initialize timer */
  stopwatch_init ();
  timer = stopwatch_create ();

  /* allocate memory */
  h_idata = (dtype*) malloc (N * sizeof (dtype));
  CUDA_CHECK_ERROR (hipMalloc (&d_idata, N * sizeof (dtype)));
  CUDA_CHECK_ERROR (hipMalloc (&d_odata, blocks * sizeof (dtype)));

  /* Initialize array */
  srand48(time(NULL));
  for(i = 0; i < N; i++) {
    h_idata[i] = drand48() / 100000;
  }

  CUDA_CHECK_ERROR (hipMemcpy (d_idata, h_idata, N * sizeof (dtype), hipMemcpyHostToDevice));

  /* ================================================== */
  /* GPU kernel */
  dim3 gb(16, ((blocks + 16 - 1) / 16), 1);
  dim3 tb(threads, 1, 1);

  /* warm up */
  hipLaunchKernelGGL(( kernel3) , dim3(gb), dim3(tb), 0, 0, d_idata, d_odata, N);
  hipDeviceSynchronize ();

  stopwatch_start (timer);

  /* execute kernel */
  hipLaunchKernelGGL(( kernel3) , dim3(gb), dim3(tb), 0, 0, d_idata, d_odata, N);
  int s = blocks;
  while(s > 1) {
    threads = 0;
    blocks = 0;
    getNumBlocksAndThreads (whichKernel, s, MAX_BLOCKS, MAX_THREADS, blocks, threads);
    dim3 gb(16, (blocks + 16 - 1) / 16, 1);
    dim3 tb(threads, 1, 1);
    hipLaunchKernelGGL(( kernel3) , dim3(gb), dim3(tb), 0, 0, d_odata, d_odata, s);
    s = (s + threads * 2 - 1) / (threads * 2);
  }
  hipDeviceSynchronize ();
  t_kernel_3 = stopwatch_stop (timer);
  fprintf (stdout, "Time to execute first add GPU reduction kernel: %Lg secs\n", t_kernel_3);
  double bw = (N * sizeof(dtype)) / (t_kernel_3 * 1e9);
  fprintf (stdout, "Effective bandwidth: %.2lf GB/s\n", bw);

  /* copy result back from GPU */
  CUDA_CHECK_ERROR (hipMemcpy (&h_odata, d_odata, sizeof (dtype), hipMemcpyDeviceToHost));
  /* ================================================== */

  /* ================================================== */
  /* CPU kernel */
  stopwatch_start (timer);
  h_cpu = reduce_cpu (h_idata, N);
  t_cpu = stopwatch_stop (timer);
  fprintf (stdout, "Time to execute naive CPU reduction: %Lg secs\n", t_cpu);
  /* ================================================== */

  if(abs (h_odata - h_cpu) > 1e-5) {
    fprintf(stderr, "FAILURE: GPU: %f CPU: %f\n", h_odata, h_cpu);
  } else {
    printf("SUCCESS: GPU: %f CPU: %f\n", h_odata, h_cpu);
  }

  return 0;
}
820ddb9f5c62232cda2fafb6e2cb8ec90d815dec.cu
#include <stdlib.h>
#include <stdio.h>
#include "timer.h"
#include "cuda_utils.h"

typedef float dtype;

#define N_ (8 * 1024 * 1024)
#define MAX_THREADS 256
#define MAX_BLOCKS 64
#define MIN(x,y) ((x < y) ? x : y)

/* return the next power of 2 number that is larger than x */
unsigned int nextPow2( unsigned int x ) {
  --x;
  x |= x >> 1;
  x |= x >> 2;
  x |= x >> 4;
  x |= x >> 8;
  x |= x >> 16;
  return ++x;
}

/* find out # of threads and # thread blocks for a particular kernel */
void getNumBlocksAndThreads(int whichKernel, int n, int maxBlocks,
                            int maxThreads, int &blocks, int &threads)
{
  if (whichKernel < 3) {
    /* 1 thread per element */
    threads = (n < maxThreads) ? nextPow2(n) : maxThreads;
    blocks = (n + threads - 1) / threads;
  } else {
    /* 1 thread per 2 elements */
    threads = (n < maxThreads*2) ? nextPow2((n + 1)/ 2) : maxThreads;
    blocks = (n + (threads * 2 - 1)) / (threads * 2);
  }
  /* limit the total number of threads */
  if (whichKernel == 5)
    blocks = MIN(maxBlocks, blocks);
}

/* special type of reduction to account for floating point error */
dtype reduce_cpu(dtype *data, int n) {
  dtype sum = data[0];
  dtype c = (dtype)0.0;
  for (int i = 1; i < n; i++) {
    dtype y = data[i] - c;
    dtype t = sum + y;
    c = (t - sum) - y;
    sum = t;
  }
  return sum;
}

__global__ void
kernel3(dtype *g_idata, dtype *g_odata, unsigned int n)
{
  __shared__ dtype scratch[MAX_THREADS];

  unsigned int bid = gridDim.x * blockIdx.y + blockIdx.x;
  unsigned int i = bid * blockDim.x + threadIdx.x;

  if(i < (n>>1)) {
    scratch[threadIdx.x] = g_idata[i]+g_idata[i+(n>>1)];
  } else {
    scratch[threadIdx.x] = 0;
  }
  __syncthreads ();

  for(unsigned int s = blockDim.x >> 1; s >= 1; s = s >> 1) {
    if(threadIdx.x < s) {
      scratch[threadIdx.x] += scratch[threadIdx.x + s];
    }
    __syncthreads ();
  }

  if(threadIdx.x == 0) {
    g_odata[bid] = scratch[0]; // the blocks overwrite the first "numOfBlocks" elements in the output array. each block writes at the block idx location.
  }
}

int main(int argc, char** argv)
{
  int i;

  /* data structure */
  dtype *h_idata, h_odata, h_cpu;
  dtype *d_idata, *d_odata;

  /* timer */
  struct stopwatch_t* timer = NULL;
  long double t_kernel_3, t_cpu;

  /* which kernel are we running */
  int whichKernel;

  /* number of threads and thread blocks */
  int threads, blocks;

  int N;
  if(argc > 1) {
    N = atoi (argv[1]);
    printf("N: %d\n", N);
  } else {
    N = N_;
    printf("N: %d\n", N);
  }

  /* naive kernel */
  whichKernel = 3;

  getNumBlocksAndThreads (whichKernel, N, MAX_BLOCKS, MAX_THREADS, blocks, threads);

  /* initialize timer */
  stopwatch_init ();
  timer = stopwatch_create ();

  /* allocate memory */
  h_idata = (dtype*) malloc (N * sizeof (dtype));
  CUDA_CHECK_ERROR (cudaMalloc (&d_idata, N * sizeof (dtype)));
  CUDA_CHECK_ERROR (cudaMalloc (&d_odata, blocks * sizeof (dtype)));

  /* Initialize array */
  srand48(time(NULL));
  for(i = 0; i < N; i++) {
    h_idata[i] = drand48() / 100000;
  }

  CUDA_CHECK_ERROR (cudaMemcpy (d_idata, h_idata, N * sizeof (dtype), cudaMemcpyHostToDevice));

  /* ================================================== */
  /* GPU kernel */
  dim3 gb(16, ((blocks + 16 - 1) / 16), 1);
  dim3 tb(threads, 1, 1);

  /* warm up */
  kernel3 <<<gb, tb>>> (d_idata, d_odata, N);
  cudaThreadSynchronize ();

  stopwatch_start (timer);

  /* execute kernel */
  kernel3 <<<gb, tb>>> (d_idata, d_odata, N);
  int s = blocks;
  while(s > 1) {
    threads = 0;
    blocks = 0;
    getNumBlocksAndThreads (whichKernel, s, MAX_BLOCKS, MAX_THREADS, blocks, threads);
    dim3 gb(16, (blocks + 16 - 1) / 16, 1);
    dim3 tb(threads, 1, 1);
    kernel3 <<<gb, tb>>> (d_odata, d_odata, s);
    s = (s + threads * 2 - 1) / (threads * 2);
  }
  cudaThreadSynchronize ();
  t_kernel_3 = stopwatch_stop (timer);
  fprintf (stdout, "Time to execute first add GPU reduction kernel: %Lg secs\n", t_kernel_3);
  double bw = (N * sizeof(dtype)) / (t_kernel_3 * 1e9);
  fprintf (stdout, "Effective bandwidth: %.2lf GB/s\n", bw);

  /* copy result back from GPU */
  CUDA_CHECK_ERROR (cudaMemcpy (&h_odata, d_odata, sizeof (dtype), cudaMemcpyDeviceToHost));
  /* ================================================== */

  /* ================================================== */
  /* CPU kernel */
  stopwatch_start (timer);
  h_cpu = reduce_cpu (h_idata, N);
  t_cpu = stopwatch_stop (timer);
  fprintf (stdout, "Time to execute naive CPU reduction: %Lg secs\n", t_cpu);
  /* ================================================== */

  if(abs (h_odata - h_cpu) > 1e-5) {
    fprintf(stderr, "FAILURE: GPU: %f CPU: %f\n", h_odata, h_cpu);
  } else {
    printf("SUCCESS: GPU: %f CPU: %f\n", h_odata, h_cpu);
  }

  return 0;
}
7a662e91928b959bb1696e1a25988f0ed8daa0e7.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by op2.m on 29-Oct-2012 09:37:53
//

// user function

__device__
#include "update.h"

// CUDA kernel function

__global__ void op_cuda_update(
  double *arg0,
  double *arg1,
  double *arg2,
  double *arg3,
  double *arg4,
  int     offset_s,
  int     set_size )
{
  double arg0_l[4];
  double arg1_l[4];
  double arg2_l[4];
  double arg4_l[1];
  for (int d=0; d<1; d++) arg4_l[d]=ZERO_double;
  int tid = threadIdx.x%OP_WARPSIZE;

  extern __shared__ char shared[];
  char *arg_s = shared + offset_s*(threadIdx.x/OP_WARPSIZE);

  // process set elements
  for (int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x) {

    int offset = n - tid;
    int nelems = MIN(OP_WARPSIZE,set_size-offset);

    // copy data into shared memory, then into local
    for (int m=0; m<4; m++)
      ((double *)arg_s)[tid+m*nelems] = arg0[tid+m*nelems+offset*4];
    for (int m=0; m<4; m++)
      arg0_l[m] = ((double *)arg_s)[m+tid*4];
    for (int m=0; m<4; m++)
      ((double *)arg_s)[tid+m*nelems] = arg2[tid+m*nelems+offset*4];
    for (int m=0; m<4; m++)
      arg2_l[m] = ((double *)arg_s)[m+tid*4];

    // user-supplied kernel call
    update( arg0_l, arg1_l, arg2_l, arg3+n, arg4_l );

    // copy back into shared memory, then to device
    for (int m=0; m<4; m++)
      ((double *)arg_s)[m+tid*4] = arg1_l[m];
    for (int m=0; m<4; m++)
      arg1[tid+m*nelems+offset*4] = ((double *)arg_s)[tid+m*nelems];
    for (int m=0; m<4; m++)
      ((double *)arg_s)[m+tid*4] = arg2_l[m];
    for (int m=0; m<4; m++)
      arg2[tid+m*nelems+offset*4] = ((double *)arg_s)[tid+m*nelems];
  }

  // global reductions
  for(int d=0; d<1; d++)
    op_reduction<OP_INC>(&arg4[d+blockIdx.x*1],arg4_l[d]);
}

// host stub function

void op_par_loop_update(char const *name, op_set set,
  op_arg arg0,
  op_arg arg1,
  op_arg arg2,
  op_arg arg3,
  op_arg arg4 ){

  double *arg4h = (double *)arg4.data;

  int nargs = 5;
  op_arg args[5];

  args[0] = arg0;
  args[1] = arg1;
  args[2] = arg2;
  args[3] = arg3;
  args[4] = arg4;

  if (OP_diags>2) {
    printf(" kernel routine w/o indirection: update\n");
  }

  op_mpi_halo_exchanges_cuda(set, nargs, args);

  // initialise timers
  double cpu_t1, cpu_t2, wall_t1=0, wall_t2=0;
  op_timing_realloc(4);
  OP_kernels[4].name = name;
  OP_kernels[4].count += 1;

  if (set->size >0) {

    op_timers_core(&cpu_t1, &wall_t1);

    // set CUDA execution parameters
#ifdef OP_BLOCK_SIZE_4
    int nthread = OP_BLOCK_SIZE_4;
#else
    // int nthread = OP_block_size;
    int nthread = 128;
#endif

    int nblocks = 200;

    // transfer global reduction data to GPU
    int maxblocks = nblocks;
    int reduct_bytes = 0;
    int reduct_size = 0;
    reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
    reduct_size = MAX(reduct_size,sizeof(double));

    reallocReductArrays(reduct_bytes);

    reduct_bytes = 0;
    arg4.data   = OP_reduct_h + reduct_bytes;
    arg4.data_d = OP_reduct_d + reduct_bytes;
    for (int b=0; b<maxblocks; b++)
      for (int d=0; d<1; d++)
        ((double *)arg4.data)[d+b*1] = ZERO_double;
    reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));

    mvReductArraysToDevice(reduct_bytes);

    // work out shared memory requirements per element
    int nshared = 0;
    nshared = MAX(nshared,sizeof(double)*4);
    nshared = MAX(nshared,sizeof(double)*4);
    nshared = MAX(nshared,sizeof(double)*4);

    // execute plan
    int offset_s = nshared*OP_WARPSIZE;

    nshared = MAX(nshared*nthread,reduct_size*nthread);

    hipLaunchKernelGGL(( op_cuda_update), dim3(nblocks),dim3(nthread),nshared, 0,
      (double *) arg0.data_d,
      (double *) arg1.data_d,
      (double *) arg2.data_d,
      (double *) arg3.data_d,
      (double *) arg4.data_d,
      offset_s,
      set->size );

    cutilSafeCall(hipDeviceSynchronize());
    cutilCheckMsg("op_cuda_update execution failed\n");

    // transfer global reduction data back to CPU
    mvReductArraysToHost(reduct_bytes);

    for (int b=0; b<maxblocks; b++)
      for (int d=0; d<1; d++)
        arg4h[d] = arg4h[d] + ((double *)arg4.data)[d+b*1];

    arg4.data = (char *)arg4h;

    op_mpi_reduce(&arg4,arg4h);
  }

  op_mpi_set_dirtybit_cuda(nargs, args);

  // update kernel record
  op_timers_core(&cpu_t2, &wall_t2);
  OP_kernels[4].time += wall_t2 - wall_t1;
  OP_kernels[4].transfer += (float)set->size * arg0.size;
  OP_kernels[4].transfer += (float)set->size * arg1.size;
  OP_kernels[4].transfer += (float)set->size * arg2.size * 2.0f;
  OP_kernels[4].transfer += (float)set->size * arg3.size;
}
7a662e91928b959bb1696e1a25988f0ed8daa0e7.cu
//
// auto-generated by op2.m on 29-Oct-2012 09:37:53
//

// user function

__device__
#include "update.h"

// CUDA kernel function

__global__ void op_cuda_update(
  double *arg0,
  double *arg1,
  double *arg2,
  double *arg3,
  double *arg4,
  int     offset_s,
  int     set_size )
{
  double arg0_l[4];
  double arg1_l[4];
  double arg2_l[4];
  double arg4_l[1];
  for (int d=0; d<1; d++) arg4_l[d]=ZERO_double;
  int tid = threadIdx.x%OP_WARPSIZE;

  extern __shared__ char shared[];
  char *arg_s = shared + offset_s*(threadIdx.x/OP_WARPSIZE);

  // process set elements
  for (int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x) {

    int offset = n - tid;
    int nelems = MIN(OP_WARPSIZE,set_size-offset);

    // copy data into shared memory, then into local
    for (int m=0; m<4; m++)
      ((double *)arg_s)[tid+m*nelems] = arg0[tid+m*nelems+offset*4];
    for (int m=0; m<4; m++)
      arg0_l[m] = ((double *)arg_s)[m+tid*4];
    for (int m=0; m<4; m++)
      ((double *)arg_s)[tid+m*nelems] = arg2[tid+m*nelems+offset*4];
    for (int m=0; m<4; m++)
      arg2_l[m] = ((double *)arg_s)[m+tid*4];

    // user-supplied kernel call
    update( arg0_l, arg1_l, arg2_l, arg3+n, arg4_l );

    // copy back into shared memory, then to device
    for (int m=0; m<4; m++)
      ((double *)arg_s)[m+tid*4] = arg1_l[m];
    for (int m=0; m<4; m++)
      arg1[tid+m*nelems+offset*4] = ((double *)arg_s)[tid+m*nelems];
    for (int m=0; m<4; m++)
      ((double *)arg_s)[m+tid*4] = arg2_l[m];
    for (int m=0; m<4; m++)
      arg2[tid+m*nelems+offset*4] = ((double *)arg_s)[tid+m*nelems];
  }

  // global reductions
  for(int d=0; d<1; d++)
    op_reduction<OP_INC>(&arg4[d+blockIdx.x*1],arg4_l[d]);
}

// host stub function

void op_par_loop_update(char const *name, op_set set,
  op_arg arg0,
  op_arg arg1,
  op_arg arg2,
  op_arg arg3,
  op_arg arg4 ){

  double *arg4h = (double *)arg4.data;

  int nargs = 5;
  op_arg args[5];

  args[0] = arg0;
  args[1] = arg1;
  args[2] = arg2;
  args[3] = arg3;
  args[4] = arg4;

  if (OP_diags>2) {
    printf(" kernel routine w/o indirection: update\n");
  }

  op_mpi_halo_exchanges_cuda(set, nargs, args);

  // initialise timers
  double cpu_t1, cpu_t2, wall_t1=0, wall_t2=0;
  op_timing_realloc(4);
  OP_kernels[4].name = name;
  OP_kernels[4].count += 1;

  if (set->size >0) {

    op_timers_core(&cpu_t1, &wall_t1);

    // set CUDA execution parameters
#ifdef OP_BLOCK_SIZE_4
    int nthread = OP_BLOCK_SIZE_4;
#else
    // int nthread = OP_block_size;
    int nthread = 128;
#endif

    int nblocks = 200;

    // transfer global reduction data to GPU
    int maxblocks = nblocks;
    int reduct_bytes = 0;
    int reduct_size = 0;
    reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
    reduct_size = MAX(reduct_size,sizeof(double));

    reallocReductArrays(reduct_bytes);

    reduct_bytes = 0;
    arg4.data   = OP_reduct_h + reduct_bytes;
    arg4.data_d = OP_reduct_d + reduct_bytes;
    for (int b=0; b<maxblocks; b++)
      for (int d=0; d<1; d++)
        ((double *)arg4.data)[d+b*1] = ZERO_double;
    reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));

    mvReductArraysToDevice(reduct_bytes);

    // work out shared memory requirements per element
    int nshared = 0;
    nshared = MAX(nshared,sizeof(double)*4);
    nshared = MAX(nshared,sizeof(double)*4);
    nshared = MAX(nshared,sizeof(double)*4);

    // execute plan
    int offset_s = nshared*OP_WARPSIZE;

    nshared = MAX(nshared*nthread,reduct_size*nthread);

    op_cuda_update<<<nblocks,nthread,nshared>>>(
      (double *) arg0.data_d,
      (double *) arg1.data_d,
      (double *) arg2.data_d,
      (double *) arg3.data_d,
      (double *) arg4.data_d,
      offset_s,
      set->size );

    cutilSafeCall(cudaDeviceSynchronize());
    cutilCheckMsg("op_cuda_update execution failed\n");

    // transfer global reduction data back to CPU
    mvReductArraysToHost(reduct_bytes);

    for (int b=0; b<maxblocks; b++)
      for (int d=0; d<1; d++)
        arg4h[d] = arg4h[d] + ((double *)arg4.data)[d+b*1];

    arg4.data = (char *)arg4h;

    op_mpi_reduce(&arg4,arg4h);
  }

  op_mpi_set_dirtybit_cuda(nargs, args);

  // update kernel record
  op_timers_core(&cpu_t2, &wall_t2);
  OP_kernels[4].time += wall_t2 - wall_t1;
  OP_kernels[4].transfer += (float)set->size * arg0.size;
  OP_kernels[4].transfer += (float)set->size * arg1.size;
  OP_kernels[4].transfer += (float)set->size * arg2.size * 2.0f;
  OP_kernels[4].transfer += (float)set->size * arg3.size;
}
3a376e30b7a0d1078706204f9a76d8c81895e268.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************************************** * Copyright (c) 2020, Vijay Thakkar ([email protected]). **************************************************************************************************/ ////////////////////////////////////////////////////////////////////// // THIS BENCHMARK FILE IS GENERATED AUTOMATICALLY : DO NOT MODIFY // ////////////////////////////////////////////////////////////////////// #include "benchmark/benchmark.h" #include "cuasr/gemm/device/default_srgemm_configuration.h" #include "cuasr/gemm/device/srgemm.h" #include "cuasr/functional.h" #include "harness.h" //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 8 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_8x32x8_8x32x1_2x4_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_8x32x8_8x32x1_2x4_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 16 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_16x32x8_16x32x1_4x4_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = 
Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_16x32x8_16x32x1_4x4_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 16 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_16x64x8_16x64x1_4x8_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_16x64x8_16x64x1_4x8_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x32x8_32x32x1_8x4_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // 
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x32x8_32x32x1_8x4_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x64x8_32x64x1_8x8_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x64x8_32x64x1_8x8_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 1 x 1 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x32x8_64x32x1_8x8_8x4_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = 
cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x32x8_64x32x1_8x8_8x4_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 8 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_8x32x8_8x16x1_2x2_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_8x32x8_8x16x1_2x2_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 8 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_8x64x8_8x32x1_2x4_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using 
ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_8x64x8_8x32x1_2x4_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 16 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_16x32x8_16x16x1_4x2_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_16x32x8_16x16x1_4x2_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 16 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_16x64x8_16x32x1_4x4_4x8_1x2(benchmark::State &state) { const auto N = 
static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_16x64x8_16x32x1_4x4_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 16 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_16x128x8_16x64x1_4x8_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_16x128x8_16x64x1_4x8_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 1 x 2 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and 
(CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x32x8_32x16x1_4x4_8x4_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x32x8_32x16x1_4x4_8x4_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x64x8_32x32x1_8x4_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x64x8_32x32x1_8x4_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif 
//////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 32 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x128x8_32x64x1_8x8_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x128x8_32x64x1_8x8_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 1 x 2 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x64x8_64x32x1_8x8_8x4_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, 
benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x64x8_64x32x1_8x8_8x4_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 1 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x32x8_16x32x1_4x4_4x8_2x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x32x8_16x32x1_4x4_4x8_2x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 1 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x32x8_32x32x1_8x4_4x8_2x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { 
benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x32x8_32x32x1_8x4_4x8_2x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 1 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x64x8_32x64x1_8x8_4x8_2x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x64x8_32x64x1_8x8_4x8_2x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 1 // Threadblock: 128 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_128x32x8_64x32x1_8x8_8x4_2x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // 
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_128x32x8_64x32x1_8x8_8x4_2x1)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
//    Threads / Warp: 4 x 8
//     Warps / Block: 2 x 2
//       Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_16x32x8_8x16x1_2x2_4x8_2x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision = float;
  using OpClass   = cutlass::arch::OpClassSimt;
  using SmArch    = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<8, 16, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;

  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::RowMajor,                            //
      precision, cutlass::layout::RowMajor,                            //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_16x32x8_8x16x1_2x2_4x8_2x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
//    Threads / Warp: 4 x 8
//     Warps / Block: 2 x 2
//       Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_16x64x8_8x32x1_2x4_4x8_2x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision = float;
  using OpClass   = cutlass::arch::OpClassSimt;
  using SmArch    = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<8, 32, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;

  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::RowMajor,                            //
      precision, cutlass::layout::RowMajor,                            //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_16x64x8_8x32x1_2x4_4x8_2x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
//    Threads / Warp: 4 x 8
//     Warps / Block: 2 x 2
//       Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x32x8_16x16x1_4x2_4x8_2x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision = float;
  using OpClass   = cutlass::arch::OpClassSimt;
  using SmArch    = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<16, 16, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;

  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::RowMajor,                            //
      precision, cutlass::layout::RowMajor,                            //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x32x8_16x16x1_4x2_4x8_2x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
//    Threads / Warp: 4 x 8
//     Warps / Block: 2 x 2
//       Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x64x8_16x32x1_4x4_4x8_2x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision = float;
  using OpClass   = cutlass::arch::OpClassSimt;
  using SmArch    = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<16, 32, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;

  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::RowMajor,                            //
      precision, cutlass::layout::RowMajor,                            //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x64x8_16x32x1_4x4_4x8_2x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
//    Threads / Warp: 4 x 8
//     Warps / Block: 2 x 2
//       Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x128x8_16x64x1_4x8_4x8_2x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision = float;
  using OpClass   = cutlass::arch::OpClassSimt;
  using SmArch    = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<16, 64, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;

  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::RowMajor,                            //
      precision, cutlass::layout::RowMajor,                            //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x128x8_16x64x1_4x8_4x8_2x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
//    Threads / Warp: 8 x 4
//     Warps / Block: 2 x 2
//       Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x32x8_32x16x1_4x4_8x4_2x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision = float;
  using OpClass   = cutlass::arch::OpClassSimt;
  using SmArch    = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<32, 16, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::maximum<precision>,
cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x32x8_32x16x1_4x4_8x4_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x64x8_32x32x1_8x4_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x64x8_32x32x1_8x4_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 64 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x128x8_32x64x1_8x8_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = 
typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x128x8_32x64x1_8x8_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 2 // Threadblock: 128 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_128x32x8_64x16x1_8x4_8x4_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_128x32x8_64x16x1_8x4_8x4_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 2 // Threadblock: 128 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_128x64x8_64x32x1_8x8_8x4_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = 
cutlass::gemm::GemmShape<128, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_128x64x8_64x32x1_8x8_8x4_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 16 x 64 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_16x64x16_8x16x1_2x2_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_16x64x16_8x16x1_2x2_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 16 x 128 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_16x128x16_8x32x1_2x4_4x8_2x4(benchmark::State &state) { const auto N = 
static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_16x128x16_8x32x1_2x4_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 4 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x32x8_16x8x1_2x2_8x4_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x32x8_16x8x1_2x2_8x4_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and 
(CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x64x8_16x16x1_4x2_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x64x8_16x16x1_4x2_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 32 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x128x8_16x32x1_4x4_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x128x8_16x32x1_4x4_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif 
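////////////////////////////////////////////////////////////////////////////////
// Naming convention used by the benchmarks in this file (as can be read off the
// configuration comments above each one):
//   BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_<TbM>x<TbN>x<TbK>_<WM>x<WN>x1_<TM>x<TN>_<LM>x<LN>_<BM>x<BN>
// where <TbM>x<TbN>x<TbK> is the threadblock tile (ThreadblockShape),
// <WM>x<WN> the warp tile (WarpShape), <TM>x<TN> the per-thread tile
// ("Elements / Thread"), <LM>x<LN> the thread arrangement within a warp
// ("Threads / Warp"), and <BM>x<BN> the warp layout per block ("Warps / Block").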
//////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 32 x 256 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x256x8_16x64x1_4x8_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 256, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x256x8_16x64x1_4x8_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 4 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x64x8_32x16x1_4x4_8x4_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, 
benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x64x8_32x16x1_4x4_8x4_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 64 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x128x8_32x32x1_8x4_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x128x8_32x32x1_8x4_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 64 x 256 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x256x8_32x64x1_8x8_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { 
benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x256x8_32x64x1_8x8_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 4 // Threadblock: 128 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_128x128x8_64x32x1_8x8_8x4_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_128x128x8_64x32x1_8x8_8x4_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x32x8_8x16x1_2x2_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x32x8_8x16x1_2x2_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x32x8_16x16x1_4x2_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x32x8_16x16x1_4x2_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x64x8_16x32x1_4x4_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // 
precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x64x8_16x32x1_4x4_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 2 // Threadblock: 128 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_128x32x8_32x16x1_4x4_8x4_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_128x32x8_32x16x1_4x4_8x4_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 128 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_128x64x8_32x32x1_8x4_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = 
Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_128x64x8_32x32x1_8x4_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 128 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_128x128x8_32x64x1_8x8_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_128x128x8_32x64x1_8x8_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 2 // Threadblock: 256 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_256x32x8_64x16x1_8x4_8x4_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<256, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // 
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_256x32x8_64x16x1_8x4_8x4_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 2 // Threadblock: 256 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_256x64x8_64x32x1_8x8_8x4_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_256x64x8_64x32x1_8x8_8x4_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 32 x 64 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x64x16_8x16x1_2x2_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; using InstructionShape = 
cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x64x16_8x16x1_2x2_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 32 x 128 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x128x16_8x32x1_2x4_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x128x16_8x32x1_2x4_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 64 x 32 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x32x16_16x8x1_2x2_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = 
cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>; using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x32x16_16x8x1_2x2_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x64x8_16x16x1_4x2_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x64x8_16x16x1_4x2_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 64 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void 
BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x128x8_16x32x1_4x4_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x128x8_16x32x1_4x4_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 64 x 256 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x256x8_16x64x1_4x8_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x256x8_16x64x1_4x8_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 
2 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 128 x 32 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_128x32x16_32x8x1_4x2_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 16>; using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_128x32x16_32x8x1_4x2_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 128 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_128x64x8_32x16x1_4x4_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_128x64x8_32x16x1_4x4_8x4_4x4) 
->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 128 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_128x128x8_32x32x1_8x4_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_128x128x8_32x32x1_8x4_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 256 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_256x64x8_64x16x1_8x4_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = 
benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_256x64x8_64x16x1_8x4_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif
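All of the generated benchmarks in this file pair share one template; what varies is the tile hierarchy spelled out in each comment block and benchmark name (ThreadblockShape, WarpShape, elements per thread, threads per warp, warps per block) plus the 2*N^3 flop counter. The short host-side sketch below is an illustration only and is not part of the generated .hip/.cu sources; TileConfig and its fields are hypothetical helper names. It checks the arithmetic for one configuration taken from above (128x64x8_32x16x1_4x4_8x4_4x4) and shows how flops_per_itr feeds the Flop/s counter.

// Illustrative sketch (not part of the generated benchmark files): verifies the
// tile-shape arithmetic encoded in names such as
// "..._128x64x8_32x16x1_4x4_8x4_4x4" (ThreadblockShape 128x64x8, WarpShape
// 32x16, 4x4 elements/thread, 8x4 threads/warp, 4x4 warps/block) and the
// 2*N^3 flop count used for the Flop/s counter. TileConfig is a hypothetical
// helper introduced only for this sketch.
#include <cassert>
#include <cstdio>

struct TileConfig {
  int tb_m, tb_n;       // threadblock tile (M x N)
  int warp_m, warp_n;   // warp tile (M x N)
  int thr_m, thr_n;     // elements computed per thread (M x N)
  int lanes_m, lanes_n; // thread arrangement inside a warp (M x N)
};

int main() {
  // Values taken from BM_SM50_..._128x64x8_32x16x1_4x4_8x4_4x4 above.
  TileConfig c{128, 64, 32, 16, 4, 4, 8, 4};

  // Warps / Block = ThreadblockShape / WarpShape (here 4 x 4).
  assert(c.tb_m / c.warp_m == 4 && c.tb_n / c.warp_n == 4);
  // WarpShape = (threads/warp) * (elements/thread): 8*4 = 32, 4*4 = 16.
  assert(c.lanes_m * c.thr_m == c.warp_m && c.lanes_n * c.thr_n == c.warp_n);
  // The lane arrangement always multiplies out to a full 32-lane warp.
  assert(c.lanes_m * c.lanes_n == 32);

  // Work counted per benchmark iteration: one multiply plus one "add"
  // (cuasr::maximum here) per K-step of each of the N*N outputs -> 2*N^3,
  // mirroring flops_per_itr = 2.0 * N * N * N in the benchmarks.
  long long N = 4096;
  double flops_per_itr = 2.0 * N * N * N;
  // benchmark::Counter(..., kIsIterationInvariantRate) reports this value as a
  // per-second rate across iterations, i.e. the "Flop/s" counter.
  std::printf("N=%lld -> %.3e flop per iteration\n", N, flops_per_itr);
  return 0;
}

Compiled with any host C++ compiler (no CUDA toolchain required), the asserts pass and the program prints the per-iteration flop count that the benchmark harness converts into a Flop/s rate.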
3a376e30b7a0d1078706204f9a76d8c81895e268.cu
/*************************************************************************************************** * Copyright (c) 2020, Vijay Thakkar ([email protected]). **************************************************************************************************/ ////////////////////////////////////////////////////////////////////// // THIS BENCHMARK FILE IS GENERATED AUTOMATICALLY : DO NOT MODIFY // ////////////////////////////////////////////////////////////////////// #include "benchmark/benchmark.h" #include "cuasr/gemm/device/default_srgemm_configuration.h" #include "cuasr/gemm/device/srgemm.h" #include "cuasr/functional.h" #include "harness.h" //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 8 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_8x32x8_8x32x1_2x4_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_8x32x8_8x32x1_2x4_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 16 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_16x32x8_16x32x1_4x4_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< 
// AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_16x32x8_16x32x1_4x4_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 16 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_16x64x8_16x64x1_4x8_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_16x64x8_16x64x1_4x8_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x32x8_32x32x1_8x4_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = 
Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x32x8_32x32x1_8x4_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x64x8_32x64x1_8x8_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x64x8_32x64x1_8x8_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 1 x 1 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x32x8_64x32x1_8x8_8x4_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename 
cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x32x8_64x32x1_8x8_8x4_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 8 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_8x32x8_8x16x1_2x2_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_8x32x8_8x16x1_2x2_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 8 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_8x64x8_8x32x1_2x4_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>; 
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_8x64x8_8x32x1_2x4_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 16 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_16x32x8_16x16x1_4x2_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_16x32x8_16x16x1_4x2_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 16 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_16x64x8_16x32x1_4x4_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; 
using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_16x64x8_16x32x1_4x4_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 16 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_16x128x8_16x64x1_4x8_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_16x128x8_16x64x1_4x8_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 1 x 2 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void 
BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x32x8_32x16x1_4x4_8x4_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x32x8_32x16x1_4x4_8x4_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x64x8_32x32x1_8x4_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x64x8_32x32x1_8x4_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // 
Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 32 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x128x8_32x64x1_8x8_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x128x8_32x64x1_8x8_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 1 x 2 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x64x8_64x32x1_8x8_8x4_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x64x8_64x32x1_8x8_8x4_1x2) 
->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 1 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x32x8_16x32x1_4x4_4x8_2x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x32x8_16x32x1_4x4_4x8_2x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 1 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x32x8_32x32x1_8x4_4x8_2x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = 
benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x32x8_32x32x1_8x4_4x8_2x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 1 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x64x8_32x64x1_8x8_4x8_2x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x64x8_32x64x1_8x8_4x8_2x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 1 // Threadblock: 128 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_128x32x8_64x32x1_8x8_8x4_2x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for 
(auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_128x32x8_64x32x1_8x8_8x4_2x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 16 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_16x32x8_8x16x1_2x2_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_16x32x8_8x16x1_2x2_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 16 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_16x64x8_8x32x1_2x4_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_16x64x8_8x32x1_2x4_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x32x8_16x16x1_4x2_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x32x8_16x16x1_4x2_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x64x8_16x32x1_4x4_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, 
// precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x64x8_16x32x1_4x4_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 32 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x128x8_16x64x1_4x8_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x128x8_16x64x1_4x8_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 2 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x32x8_32x16x1_4x4_8x4_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = 
Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x32x8_32x16x1_4x4_8x4_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x64x8_32x32x1_8x4_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x64x8_32x32x1_8x4_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 64 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x128x8_32x64x1_8x8_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // 
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x128x8_32x64x1_8x8_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 2 // Threadblock: 128 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_128x32x8_64x16x1_8x4_8x4_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_128x32x8_64x16x1_8x4_8x4_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 2 // Threadblock: 128 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_128x64x8_64x32x1_8x8_8x4_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = 
cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_128x64x8_64x32x1_8x8_8x4_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 16 x 64 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_16x64x16_8x16x1_2x2_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_16x64x16_8x16x1_2x2_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 16 x 128 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_16x128x16_8x32x1_2x4_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = 
cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_16x128x16_8x32x1_2x4_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 4 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x32x8_16x8x1_2x2_8x4_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x32x8_16x8x1_2x2_8x4_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void 
BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x64x8_16x16x1_4x2_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x64x8_16x16x1_4x2_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 32 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x128x8_16x32x1_4x4_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x128x8_16x32x1_4x4_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 
// Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 32 x 256 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x256x8_16x64x1_4x8_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 256, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x256x8_16x64x1_4x8_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 4 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x64x8_32x16x1_4x4_8x4_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x64x8_32x16x1_4x4_8x4_2x4) 
->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 64 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x128x8_32x32x1_8x4_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x128x8_32x32x1_8x4_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 64 x 256 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x256x8_32x64x1_8x8_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = 
benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x256x8_32x64x1_8x8_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 4 // Threadblock: 128 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_128x128x8_64x32x1_8x8_8x4_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_128x128x8_64x32x1_8x8_8x4_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x32x8_8x16x1_2x2_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for 
(auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x32x8_8x16x1_2x2_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x32x8_16x16x1_4x2_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x32x8_16x16x1_4x2_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x64x8_16x32x1_4x4_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x64x8_16x32x1_4x4_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 2 // Threadblock: 128 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_128x32x8_32x16x1_4x4_8x4_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_128x32x8_32x16x1_4x4_8x4_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 128 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_128x64x8_32x32x1_8x4_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, 
cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_128x64x8_32x32x1_8x4_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 128 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_128x128x8_32x64x1_8x8_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_128x128x8_32x64x1_8x8_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 2 // Threadblock: 256 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_256x32x8_64x16x1_8x4_8x4_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<256, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using 
EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_256x32x8_64x16x1_8x4_8x4_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 2 // Threadblock: 256 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_256x64x8_64x32x1_8x8_8x4_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_256x64x8_64x32x1_8x8_8x4_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 32 x 64 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x64x16_8x16x1_2x2_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, 
OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x64x16_8x16x1_2x2_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 32 x 128 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x128x16_8x32x1_2x4_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_32x128x16_8x32x1_2x4_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 64 x 32 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x32x16_16x8x1_2x2_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>; using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>; using InstructionShape = 
cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x32x16_16x8x1_2x2_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x64x8_16x16x1_4x2_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x64x8_16x16x1_4x2_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 64 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x128x8_16x32x1_4x4_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = 
cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x128x8_16x32x1_4x4_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 64 x 256 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x256x8_16x64x1_4x8_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_64x256x8_16x64x1_4x8_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 128 x 32 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void 
BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_128x32x16_32x8x1_4x2_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 16>; using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_128x32x16_32x8x1_4x2_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 128 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_128x64x8_32x16x1_4x4_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_128x64x8_32x16x1_4x4_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 
x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 128 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_128x128x8_32x32x1_8x4_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_128x128x8_32x32x1_8x4_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 256 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_256x64x8_64x16x1_8x4_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } 
BENCHMARK(BM_SM50_device_maximum_multiplies_ssrgemm_tt_n_256x64x8_64x16x1_8x4_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif
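Every benchmark registered above is the same harness with a different tiling: it instantiates cuasr::gemm::device::Srgemm with the (max, multiply) semiring taken from DefaultSemiRingConfiguration, wraps it in cuasr::bench::device::BenchHarness for square problems N = 256 through 4096, and credits 2*N*N*N flops per iteration (one multiply and one max per inner-product step). As a sanity reference for what each configuration computes, the sketch below is a deliberately naive CUDA kernel for the same max-multiplies product. It is illustrative only: the kernel and variable names are mine, it is not the cuASR implementation, and it uses plain row-major layouts rather than the layout combination encoded in the Srgemm template arguments.

#include <cstdio>
#include <cuda_runtime.h>

// Naive reference for the (max, *) semiring GEMM measured above:
//   C[i][j] = max over k of A[i][k] * B[k][j]
__global__ void max_multiplies_srgemm_ref(const float *A, const float *B,
                                          float *C, int N) {
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  if (row >= N || col >= N) return;

  // Seed the reduction with k = 0 instead of the semiring's additive identity (-inf).
  float acc = A[row * N] * B[col];
  for (int k = 1; k < N; ++k)
    acc = fmaxf(acc, A[row * N + k] * B[k * N + col]);
  C[row * N + col] = acc;
}

int main() {
  const int N = 256;
  float *A, *B, *C;
  cudaMallocManaged(&A, N * N * sizeof(float));
  cudaMallocManaged(&B, N * N * sizeof(float));
  cudaMallocManaged(&C, N * N * sizeof(float));
  for (int i = 0; i < N * N; ++i) { A[i] = 1.0f; B[i] = 2.0f; }

  dim3 block(16, 16), grid((N + 15) / 16, (N + 15) / 16);
  max_multiplies_srgemm_ref<<<grid, block>>>(A, B, C, N);
  cudaDeviceSynchronize();
  printf("C[0][0] = %.1f (expected 2.0)\n", C[0]);

  cudaFree(A);
  cudaFree(B);
  cudaFree(C);
  return 0;
}

Since max is exact under reordering and each product is a single multiply, a tiled cuASR result and this reference should agree bit for bit on NaN-free inputs, which makes the sketch handy for spot-checking a tile configuration before timing it.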
shmem_kernels.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
 * shmem_kernels.cu: This file is part of the gpumembench suite.
 *
 * Contact: Elias Konstantinidis <[email protected]>
 **/

#include <chrono> // timing
#include <stdio.h>

using namespace std::chrono;

#define TOTAL_ITERATIONS (1024)
#define BLOCK_SIZE 256

// shared memory swap operation
__device__ void shmem_swap(float4 *v1, float4 *v2){
  float4 tmp;
  tmp = *v2;
  *v2 = *v1;
  *v1 = tmp;
}

__device__ float4 init_val(int i){
  return make_float4(i, i+11, i+19, i+23);
}

__device__ float4 reduce_vector(float4 v1, float4 v2, float4 v3, float4 v4, float4 v5, float4 v6){
  return make_float4(v1.x + v2.x + v3.x + v4.x + v5.x + v6.x,
                     v1.y + v2.y + v3.y + v4.y + v5.y + v6.y,
                     v1.z + v2.z + v3.z + v4.z + v5.z + v6.z,
                     v1.w + v2.w + v3.w + v4.w + v5.w + v6.w);
}

__device__ void set_vector(float4 *target, int offset, float4 v){
  target[offset].x = v.x;
  target[offset].y = v.y;
  target[offset].z = v.z;
  target[offset].w = v.w;
}

__global__ void benchmark_shmem(float4 *g_data){
  __shared__ float4 shm_buffer[BLOCK_SIZE*6];
  int tid = threadIdx.x;
  int globaltid = blockIdx.x*blockDim.x + tid;
  set_vector(shm_buffer, tid+0*blockDim.x, init_val(tid));
  set_vector(shm_buffer, tid+1*blockDim.x, init_val(tid+1));
  set_vector(shm_buffer, tid+2*blockDim.x, init_val(tid+3));
  set_vector(shm_buffer, tid+3*blockDim.x, init_val(tid+7));
  set_vector(shm_buffer, tid+4*blockDim.x, init_val(tid+13));
  set_vector(shm_buffer, tid+5*blockDim.x, init_val(tid+17));
  __syncthreads();  // __threadfence_block() is faster though
  #pragma unroll 32
  for(int j=0; j<TOTAL_ITERATIONS; j++){
    shmem_swap(shm_buffer+tid+0*blockDim.x, shm_buffer+tid+1*blockDim.x);
    shmem_swap(shm_buffer+tid+2*blockDim.x, shm_buffer+tid+3*blockDim.x);
    shmem_swap(shm_buffer+tid+4*blockDim.x, shm_buffer+tid+5*blockDim.x);
    __syncthreads();
    shmem_swap(shm_buffer+tid+1*blockDim.x, shm_buffer+tid+2*blockDim.x);
    shmem_swap(shm_buffer+tid+3*blockDim.x, shm_buffer+tid+4*blockDim.x);
    __syncthreads();
  }
  g_data[globaltid] = reduce_vector(shm_buffer[tid+0*blockDim.x],
                                    shm_buffer[tid+1*blockDim.x],
                                    shm_buffer[tid+2*blockDim.x],
                                    shm_buffer[tid+3*blockDim.x],
                                    shm_buffer[tid+4*blockDim.x],
                                    shm_buffer[tid+5*blockDim.x]);
}

void shmembenchGPU(double *c, const long size, const int repeat) {
  const int TOTAL_BLOCKS = size/(BLOCK_SIZE);
  double *cd;
  hipMalloc((void**)&cd, size*sizeof(double));

  dim3 dimBlock(BLOCK_SIZE, 1, 1);
  dim3 dimGrid_f4(TOTAL_BLOCKS/4, 1, 1);

  auto start = high_resolution_clock::now();
  for (int i = 0; i < repeat; i++)
    hipLaunchKernelGGL(( benchmark_shmem), dim3(dimGrid_f4), dim3(dimBlock) , 0, 0, (float4*)cd);
  hipDeviceSynchronize();
  auto end = high_resolution_clock::now();
  auto time_shmem_128b = duration_cast<nanoseconds>(end - start).count() / (double)repeat;
  printf("Average kernel execution time : %f (ms)\n", time_shmem_128b * 1e-6);

  // Copy results back to host memory
  hipMemcpy(c, cd, size*sizeof(double), hipMemcpyDeviceToHost);
  hipFree(cd);

  // simple checksum
  double sum = 0;
  for (long i = 0; i < size; i++) sum += c[i];
  if (sum != 21256458760384741137729978368.00) printf("checksum failed\n");

  printf("Memory throughput\n");
  const long long operations_bytes  = (6LL+4*5*TOTAL_ITERATIONS+6)*size*sizeof(float);
  const long long operations_128bit = (6LL+4*5*TOTAL_ITERATIONS+6)*size/4;
  printf("\tusing 128bit operations : %8.2f GB/sec (%6.2f billion accesses/sec)\n",
         (double)operations_bytes / time_shmem_128b,
         (double)operations_128bit / time_shmem_128b);
}
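shmembenchGPU is the only host-callable entry point in this translation unit, so a small driver is enough to exercise it; one possible sketch follows. It assumes this file (or the CUDA twin below) is compiled and linked alongside, and it picks the buffer length as a multiple of BLOCK_SIZE*4 = 1024 so that the grid arithmetic size/BLOCK_SIZE/4 divides evenly. The defaults and names in the driver are mine rather than the original gpumembench harness, and the hard-coded checksum constant in the file appears tied to the buffer size that harness used, so other sizes may print "checksum failed" even when the kernel ran correctly.

#include <cstdio>
#include <cstdlib>

// Implemented in shmem_kernels.hip / shmem_kernels.cu (signature copied from the file above).
void shmembenchGPU(double *c, const long size, const int repeat);

int main(int argc, char *argv[]) {
  // Hypothetical defaults; size should stay a multiple of 1024.
  const long size   = (argc > 1) ? atol(argv[1]) : 16L * 1024 * 1024;
  const int  repeat = (argc > 2) ? atoi(argv[2]) : 100;

  double *c = (double *)malloc(size * sizeof(double));
  if (c == NULL) {
    fprintf(stderr, "host allocation failed\n");
    return 1;
  }

  shmembenchGPU(c, size, repeat);  // prints timing, throughput and any checksum warning itself

  free(c);
  return 0;
}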
shmem_kernels.cu
/**
 * shmem_kernels.cu: This file is part of the gpumembench suite.
 *
 * Contact: Elias Konstantinidis <[email protected]>
 **/

#include <chrono> // timing
#include <stdio.h>

using namespace std::chrono;

#define TOTAL_ITERATIONS (1024)
#define BLOCK_SIZE 256

// shared memory swap operation
__device__ void shmem_swap(float4 *v1, float4 *v2){
  float4 tmp;
  tmp = *v2;
  *v2 = *v1;
  *v1 = tmp;
}

__device__ float4 init_val(int i){
  return make_float4(i, i+11, i+19, i+23);
}

__device__ float4 reduce_vector(float4 v1, float4 v2, float4 v3, float4 v4, float4 v5, float4 v6){
  return make_float4(v1.x + v2.x + v3.x + v4.x + v5.x + v6.x,
                     v1.y + v2.y + v3.y + v4.y + v5.y + v6.y,
                     v1.z + v2.z + v3.z + v4.z + v5.z + v6.z,
                     v1.w + v2.w + v3.w + v4.w + v5.w + v6.w);
}

__device__ void set_vector(float4 *target, int offset, float4 v){
  target[offset].x = v.x;
  target[offset].y = v.y;
  target[offset].z = v.z;
  target[offset].w = v.w;
}

__global__ void benchmark_shmem(float4 *g_data){
  __shared__ float4 shm_buffer[BLOCK_SIZE*6];
  int tid = threadIdx.x;
  int globaltid = blockIdx.x*blockDim.x + tid;
  set_vector(shm_buffer, tid+0*blockDim.x, init_val(tid));
  set_vector(shm_buffer, tid+1*blockDim.x, init_val(tid+1));
  set_vector(shm_buffer, tid+2*blockDim.x, init_val(tid+3));
  set_vector(shm_buffer, tid+3*blockDim.x, init_val(tid+7));
  set_vector(shm_buffer, tid+4*blockDim.x, init_val(tid+13));
  set_vector(shm_buffer, tid+5*blockDim.x, init_val(tid+17));
  __syncthreads();  // __threadfence_block() is faster though
  #pragma unroll 32
  for(int j=0; j<TOTAL_ITERATIONS; j++){
    shmem_swap(shm_buffer+tid+0*blockDim.x, shm_buffer+tid+1*blockDim.x);
    shmem_swap(shm_buffer+tid+2*blockDim.x, shm_buffer+tid+3*blockDim.x);
    shmem_swap(shm_buffer+tid+4*blockDim.x, shm_buffer+tid+5*blockDim.x);
    __syncthreads();
    shmem_swap(shm_buffer+tid+1*blockDim.x, shm_buffer+tid+2*blockDim.x);
    shmem_swap(shm_buffer+tid+3*blockDim.x, shm_buffer+tid+4*blockDim.x);
    __syncthreads();
  }
  g_data[globaltid] = reduce_vector(shm_buffer[tid+0*blockDim.x],
                                    shm_buffer[tid+1*blockDim.x],
                                    shm_buffer[tid+2*blockDim.x],
                                    shm_buffer[tid+3*blockDim.x],
                                    shm_buffer[tid+4*blockDim.x],
                                    shm_buffer[tid+5*blockDim.x]);
}

void shmembenchGPU(double *c, const long size, const int repeat) {
  const int TOTAL_BLOCKS = size/(BLOCK_SIZE);
  double *cd;
  cudaMalloc((void**)&cd, size*sizeof(double));

  dim3 dimBlock(BLOCK_SIZE, 1, 1);
  dim3 dimGrid_f4(TOTAL_BLOCKS/4, 1, 1);

  auto start = high_resolution_clock::now();
  for (int i = 0; i < repeat; i++)
    benchmark_shmem<<< dimGrid_f4, dimBlock >>>((float4*)cd);
  cudaDeviceSynchronize();
  auto end = high_resolution_clock::now();
  auto time_shmem_128b = duration_cast<nanoseconds>(end - start).count() / (double)repeat;
  printf("Average kernel execution time : %f (ms)\n", time_shmem_128b * 1e-6);

  // Copy results back to host memory
  cudaMemcpy(c, cd, size*sizeof(double), cudaMemcpyDeviceToHost);
  cudaFree(cd);

  // simple checksum
  double sum = 0;
  for (long i = 0; i < size; i++) sum += c[i];
  if (sum != 21256458760384741137729978368.00) printf("checksum failed\n");

  printf("Memory throughput\n");
  const long long operations_bytes  = (6LL+4*5*TOTAL_ITERATIONS+6)*size*sizeof(float);
  const long long operations_128bit = (6LL+4*5*TOTAL_ITERATIONS+6)*size/4;
  printf("\tusing 128bit operations : %8.2f GB/sec (%6.2f billion accesses/sec)\n",
         (double)operations_bytes / time_shmem_128b,
         (double)operations_128bit / time_shmem_128b);
}
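The throughput bookkeeping at the end of shmembenchGPU can be read off the kernel: each thread performs 6 float4 shared-memory stores to initialize its slots, then 5 swaps per iteration with 2 loads and 2 stores each, and finally 6 loads for the reduction, which is where the (6 + 4*5*TOTAL_ITERATIONS + 6) factor comes from. With size/4 threads and 16 bytes per 128-bit access this is exactly the operations_bytes expression in the file, and dividing bytes by the average kernel time in nanoseconds gives GB/sec directly. The helper below simply restates that arithmetic; the function name and the example numbers are mine.

#include <cstdio>

// Restates the throughput bookkeeping of shmembenchGPU:
// per-thread 128-bit shared-memory accesses =
//   6 initial stores + (5 swaps * 4 accesses) * iterations + 6 final loads
double shmem_gbps(long size, double avg_kernel_ns, long iterations) {
  const long long accesses_per_thread = 6LL + 4LL * 5LL * iterations + 6LL;
  const long long threads             = size / 4;    // each thread owns one float4 of output
  const long long bytes               = accesses_per_thread * threads * 16LL;
  // bytes per nanosecond is numerically GB/sec, matching the printf in the file
  return (double)bytes / avg_kernel_ns;
}

int main() {
  // Example with made-up numbers: 16M-element buffer, 2.5 ms average kernel time, 1024 iterations.
  printf("%.2f GB/sec\n", shmem_gbps(16L * 1024 * 1024, 2.5e6, 1024));
  return 0;
}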
fc691a506f6e8ef9863e6ceb703473a7ab6399f6.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
// //M*/ #include "internal_shared.hpp" #include "opencv2/gpu/device/limits.hpp" #include "opencv2/gpu/device/vec_distance.hpp" #include "opencv2/gpu/device/datamov_utils.hpp" namespace cv { namespace gpu { namespace device { namespace bf_knnmatch { /////////////////////////////////////////////////////////////////////////////// // Reduction template <int BLOCK_SIZE> __device__ void findBestMatch(float& bestDistance1, float& bestDistance2, int& bestTrainIdx1, int& bestTrainIdx2, float* s_distance, int* s_trainIdx) { float myBestDistance1 = numeric_limits<float>::max(); float myBestDistance2 = numeric_limits<float>::max(); int myBestTrainIdx1 = -1; int myBestTrainIdx2 = -1; s_distance += threadIdx.y * BLOCK_SIZE; s_trainIdx += threadIdx.y * BLOCK_SIZE; s_distance[threadIdx.x] = bestDistance1; s_trainIdx[threadIdx.x] = bestTrainIdx1; __syncthreads(); if (threadIdx.x == 0) { #pragma unroll for (int i = 0; i < BLOCK_SIZE; ++i) { float val = s_distance[i]; if (val < myBestDistance1) { myBestDistance2 = myBestDistance1; myBestTrainIdx2 = myBestTrainIdx1; myBestDistance1 = val; myBestTrainIdx1 = s_trainIdx[i]; } else if (val < myBestDistance2) { myBestDistance2 = val; myBestTrainIdx2 = s_trainIdx[i]; } } } __syncthreads(); s_distance[threadIdx.x] = bestDistance2; s_trainIdx[threadIdx.x] = bestTrainIdx2; __syncthreads(); if (threadIdx.x == 0) { #pragma unroll for (int i = 0; i < BLOCK_SIZE; ++i) { float val = s_distance[i]; if (val < myBestDistance2) { myBestDistance2 = val; myBestTrainIdx2 = s_trainIdx[i]; } } } bestDistance1 = myBestDistance1; bestDistance2 = myBestDistance2; bestTrainIdx1 = myBestTrainIdx1; bestTrainIdx2 = myBestTrainIdx2; } template <int BLOCK_SIZE> __device__ void findBestMatch(float& bestDistance1, float& bestDistance2, int& bestTrainIdx1, int& bestTrainIdx2, int& bestImgIdx1, int& bestImgIdx2, float* s_distance, int* s_trainIdx, int* s_imgIdx) { float myBestDistance1 = numeric_limits<float>::max(); float myBestDistance2 = numeric_limits<float>::max(); int myBestTrainIdx1 = -1; int myBestTrainIdx2 = -1; int myBestImgIdx1 = -1; int myBestImgIdx2 = -1; s_distance += threadIdx.y * BLOCK_SIZE; s_trainIdx += threadIdx.y * BLOCK_SIZE; s_imgIdx += threadIdx.y * BLOCK_SIZE; s_distance[threadIdx.x] = bestDistance1; s_trainIdx[threadIdx.x] = bestTrainIdx1; s_imgIdx[threadIdx.x] = bestImgIdx1; __syncthreads(); if (threadIdx.x == 0) { #pragma unroll for (int i = 0; i < BLOCK_SIZE; ++i) { float val = s_distance[i]; if (val < myBestDistance1) { myBestDistance2 = myBestDistance1; myBestTrainIdx2 = myBestTrainIdx1; myBestImgIdx2 = myBestImgIdx1; myBestDistance1 = val; myBestTrainIdx1 = s_trainIdx[i]; myBestImgIdx1 = s_imgIdx[i]; } else if (val < myBestDistance2) { myBestDistance2 = val; myBestTrainIdx2 = s_trainIdx[i]; myBestImgIdx2 = s_imgIdx[i]; } } } __syncthreads(); s_distance[threadIdx.x] = bestDistance2; s_trainIdx[threadIdx.x] = bestTrainIdx2; s_imgIdx[threadIdx.x] = bestImgIdx2; __syncthreads(); if (threadIdx.x == 0) { #pragma unroll for (int i = 0; i < BLOCK_SIZE; ++i) { float val = s_distance[i]; if (val < myBestDistance2) { myBestDistance2 = val; myBestTrainIdx2 = s_trainIdx[i]; myBestImgIdx2 = s_imgIdx[i]; } } } bestDistance1 = myBestDistance1; bestDistance2 = myBestDistance2; bestTrainIdx1 = myBestTrainIdx1; bestTrainIdx2 = myBestTrainIdx2; bestImgIdx1 = myBestImgIdx1; bestImgIdx2 = myBestImgIdx2; } /////////////////////////////////////////////////////////////////////////////// // Match Unrolled Cached template <int BLOCK_SIZE, int MAX_DESC_LEN, typename T, typename U> 
__device__ void loadQueryToSmem(int queryIdx, const DevMem2D_<T>& query, U* s_query) { #pragma unroll for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i) { const int loadX = threadIdx.x + i * BLOCK_SIZE; s_query[threadIdx.y * MAX_DESC_LEN + loadX] = loadX < query.cols ? query.ptr(::min(queryIdx, query.rows - 1))[loadX] : 0; } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> __device__ void loopUnrolledCached(int queryIdx, const DevMem2D_<T>& query, int imgIdx, const DevMem2D_<T>& train, const Mask& mask, typename Dist::value_type* s_query, typename Dist::value_type* s_train, float& bestDistance1, float& bestDistance2, int& bestTrainIdx1, int& bestTrainIdx2, int& bestImgIdx1, int& bestImgIdx2) { for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t) { Dist dist; #pragma unroll for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i) { const int loadX = threadIdx.x + i * BLOCK_SIZE; s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0; if (loadX < train.cols) { T val; ForceGlob<T>::Load(train.ptr(::min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1)), loadX, val); s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = val; } __syncthreads(); #pragma unroll for (int j = 0; j < BLOCK_SIZE; ++j) dist.reduceIter(s_query[threadIdx.y * MAX_DESC_LEN + i * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]); __syncthreads(); } typename Dist::result_type distVal = dist; const int trainIdx = t * BLOCK_SIZE + threadIdx.x; if (queryIdx < query.rows && trainIdx < train.rows && mask(queryIdx, trainIdx)) { if (distVal < bestDistance1) { bestImgIdx2 = bestImgIdx1; bestDistance2 = bestDistance1; bestTrainIdx2 = bestTrainIdx1; bestImgIdx1 = imgIdx; bestDistance1 = distVal; bestTrainIdx1 = trainIdx; } else if (distVal < bestDistance2) { bestImgIdx2 = imgIdx; bestDistance2 = distVal; bestTrainIdx2 = trainIdx; } } } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> __global__ void matchUnrolledCached(const DevMem2D_<T> query, const DevMem2D_<T> train, const Mask mask, int2* bestTrainIdx, float2* bestDistance) { extern __shared__ int smem[]; const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y; typename Dist::value_type* s_query = (typename Dist::value_type*)(smem); typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * MAX_DESC_LEN); loadQueryToSmem<BLOCK_SIZE, MAX_DESC_LEN>(queryIdx, query, s_query); float myBestDistance1 = numeric_limits<float>::max(); float myBestDistance2 = numeric_limits<float>::max(); int myBestTrainIdx1 = -1; int myBestTrainIdx2 = -1; loopUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestTrainIdx1, myBestTrainIdx2); __syncthreads(); float* s_distance = (float*)(smem); int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE); findBestMatch<BLOCK_SIZE>(myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, s_distance, s_trainIdx); if (queryIdx < query.rows && threadIdx.x == 0) { bestTrainIdx[queryIdx] = make_int2(myBestTrainIdx1, myBestTrainIdx2); bestDistance[queryIdx] = make_float2(myBestDistance1, myBestDistance2); } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> void matchUnrolledCached(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask, const DevMem2D_<int2>& trainIdx, const DevMem2D_<float2>& distance, hipStream_t stream) { const dim3 block(BLOCK_SIZE, 
BLOCK_SIZE); const dim3 grid(divUp(query.rows, BLOCK_SIZE)); const size_t smemSize = (BLOCK_SIZE * (MAX_DESC_LEN >= BLOCK_SIZE ? MAX_DESC_LEN : BLOCK_SIZE) + BLOCK_SIZE * BLOCK_SIZE) * sizeof(int); hipLaunchKernelGGL(( matchUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist>), dim3(grid), dim3(block), smemSize, stream, query, train, mask, trainIdx.data, distance.data); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> __global__ void matchUnrolledCached(const DevMem2D_<T> query, const DevMem2D_<T>* trains, int n, const Mask mask, int2* bestTrainIdx, int2* bestImgIdx, float2* bestDistance) { extern __shared__ int smem[]; const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y; typename Dist::value_type* s_query = (typename Dist::value_type*)(smem); typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * MAX_DESC_LEN); loadQueryToSmem<BLOCK_SIZE, MAX_DESC_LEN>(queryIdx, query, s_query); float myBestDistance1 = numeric_limits<float>::max(); float myBestDistance2 = numeric_limits<float>::max(); int myBestTrainIdx1 = -1; int myBestTrainIdx2 = -1; int myBestImgIdx1 = -1; int myBestImgIdx2 = -1; Mask m = mask; for (int imgIdx = 0; imgIdx < n; ++imgIdx) { const DevMem2D_<T> train = trains[imgIdx]; m.next(); loopUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestImgIdx1, myBestImgIdx2); } __syncthreads(); float* s_distance = (float*)(smem); int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE); int* s_imgIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE); findBestMatch<BLOCK_SIZE>(myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestImgIdx1, myBestImgIdx2, s_distance, s_trainIdx, s_imgIdx); if (queryIdx < query.rows && threadIdx.x == 0) { bestTrainIdx[queryIdx] = make_int2(myBestTrainIdx1, myBestTrainIdx2); bestImgIdx[queryIdx] = make_int2(myBestImgIdx1, myBestImgIdx2); bestDistance[queryIdx] = make_float2(myBestDistance1, myBestDistance2); } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> void matchUnrolledCached(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, const Mask& mask, const DevMem2D_<int2>& trainIdx, const DevMem2D_<int2>& imgIdx, const DevMem2D_<float2>& distance, hipStream_t stream) { const dim3 block(BLOCK_SIZE, BLOCK_SIZE); const dim3 grid(divUp(query.rows, BLOCK_SIZE)); const size_t smemSize = (BLOCK_SIZE * (MAX_DESC_LEN >= 2 * BLOCK_SIZE ? 
MAX_DESC_LEN : 2 * BLOCK_SIZE) + BLOCK_SIZE * BLOCK_SIZE) * sizeof(int); hipLaunchKernelGGL(( matchUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist>), dim3(grid), dim3(block), smemSize, stream, query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } /////////////////////////////////////////////////////////////////////////////// // Match Unrolled template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> __device__ void loopUnrolled(int queryIdx, const DevMem2D_<T>& query, int imgIdx, const DevMem2D_<T>& train, const Mask& mask, typename Dist::value_type* s_query, typename Dist::value_type* s_train, float& bestDistance1, float& bestDistance2, int& bestTrainIdx1, int& bestTrainIdx2, int& bestImgIdx1, int& bestImgIdx2) { for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t) { Dist dist; #pragma unroll for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i) { const int loadX = threadIdx.x + i * BLOCK_SIZE; s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0; s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0; if (loadX < query.cols) { T val; ForceGlob<T>::Load(query.ptr(::min(queryIdx, query.rows - 1)), loadX, val); s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = val; ForceGlob<T>::Load(train.ptr(::min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1)), loadX, val); s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = val; } __syncthreads(); #pragma unroll for (int j = 0; j < BLOCK_SIZE; ++j) dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]); __syncthreads(); } typename Dist::result_type distVal = dist; const int trainIdx = t * BLOCK_SIZE + threadIdx.x; if (queryIdx < query.rows && trainIdx < train.rows && mask(queryIdx, trainIdx)) { if (distVal < bestDistance1) { bestImgIdx2 = bestImgIdx1; bestDistance2 = bestDistance1; bestTrainIdx2 = bestTrainIdx1; bestImgIdx1 = imgIdx; bestDistance1 = distVal; bestTrainIdx1 = trainIdx; } else if (distVal < bestDistance2) { bestImgIdx2 = imgIdx; bestDistance2 = distVal; bestTrainIdx2 = trainIdx; } } } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> __global__ void matchUnrolled(const DevMem2D_<T> query, const DevMem2D_<T> train, const Mask mask, int2* bestTrainIdx, float2* bestDistance) { extern __shared__ int smem[]; const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y; typename Dist::value_type* s_query = (typename Dist::value_type*)(smem); typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE); float myBestDistance1 = numeric_limits<float>::max(); float myBestDistance2 = numeric_limits<float>::max(); int myBestTrainIdx1 = -1; int myBestTrainIdx2 = -1; loopUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestTrainIdx1, myBestTrainIdx2); __syncthreads(); float* s_distance = (float*)(smem); int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE); findBestMatch<BLOCK_SIZE>(myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, s_distance, s_trainIdx); if (queryIdx < query.rows && threadIdx.x == 0) { bestTrainIdx[queryIdx] = make_int2(myBestTrainIdx1, myBestTrainIdx2); bestDistance[queryIdx] = make_float2(myBestDistance1, myBestDistance2); } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> void matchUnrolled(const 
DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask, const DevMem2D_<int2>& trainIdx, const DevMem2D_<float2>& distance, hipStream_t stream) { const dim3 block(BLOCK_SIZE, BLOCK_SIZE); const dim3 grid(divUp(query.rows, BLOCK_SIZE)); const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int); hipLaunchKernelGGL(( matchUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>), dim3(grid), dim3(block), smemSize, stream, query, train, mask, trainIdx.data, distance.data); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> __global__ void matchUnrolled(const DevMem2D_<T> query, const DevMem2D_<T>* trains, int n, const Mask mask, int2* bestTrainIdx, int2* bestImgIdx, float2* bestDistance) { extern __shared__ int smem[]; const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y; typename Dist::value_type* s_query = (typename Dist::value_type*)(smem); typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE); float myBestDistance1 = numeric_limits<float>::max(); float myBestDistance2 = numeric_limits<float>::max(); int myBestTrainIdx1 = -1; int myBestTrainIdx2 = -1; int myBestImgIdx1 = -1; int myBestImgIdx2 = -1; Mask m = mask; for (int imgIdx = 0; imgIdx < n; ++imgIdx) { const DevMem2D_<T> train = trains[imgIdx]; m.next(); loopUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestImgIdx1, myBestImgIdx2); } __syncthreads(); float* s_distance = (float*)(smem); int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE); int* s_imgIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE); findBestMatch<BLOCK_SIZE>(myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestImgIdx1, myBestImgIdx2, s_distance, s_trainIdx, s_imgIdx); if (queryIdx < query.rows && threadIdx.x == 0) { bestTrainIdx[queryIdx] = make_int2(myBestTrainIdx1, myBestTrainIdx2); bestImgIdx[queryIdx] = make_int2(myBestImgIdx1, myBestImgIdx2); bestDistance[queryIdx] = make_float2(myBestDistance1, myBestDistance2); } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> void matchUnrolled(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, const Mask& mask, const DevMem2D_<int2>& trainIdx, const DevMem2D_<int2>& imgIdx, const DevMem2D_<float2>& distance, hipStream_t stream) { const dim3 block(BLOCK_SIZE, BLOCK_SIZE); const dim3 grid(divUp(query.rows, BLOCK_SIZE)); const size_t smemSize = (3 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int); hipLaunchKernelGGL(( matchUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>), dim3(grid), dim3(block), smemSize, stream, query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } /////////////////////////////////////////////////////////////////////////////// // Match template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> __device__ void loop(int queryIdx, const DevMem2D_<T>& query, int imgIdx, const DevMem2D_<T>& train, const Mask& mask, typename Dist::value_type* s_query, typename Dist::value_type* s_train, float& bestDistance1, float& bestDistance2, int& bestTrainIdx1, int& bestTrainIdx2, int& bestImgIdx1, int& bestImgIdx2) { for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t) { Dist dist; for (int i = 0, endi = (query.cols + BLOCK_SIZE - 
1) / BLOCK_SIZE; i < endi; ++i) { const int loadX = threadIdx.x + i * BLOCK_SIZE; s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0; s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0; if (loadX < query.cols) { T val; ForceGlob<T>::Load(query.ptr(::min(queryIdx, query.rows - 1)), loadX, val); s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = val; ForceGlob<T>::Load(train.ptr(::min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1)), loadX, val); s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = val; } __syncthreads(); #pragma unroll for (int j = 0; j < BLOCK_SIZE; ++j) dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]); __syncthreads(); } typename Dist::result_type distVal = dist; const int trainIdx = t * BLOCK_SIZE + threadIdx.x; if (queryIdx < query.rows && trainIdx < train.rows && mask(queryIdx, trainIdx)) { if (distVal < bestDistance1) { bestImgIdx2 = bestImgIdx1; bestDistance2 = bestDistance1; bestTrainIdx2 = bestTrainIdx1; bestImgIdx1 = imgIdx; bestDistance1 = distVal; bestTrainIdx1 = trainIdx; } else if (distVal < bestDistance2) { bestImgIdx2 = imgIdx; bestDistance2 = distVal; bestTrainIdx2 = trainIdx; } } } } template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> __global__ void match(const DevMem2D_<T> query, const DevMem2D_<T> train, const Mask mask, int2* bestTrainIdx, float2* bestDistance) { extern __shared__ int smem[]; const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y; typename Dist::value_type* s_query = (typename Dist::value_type*)(smem); typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE); float myBestDistance1 = numeric_limits<float>::max(); float myBestDistance2 = numeric_limits<float>::max(); int myBestTrainIdx1 = -1; int myBestTrainIdx2 = -1; loop<BLOCK_SIZE, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestTrainIdx1, myBestTrainIdx2); __syncthreads(); float* s_distance = (float*)(smem); int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE); findBestMatch<BLOCK_SIZE>(myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, s_distance, s_trainIdx); if (queryIdx < query.rows && threadIdx.x == 0) { bestTrainIdx[queryIdx] = make_int2(myBestTrainIdx1, myBestTrainIdx2); bestDistance[queryIdx] = make_float2(myBestDistance1, myBestDistance2); } } template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> void match(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask, const DevMem2D_<int2>& trainIdx, const DevMem2D_<float2>& distance, hipStream_t stream) { const dim3 block(BLOCK_SIZE, BLOCK_SIZE); const dim3 grid(divUp(query.rows, BLOCK_SIZE)); const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int); hipLaunchKernelGGL(( match<BLOCK_SIZE, Dist>), dim3(grid), dim3(block), smemSize, stream, query, train, mask, trainIdx.data, distance.data); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> __global__ void match(const DevMem2D_<T> query, const DevMem2D_<T>* trains, int n, const Mask mask, int2* bestTrainIdx, int2* bestImgIdx, float2* bestDistance) { extern __shared__ int smem[]; const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y; typename Dist::value_type* s_query = (typename Dist::value_type*)(smem); typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE); float myBestDistance1 = 
numeric_limits<float>::max(); float myBestDistance2 = numeric_limits<float>::max(); int myBestTrainIdx1 = -1; int myBestTrainIdx2 = -1; int myBestImgIdx1 = -1; int myBestImgIdx2 = -1; Mask m = mask; for (int imgIdx = 0; imgIdx < n; ++imgIdx) { const DevMem2D_<T> train = trains[imgIdx]; m.next(); loop<BLOCK_SIZE, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestImgIdx1, myBestImgIdx2); } __syncthreads(); float* s_distance = (float*)(smem); int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE); int* s_imgIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE); findBestMatch<BLOCK_SIZE>(myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestImgIdx1, myBestImgIdx2, s_distance, s_trainIdx, s_imgIdx); if (queryIdx < query.rows && threadIdx.x == 0) { bestTrainIdx[queryIdx] = make_int2(myBestTrainIdx1, myBestTrainIdx2); bestImgIdx[queryIdx] = make_int2(myBestImgIdx1, myBestImgIdx2); bestDistance[queryIdx] = make_float2(myBestDistance1, myBestDistance2); } } template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> void match(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, const Mask& mask, const DevMem2D_<int2>& trainIdx, const DevMem2D_<int2>& imgIdx, const DevMem2D_<float2>& distance, hipStream_t stream) { const dim3 block(BLOCK_SIZE, BLOCK_SIZE); const dim3 grid(divUp(query.rows, BLOCK_SIZE)); const size_t smemSize = (3 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int); hipLaunchKernelGGL(( match<BLOCK_SIZE, Dist>), dim3(grid), dim3(block), smemSize, stream, query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } /////////////////////////////////////////////////////////////////////////////// // knnMatch 2 dispatcher template <typename Dist, typename T, typename Mask> void match2Dispatcher(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask, const DevMem2Db& trainIdx, const DevMem2Db& distance, int cc, hipStream_t stream) { (void)cc; if (query.cols <= 64) { matchUnrolledCached<16, 64, Dist>(query, train, mask, static_cast< DevMem2D_<int2> >(trainIdx), static_cast< DevMem2D_<float2> > (distance), stream); } else if (query.cols <= 128) { matchUnrolledCached<16, 128, Dist>(query, train, mask, static_cast< DevMem2D_<int2> >(trainIdx), static_cast< DevMem2D_<float2> > (distance), stream); } /*else if (query.cols <= 256) { matchUnrolled<16, 256, Dist>(query, train, mask, static_cast< DevMem2D_<int2> >(trainIdx), static_cast< DevMem2D_<float2> > (distance), stream); } else if (query.cols <= 512) { matchUnrolled<16, 512, Dist>(query, train, mask, static_cast< DevMem2D_<int2> >(trainIdx), static_cast< DevMem2D_<float2> > (distance), stream); } else if (query.cols <= 1024) { matchUnrolled<16, 1024, Dist>(query, train, mask, static_cast< DevMem2D_<int2> >(trainIdx), static_cast< DevMem2D_<float2> > (distance), stream); }*/ else { match<16, Dist>(query, train, mask, static_cast< DevMem2D_<int2> >(trainIdx), static_cast< DevMem2D_<float2> > (distance), stream); } } template <typename Dist, typename T, typename Mask> void match2Dispatcher(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, const Mask& mask, const DevMem2Db& trainIdx, const DevMem2Db& imgIdx, const DevMem2Db& distance, int cc, hipStream_t stream) { (void)cc; if (query.cols <= 64) { matchUnrolledCached<16, 64, Dist>(query, trains, n, mask, static_cast< DevMem2D_<int2> >(trainIdx), static_cast< 
DevMem2D_<int2> >(imgIdx), static_cast< DevMem2D_<float2> > (distance), stream); } else if (query.cols <= 128) { matchUnrolledCached<16, 128, Dist>(query, trains, n, mask, static_cast< DevMem2D_<int2> >(trainIdx), static_cast< DevMem2D_<int2> >(imgIdx), static_cast< DevMem2D_<float2> > (distance), stream); } /*else if (query.cols <= 256) { matchUnrolled<16, 256, Dist>(query, trains, n, mask, static_cast< DevMem2D_<int2> >(trainIdx), static_cast< DevMem2D_<int2> >(imgIdx), static_cast< DevMem2D_<float2> > (distance), stream); } else if (query.cols <= 512) { matchUnrolled<16, 512, Dist>(query, trains, n, mask, static_cast< DevMem2D_<int2> >(trainIdx), static_cast< DevMem2D_<int2> >(imgIdx), static_cast< DevMem2D_<float2> > (distance), stream); } else if (query.cols <= 1024) { matchUnrolled<16, 1024, Dist>(query, trains, n, mask, static_cast< DevMem2D_<int2> >(trainIdx), static_cast< DevMem2D_<int2> >(imgIdx), static_cast< DevMem2D_<float2> > (distance), stream); }*/ else { match<16, Dist>(query, trains, n, mask, static_cast< DevMem2D_<int2> >(trainIdx), static_cast< DevMem2D_<int2> >(imgIdx), static_cast< DevMem2D_<float2> > (distance), stream); } } /////////////////////////////////////////////////////////////////////////////// // Calc distance kernel template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> __global__ void calcDistanceUnrolled(const DevMem2D_<T> query, const DevMem2D_<T> train, const Mask mask, PtrStepf allDist) { extern __shared__ int smem[]; const int queryIdx = blockIdx.y * BLOCK_SIZE + threadIdx.y; const int trainIdx = blockIdx.x * BLOCK_SIZE + threadIdx.x; typename Dist::value_type* s_query = (typename Dist::value_type*)(smem); typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE); Dist dist; #pragma unroll for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i) { const int loadX = threadIdx.x + i * BLOCK_SIZE; if (loadX < query.cols) { s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = query.ptr(::min(queryIdx, query.rows - 1))[loadX]; s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = train.ptr(::min(blockIdx.x * BLOCK_SIZE + threadIdx.y, train.rows - 1))[loadX]; } else { s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0; s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0; } __syncthreads(); #pragma unroll for (int j = 0; j < BLOCK_SIZE; ++j) dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]); __syncthreads(); } if (queryIdx < query.rows && trainIdx < train.rows) { float distVal = numeric_limits<float>::max(); if (mask(queryIdx, trainIdx)) distVal = (typename Dist::result_type)dist; allDist.ptr(queryIdx)[trainIdx] = distVal; } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> void calcDistanceUnrolled(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask, const DevMem2Df& allDist, hipStream_t stream) { const dim3 block(BLOCK_SIZE, BLOCK_SIZE); const dim3 grid(divUp(train.rows, BLOCK_SIZE), divUp(query.rows, BLOCK_SIZE)); const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int); hipLaunchKernelGGL(( calcDistanceUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>), dim3(grid), dim3(block), smemSize, stream, query, train, mask, allDist); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> __global__ void calcDistance(const DevMem2D_<T> query, const DevMem2D_<T> train, const Mask mask, PtrStepf 
allDist) { extern __shared__ int smem[]; const int queryIdx = blockIdx.y * BLOCK_SIZE + threadIdx.y; const int trainIdx = blockIdx.x * BLOCK_SIZE + threadIdx.x; typename Dist::value_type* s_query = (typename Dist::value_type*)(smem); typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE); Dist dist; for (int i = 0, endi = (query.cols + BLOCK_SIZE - 1) / BLOCK_SIZE; i < endi; ++i) { const int loadX = threadIdx.x + i * BLOCK_SIZE; if (loadX < query.cols) { s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = query.ptr(::min(queryIdx, query.rows - 1))[loadX]; s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = train.ptr(::min(blockIdx.x * BLOCK_SIZE + threadIdx.y, train.rows - 1))[loadX]; } else { s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0; s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0; } __syncthreads(); #pragma unroll for (int j = 0; j < BLOCK_SIZE; ++j) dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]); __syncthreads(); } if (queryIdx < query.rows && trainIdx < train.rows) { float distVal = numeric_limits<float>::max(); if (mask(queryIdx, trainIdx)) distVal = (typename Dist::result_type)dist; allDist.ptr(queryIdx)[trainIdx] = distVal; } } template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> void calcDistance(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask, const DevMem2Df& allDist, hipStream_t stream) { const dim3 block(BLOCK_SIZE, BLOCK_SIZE); const dim3 grid(divUp(train.rows, BLOCK_SIZE), divUp(query.rows, BLOCK_SIZE)); const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int); hipLaunchKernelGGL(( calcDistance<BLOCK_SIZE, Dist>), dim3(grid), dim3(block), smemSize, stream, query, train, mask, allDist); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } /////////////////////////////////////////////////////////////////////////////// // Calc Distance dispatcher template <typename Dist, typename T, typename Mask> void calcDistanceDispatcher(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask, const DevMem2Df& allDist, int cc, hipStream_t stream) { (void)cc; if (query.cols <= 64) { calcDistanceUnrolled<16, 64, Dist>(query, train, mask, allDist, stream); } else if (query.cols <= 128) { calcDistanceUnrolled<16, 128, Dist>(query, train, mask, allDist, stream); } /*else if (query.cols <= 256) { calcDistanceUnrolled<16, 256, Dist>(query, train, mask, allDist, stream); } else if (query.cols <= 512) { calcDistanceUnrolled<16, 512, Dist>(query, train, mask, allDist, stream); } else if (query.cols <= 1024) { calcDistanceUnrolled<16, 1024, Dist>(query, train, mask, allDist, stream); }*/ else { calcDistance<16, Dist>(query, train, mask, allDist, stream); } } /////////////////////////////////////////////////////////////////////////////// // find knn match kernel template <int BLOCK_SIZE> __global__ void findBestMatch(DevMem2Df allDist, int i, PtrStepi trainIdx, PtrStepf distance) { const int SMEM_SIZE = BLOCK_SIZE > 64 ? 
BLOCK_SIZE : 64; __shared__ float s_dist[SMEM_SIZE]; __shared__ int s_trainIdx[SMEM_SIZE]; const int queryIdx = blockIdx.x; float* allDistRow = allDist.ptr(queryIdx); float dist = numeric_limits<float>::max(); int bestIdx = -1; for (int i = threadIdx.x; i < allDist.cols; i += BLOCK_SIZE) { float reg = allDistRow[i]; if (reg < dist) { dist = reg; bestIdx = i; } } s_dist[threadIdx.x] = dist; s_trainIdx[threadIdx.x] = bestIdx; __syncthreads(); reducePredVal<BLOCK_SIZE>(s_dist, dist, s_trainIdx, bestIdx, threadIdx.x, less<volatile float>()); if (threadIdx.x == 0) { if (dist < numeric_limits<float>::max()) { allDistRow[bestIdx] = numeric_limits<float>::max(); trainIdx.ptr(queryIdx)[i] = bestIdx; distance.ptr(queryIdx)[i] = dist; } } } template <int BLOCK_SIZE> void findKnnMatch(int k, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2Df& allDist, hipStream_t stream) { const dim3 block(BLOCK_SIZE, 1, 1); const dim3 grid(trainIdx.rows, 1, 1); for (int i = 0; i < k; ++i) { hipLaunchKernelGGL(( findBestMatch<BLOCK_SIZE>), dim3(grid), dim3(block), 0, stream, allDist, i, trainIdx, distance); cudaSafeCall( hipGetLastError() ); } if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } void findKnnMatchDispatcher(int k, const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist, int cc, hipStream_t stream) { findKnnMatch<256>(k, static_cast<DevMem2Di>(trainIdx), static_cast<DevMem2Df>(distance), allDist, stream); } /////////////////////////////////////////////////////////////////////////////// // knn match Dispatcher template <typename Dist, typename T, typename Mask> void matchDispatcher(const DevMem2D_<T>& query, const DevMem2D_<T>& train, int k, const Mask& mask, const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist, int cc, hipStream_t stream) { if (k == 2) { match2Dispatcher<Dist>(query, train, mask, trainIdx, distance, cc, stream); } else { calcDistanceDispatcher<Dist>(query, train, mask, allDist, cc, stream); findKnnMatchDispatcher(k, trainIdx, distance, allDist, cc, stream); } } /////////////////////////////////////////////////////////////////////////////// // knn match caller template <typename T> void matchL1_gpu(const DevMem2Db& query, const DevMem2Db& train, int k, const DevMem2Db& mask, const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist, int cc, hipStream_t stream) { if (mask.data) matchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), k, SingleMask(mask), trainIdx, distance, allDist, cc, stream); else matchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), k, WithOutMask(), trainIdx, distance, allDist, cc, stream); } template void matchL1_gpu<uchar >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, int k, const DevMem2Db& mask, const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist, int cc, hipStream_t stream); //template void matchL1_gpu<schar >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, int k, const DevMem2Db& mask, const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist, int cc, hipStream_t stream); template void matchL1_gpu<ushort>(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, int k, const DevMem2Db& mask, const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist, int cc, hipStream_t stream); template void matchL1_gpu<short >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, int k, const 
DevMem2Db& mask, const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist, int cc, hipStream_t stream); template void matchL1_gpu<int >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, int k, const DevMem2Db& mask, const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist, int cc, hipStream_t stream); template void matchL1_gpu<float >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, int k, const DevMem2Db& mask, const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist, int cc, hipStream_t stream); template <typename T> void matchL2_gpu(const DevMem2Db& query, const DevMem2Db& train, int k, const DevMem2Db& mask, const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist, int cc, hipStream_t stream) { if (mask.data) matchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), k, SingleMask(mask), trainIdx, distance, allDist, cc, stream); else matchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), k, WithOutMask(), trainIdx, distance, allDist, cc, stream); } //template void matchL2_gpu<uchar >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, int k, const DevMem2Db& mask, const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist, int cc, hipStream_t stream); //template void matchL2_gpu<schar >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, int k, const DevMem2Db& mask, const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist, int cc, hipStream_t stream); //template void matchL2_gpu<ushort>(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, int k, const DevMem2Db& mask, const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist, int cc, hipStream_t stream); //template void matchL2_gpu<short >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, int k, const DevMem2Db& mask, const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist, int cc, hipStream_t stream); //template void matchL2_gpu<int >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, int k, const DevMem2Db& mask, const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist, int cc, hipStream_t stream); template void matchL2_gpu<float >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, int k, const DevMem2Db& mask, const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist, int cc, hipStream_t stream); template <typename T> void matchHamming_gpu(const DevMem2Db& query, const DevMem2Db& train, int k, const DevMem2Db& mask, const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist, int cc, hipStream_t stream) { if (mask.data) matchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), k, SingleMask(mask), trainIdx, distance, allDist, cc, stream); else matchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), k, WithOutMask(), trainIdx, distance, allDist, cc, stream); } template void matchHamming_gpu<uchar >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, int k, const DevMem2Db& mask, const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist, int cc, hipStream_t stream); //template void matchHamming_gpu<schar >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, int k, const DevMem2Db& mask, const DevMem2Db& trainIdx, const DevMem2Db& distance, const 
DevMem2Df& allDist, int cc, hipStream_t stream); template void matchHamming_gpu<ushort>(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, int k, const DevMem2Db& mask, const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist, int cc, hipStream_t stream); //template void matchHamming_gpu<short >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, int k, const DevMem2Db& mask, const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist, int cc, hipStream_t stream); template void matchHamming_gpu<int >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, int k, const DevMem2Db& mask, const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist, int cc, hipStream_t stream); template <typename T> void match2L1_gpu(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Db& trainIdx, const DevMem2Db& imgIdx, const DevMem2Db& distance, int cc, hipStream_t stream) { if (masks.data) match2Dispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data), trainIdx, imgIdx, distance, cc, stream); else match2Dispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, WithOutMask(), trainIdx, imgIdx, distance, cc, stream); } template void match2L1_gpu<uchar >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Db& trainIdx, const DevMem2Db& imgIdx, const DevMem2Db& distance, int cc, hipStream_t stream); //template void match2L1_gpu<schar >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Db& trainIdx, const DevMem2Db& imgIdx, const DevMem2Db& distance, int cc, hipStream_t stream); template void match2L1_gpu<ushort>(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Db& trainIdx, const DevMem2Db& imgIdx, const DevMem2Db& distance, int cc, hipStream_t stream); template void match2L1_gpu<short >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Db& trainIdx, const DevMem2Db& imgIdx, const DevMem2Db& distance, int cc, hipStream_t stream); template void match2L1_gpu<int >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Db& trainIdx, const DevMem2Db& imgIdx, const DevMem2Db& distance, int cc, hipStream_t stream); template void match2L1_gpu<float >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Db& trainIdx, const DevMem2Db& imgIdx, const DevMem2Db& distance, int cc, hipStream_t stream); template <typename T> void match2L2_gpu(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Db& trainIdx, const DevMem2Db& imgIdx, const DevMem2Db& distance, int cc, hipStream_t stream) { if (masks.data) match2Dispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data), trainIdx, imgIdx, distance, cc, stream); else match2Dispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, WithOutMask(), trainIdx, imgIdx, distance, cc, stream); } //template void match2L2_gpu<uchar >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Db& trainIdx, const DevMem2Db& imgIdx, const DevMem2Db& distance, int cc, hipStream_t stream); 
//template void match2L2_gpu<schar >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Db& trainIdx, const DevMem2Db& imgIdx, const DevMem2Db& distance, int cc, hipStream_t stream); //template void match2L2_gpu<ushort>(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Db& trainIdx, const DevMem2Db& imgIdx, const DevMem2Db& distance, int cc, hipStream_t stream); //template void match2L2_gpu<short >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Db& trainIdx, const DevMem2Db& imgIdx, const DevMem2Db& distance, int cc, hipStream_t stream); //template void match2L2_gpu<int >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Db& trainIdx, const DevMem2Di& imgIdx, const DevMem2Db& distance, int cc, hipStream_t stream); template void match2L2_gpu<float >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Db& trainIdx, const DevMem2Db& imgIdx, const DevMem2Db& distance, int cc, hipStream_t stream); template <typename T> void match2Hamming_gpu(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Db& trainIdx, const DevMem2Db& imgIdx, const DevMem2Db& distance, int cc, hipStream_t stream) { if (masks.data) match2Dispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data), trainIdx, imgIdx, distance, cc, stream); else match2Dispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, WithOutMask(), trainIdx, imgIdx, distance, cc, stream); } template void match2Hamming_gpu<uchar >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Db& trainIdx, const DevMem2Db& imgIdx, const DevMem2Db& distance, int cc, hipStream_t stream); //template void match2Hamming_gpu<schar >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Db& trainIdx, const DevMem2Db& imgIdx, const DevMem2Db& distance, int cc, hipStream_t stream); template void match2Hamming_gpu<ushort>(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Db& trainIdx, const DevMem2Db& imgIdx, const DevMem2Db& distance, int cc, hipStream_t stream); //template void match2Hamming_gpu<short >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Db& trainIdx, const DevMem2Db& imgIdx, const DevMem2Db& distance, int cc, hipStream_t stream); template void match2Hamming_gpu<int >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Db& trainIdx, const DevMem2Db& imgIdx, const DevMem2Db& distance, int cc, hipStream_t stream); } // namespace bf_knnmatch }}} // namespace cv { namespace gpu { namespace device {
fc691a506f6e8ef9863e6ceb703473a7ab6399f6.cu
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
// //M*/ #include "internal_shared.hpp" #include "opencv2/gpu/device/limits.hpp" #include "opencv2/gpu/device/vec_distance.hpp" #include "opencv2/gpu/device/datamov_utils.hpp" namespace cv { namespace gpu { namespace device { namespace bf_knnmatch { /////////////////////////////////////////////////////////////////////////////// // Reduction template <int BLOCK_SIZE> __device__ void findBestMatch(float& bestDistance1, float& bestDistance2, int& bestTrainIdx1, int& bestTrainIdx2, float* s_distance, int* s_trainIdx) { float myBestDistance1 = numeric_limits<float>::max(); float myBestDistance2 = numeric_limits<float>::max(); int myBestTrainIdx1 = -1; int myBestTrainIdx2 = -1; s_distance += threadIdx.y * BLOCK_SIZE; s_trainIdx += threadIdx.y * BLOCK_SIZE; s_distance[threadIdx.x] = bestDistance1; s_trainIdx[threadIdx.x] = bestTrainIdx1; __syncthreads(); if (threadIdx.x == 0) { #pragma unroll for (int i = 0; i < BLOCK_SIZE; ++i) { float val = s_distance[i]; if (val < myBestDistance1) { myBestDistance2 = myBestDistance1; myBestTrainIdx2 = myBestTrainIdx1; myBestDistance1 = val; myBestTrainIdx1 = s_trainIdx[i]; } else if (val < myBestDistance2) { myBestDistance2 = val; myBestTrainIdx2 = s_trainIdx[i]; } } } __syncthreads(); s_distance[threadIdx.x] = bestDistance2; s_trainIdx[threadIdx.x] = bestTrainIdx2; __syncthreads(); if (threadIdx.x == 0) { #pragma unroll for (int i = 0; i < BLOCK_SIZE; ++i) { float val = s_distance[i]; if (val < myBestDistance2) { myBestDistance2 = val; myBestTrainIdx2 = s_trainIdx[i]; } } } bestDistance1 = myBestDistance1; bestDistance2 = myBestDistance2; bestTrainIdx1 = myBestTrainIdx1; bestTrainIdx2 = myBestTrainIdx2; } template <int BLOCK_SIZE> __device__ void findBestMatch(float& bestDistance1, float& bestDistance2, int& bestTrainIdx1, int& bestTrainIdx2, int& bestImgIdx1, int& bestImgIdx2, float* s_distance, int* s_trainIdx, int* s_imgIdx) { float myBestDistance1 = numeric_limits<float>::max(); float myBestDistance2 = numeric_limits<float>::max(); int myBestTrainIdx1 = -1; int myBestTrainIdx2 = -1; int myBestImgIdx1 = -1; int myBestImgIdx2 = -1; s_distance += threadIdx.y * BLOCK_SIZE; s_trainIdx += threadIdx.y * BLOCK_SIZE; s_imgIdx += threadIdx.y * BLOCK_SIZE; s_distance[threadIdx.x] = bestDistance1; s_trainIdx[threadIdx.x] = bestTrainIdx1; s_imgIdx[threadIdx.x] = bestImgIdx1; __syncthreads(); if (threadIdx.x == 0) { #pragma unroll for (int i = 0; i < BLOCK_SIZE; ++i) { float val = s_distance[i]; if (val < myBestDistance1) { myBestDistance2 = myBestDistance1; myBestTrainIdx2 = myBestTrainIdx1; myBestImgIdx2 = myBestImgIdx1; myBestDistance1 = val; myBestTrainIdx1 = s_trainIdx[i]; myBestImgIdx1 = s_imgIdx[i]; } else if (val < myBestDistance2) { myBestDistance2 = val; myBestTrainIdx2 = s_trainIdx[i]; myBestImgIdx2 = s_imgIdx[i]; } } } __syncthreads(); s_distance[threadIdx.x] = bestDistance2; s_trainIdx[threadIdx.x] = bestTrainIdx2; s_imgIdx[threadIdx.x] = bestImgIdx2; __syncthreads(); if (threadIdx.x == 0) { #pragma unroll for (int i = 0; i < BLOCK_SIZE; ++i) { float val = s_distance[i]; if (val < myBestDistance2) { myBestDistance2 = val; myBestTrainIdx2 = s_trainIdx[i]; myBestImgIdx2 = s_imgIdx[i]; } } } bestDistance1 = myBestDistance1; bestDistance2 = myBestDistance2; bestTrainIdx1 = myBestTrainIdx1; bestTrainIdx2 = myBestTrainIdx2; bestImgIdx1 = myBestImgIdx1; bestImgIdx2 = myBestImgIdx2; } /////////////////////////////////////////////////////////////////////////////// // Match Unrolled Cached template <int BLOCK_SIZE, int MAX_DESC_LEN, typename T, typename U> 
__device__ void loadQueryToSmem(int queryIdx, const DevMem2D_<T>& query, U* s_query) { #pragma unroll for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i) { const int loadX = threadIdx.x + i * BLOCK_SIZE; s_query[threadIdx.y * MAX_DESC_LEN + loadX] = loadX < query.cols ? query.ptr(::min(queryIdx, query.rows - 1))[loadX] : 0; } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> __device__ void loopUnrolledCached(int queryIdx, const DevMem2D_<T>& query, int imgIdx, const DevMem2D_<T>& train, const Mask& mask, typename Dist::value_type* s_query, typename Dist::value_type* s_train, float& bestDistance1, float& bestDistance2, int& bestTrainIdx1, int& bestTrainIdx2, int& bestImgIdx1, int& bestImgIdx2) { for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t) { Dist dist; #pragma unroll for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i) { const int loadX = threadIdx.x + i * BLOCK_SIZE; s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0; if (loadX < train.cols) { T val; ForceGlob<T>::Load(train.ptr(::min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1)), loadX, val); s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = val; } __syncthreads(); #pragma unroll for (int j = 0; j < BLOCK_SIZE; ++j) dist.reduceIter(s_query[threadIdx.y * MAX_DESC_LEN + i * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]); __syncthreads(); } typename Dist::result_type distVal = dist; const int trainIdx = t * BLOCK_SIZE + threadIdx.x; if (queryIdx < query.rows && trainIdx < train.rows && mask(queryIdx, trainIdx)) { if (distVal < bestDistance1) { bestImgIdx2 = bestImgIdx1; bestDistance2 = bestDistance1; bestTrainIdx2 = bestTrainIdx1; bestImgIdx1 = imgIdx; bestDistance1 = distVal; bestTrainIdx1 = trainIdx; } else if (distVal < bestDistance2) { bestImgIdx2 = imgIdx; bestDistance2 = distVal; bestTrainIdx2 = trainIdx; } } } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> __global__ void matchUnrolledCached(const DevMem2D_<T> query, const DevMem2D_<T> train, const Mask mask, int2* bestTrainIdx, float2* bestDistance) { extern __shared__ int smem[]; const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y; typename Dist::value_type* s_query = (typename Dist::value_type*)(smem); typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * MAX_DESC_LEN); loadQueryToSmem<BLOCK_SIZE, MAX_DESC_LEN>(queryIdx, query, s_query); float myBestDistance1 = numeric_limits<float>::max(); float myBestDistance2 = numeric_limits<float>::max(); int myBestTrainIdx1 = -1; int myBestTrainIdx2 = -1; loopUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestTrainIdx1, myBestTrainIdx2); __syncthreads(); float* s_distance = (float*)(smem); int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE); findBestMatch<BLOCK_SIZE>(myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, s_distance, s_trainIdx); if (queryIdx < query.rows && threadIdx.x == 0) { bestTrainIdx[queryIdx] = make_int2(myBestTrainIdx1, myBestTrainIdx2); bestDistance[queryIdx] = make_float2(myBestDistance1, myBestDistance2); } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> void matchUnrolledCached(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask, const DevMem2D_<int2>& trainIdx, const DevMem2D_<float2>& distance, cudaStream_t stream) { const dim3 block(BLOCK_SIZE, 
BLOCK_SIZE); const dim3 grid(divUp(query.rows, BLOCK_SIZE)); const size_t smemSize = (BLOCK_SIZE * (MAX_DESC_LEN >= BLOCK_SIZE ? MAX_DESC_LEN : BLOCK_SIZE) + BLOCK_SIZE * BLOCK_SIZE) * sizeof(int); matchUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist><<<grid, block, smemSize, stream>>>(query, train, mask, trainIdx.data, distance.data); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> __global__ void matchUnrolledCached(const DevMem2D_<T> query, const DevMem2D_<T>* trains, int n, const Mask mask, int2* bestTrainIdx, int2* bestImgIdx, float2* bestDistance) { extern __shared__ int smem[]; const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y; typename Dist::value_type* s_query = (typename Dist::value_type*)(smem); typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * MAX_DESC_LEN); loadQueryToSmem<BLOCK_SIZE, MAX_DESC_LEN>(queryIdx, query, s_query); float myBestDistance1 = numeric_limits<float>::max(); float myBestDistance2 = numeric_limits<float>::max(); int myBestTrainIdx1 = -1; int myBestTrainIdx2 = -1; int myBestImgIdx1 = -1; int myBestImgIdx2 = -1; Mask m = mask; for (int imgIdx = 0; imgIdx < n; ++imgIdx) { const DevMem2D_<T> train = trains[imgIdx]; m.next(); loopUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestImgIdx1, myBestImgIdx2); } __syncthreads(); float* s_distance = (float*)(smem); int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE); int* s_imgIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE); findBestMatch<BLOCK_SIZE>(myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestImgIdx1, myBestImgIdx2, s_distance, s_trainIdx, s_imgIdx); if (queryIdx < query.rows && threadIdx.x == 0) { bestTrainIdx[queryIdx] = make_int2(myBestTrainIdx1, myBestTrainIdx2); bestImgIdx[queryIdx] = make_int2(myBestImgIdx1, myBestImgIdx2); bestDistance[queryIdx] = make_float2(myBestDistance1, myBestDistance2); } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> void matchUnrolledCached(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, const Mask& mask, const DevMem2D_<int2>& trainIdx, const DevMem2D_<int2>& imgIdx, const DevMem2D_<float2>& distance, cudaStream_t stream) { const dim3 block(BLOCK_SIZE, BLOCK_SIZE); const dim3 grid(divUp(query.rows, BLOCK_SIZE)); const size_t smemSize = (BLOCK_SIZE * (MAX_DESC_LEN >= 2 * BLOCK_SIZE ? 
MAX_DESC_LEN : 2 * BLOCK_SIZE) + BLOCK_SIZE * BLOCK_SIZE) * sizeof(int); matchUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist><<<grid, block, smemSize, stream>>>(query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } /////////////////////////////////////////////////////////////////////////////// // Match Unrolled template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> __device__ void loopUnrolled(int queryIdx, const DevMem2D_<T>& query, int imgIdx, const DevMem2D_<T>& train, const Mask& mask, typename Dist::value_type* s_query, typename Dist::value_type* s_train, float& bestDistance1, float& bestDistance2, int& bestTrainIdx1, int& bestTrainIdx2, int& bestImgIdx1, int& bestImgIdx2) { for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t) { Dist dist; #pragma unroll for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i) { const int loadX = threadIdx.x + i * BLOCK_SIZE; s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0; s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0; if (loadX < query.cols) { T val; ForceGlob<T>::Load(query.ptr(::min(queryIdx, query.rows - 1)), loadX, val); s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = val; ForceGlob<T>::Load(train.ptr(::min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1)), loadX, val); s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = val; } __syncthreads(); #pragma unroll for (int j = 0; j < BLOCK_SIZE; ++j) dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]); __syncthreads(); } typename Dist::result_type distVal = dist; const int trainIdx = t * BLOCK_SIZE + threadIdx.x; if (queryIdx < query.rows && trainIdx < train.rows && mask(queryIdx, trainIdx)) { if (distVal < bestDistance1) { bestImgIdx2 = bestImgIdx1; bestDistance2 = bestDistance1; bestTrainIdx2 = bestTrainIdx1; bestImgIdx1 = imgIdx; bestDistance1 = distVal; bestTrainIdx1 = trainIdx; } else if (distVal < bestDistance2) { bestImgIdx2 = imgIdx; bestDistance2 = distVal; bestTrainIdx2 = trainIdx; } } } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> __global__ void matchUnrolled(const DevMem2D_<T> query, const DevMem2D_<T> train, const Mask mask, int2* bestTrainIdx, float2* bestDistance) { extern __shared__ int smem[]; const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y; typename Dist::value_type* s_query = (typename Dist::value_type*)(smem); typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE); float myBestDistance1 = numeric_limits<float>::max(); float myBestDistance2 = numeric_limits<float>::max(); int myBestTrainIdx1 = -1; int myBestTrainIdx2 = -1; loopUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestTrainIdx1, myBestTrainIdx2); __syncthreads(); float* s_distance = (float*)(smem); int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE); findBestMatch<BLOCK_SIZE>(myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, s_distance, s_trainIdx); if (queryIdx < query.rows && threadIdx.x == 0) { bestTrainIdx[queryIdx] = make_int2(myBestTrainIdx1, myBestTrainIdx2); bestDistance[queryIdx] = make_float2(myBestDistance1, myBestDistance2); } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> void matchUnrolled(const DevMem2D_<T>& query, const 
DevMem2D_<T>& train, const Mask& mask, const DevMem2D_<int2>& trainIdx, const DevMem2D_<float2>& distance, cudaStream_t stream) { const dim3 block(BLOCK_SIZE, BLOCK_SIZE); const dim3 grid(divUp(query.rows, BLOCK_SIZE)); const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int); matchUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist><<<grid, block, smemSize, stream>>>(query, train, mask, trainIdx.data, distance.data); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> __global__ void matchUnrolled(const DevMem2D_<T> query, const DevMem2D_<T>* trains, int n, const Mask mask, int2* bestTrainIdx, int2* bestImgIdx, float2* bestDistance) { extern __shared__ int smem[]; const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y; typename Dist::value_type* s_query = (typename Dist::value_type*)(smem); typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE); float myBestDistance1 = numeric_limits<float>::max(); float myBestDistance2 = numeric_limits<float>::max(); int myBestTrainIdx1 = -1; int myBestTrainIdx2 = -1; int myBestImgIdx1 = -1; int myBestImgIdx2 = -1; Mask m = mask; for (int imgIdx = 0; imgIdx < n; ++imgIdx) { const DevMem2D_<T> train = trains[imgIdx]; m.next(); loopUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestImgIdx1, myBestImgIdx2); } __syncthreads(); float* s_distance = (float*)(smem); int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE); int* s_imgIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE); findBestMatch<BLOCK_SIZE>(myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestImgIdx1, myBestImgIdx2, s_distance, s_trainIdx, s_imgIdx); if (queryIdx < query.rows && threadIdx.x == 0) { bestTrainIdx[queryIdx] = make_int2(myBestTrainIdx1, myBestTrainIdx2); bestImgIdx[queryIdx] = make_int2(myBestImgIdx1, myBestImgIdx2); bestDistance[queryIdx] = make_float2(myBestDistance1, myBestDistance2); } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> void matchUnrolled(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, const Mask& mask, const DevMem2D_<int2>& trainIdx, const DevMem2D_<int2>& imgIdx, const DevMem2D_<float2>& distance, cudaStream_t stream) { const dim3 block(BLOCK_SIZE, BLOCK_SIZE); const dim3 grid(divUp(query.rows, BLOCK_SIZE)); const size_t smemSize = (3 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int); matchUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist><<<grid, block, smemSize, stream>>>(query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } /////////////////////////////////////////////////////////////////////////////// // Match template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> __device__ void loop(int queryIdx, const DevMem2D_<T>& query, int imgIdx, const DevMem2D_<T>& train, const Mask& mask, typename Dist::value_type* s_query, typename Dist::value_type* s_train, float& bestDistance1, float& bestDistance2, int& bestTrainIdx1, int& bestTrainIdx2, int& bestImgIdx1, int& bestImgIdx2) { for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t) { Dist dist; for (int i = 0, endi = (query.cols + BLOCK_SIZE - 1) / BLOCK_SIZE; i < endi; ++i) { const int loadX = threadIdx.x + i * BLOCK_SIZE; 
s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0; s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0; if (loadX < query.cols) { T val; ForceGlob<T>::Load(query.ptr(::min(queryIdx, query.rows - 1)), loadX, val); s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = val; ForceGlob<T>::Load(train.ptr(::min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1)), loadX, val); s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = val; } __syncthreads(); #pragma unroll for (int j = 0; j < BLOCK_SIZE; ++j) dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]); __syncthreads(); } typename Dist::result_type distVal = dist; const int trainIdx = t * BLOCK_SIZE + threadIdx.x; if (queryIdx < query.rows && trainIdx < train.rows && mask(queryIdx, trainIdx)) { if (distVal < bestDistance1) { bestImgIdx2 = bestImgIdx1; bestDistance2 = bestDistance1; bestTrainIdx2 = bestTrainIdx1; bestImgIdx1 = imgIdx; bestDistance1 = distVal; bestTrainIdx1 = trainIdx; } else if (distVal < bestDistance2) { bestImgIdx2 = imgIdx; bestDistance2 = distVal; bestTrainIdx2 = trainIdx; } } } } template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> __global__ void match(const DevMem2D_<T> query, const DevMem2D_<T> train, const Mask mask, int2* bestTrainIdx, float2* bestDistance) { extern __shared__ int smem[]; const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y; typename Dist::value_type* s_query = (typename Dist::value_type*)(smem); typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE); float myBestDistance1 = numeric_limits<float>::max(); float myBestDistance2 = numeric_limits<float>::max(); int myBestTrainIdx1 = -1; int myBestTrainIdx2 = -1; loop<BLOCK_SIZE, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestTrainIdx1, myBestTrainIdx2); __syncthreads(); float* s_distance = (float*)(smem); int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE); findBestMatch<BLOCK_SIZE>(myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, s_distance, s_trainIdx); if (queryIdx < query.rows && threadIdx.x == 0) { bestTrainIdx[queryIdx] = make_int2(myBestTrainIdx1, myBestTrainIdx2); bestDistance[queryIdx] = make_float2(myBestDistance1, myBestDistance2); } } template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> void match(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask, const DevMem2D_<int2>& trainIdx, const DevMem2D_<float2>& distance, cudaStream_t stream) { const dim3 block(BLOCK_SIZE, BLOCK_SIZE); const dim3 grid(divUp(query.rows, BLOCK_SIZE)); const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int); match<BLOCK_SIZE, Dist><<<grid, block, smemSize, stream>>>(query, train, mask, trainIdx.data, distance.data); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> __global__ void match(const DevMem2D_<T> query, const DevMem2D_<T>* trains, int n, const Mask mask, int2* bestTrainIdx, int2* bestImgIdx, float2* bestDistance) { extern __shared__ int smem[]; const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y; typename Dist::value_type* s_query = (typename Dist::value_type*)(smem); typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE); float myBestDistance1 = numeric_limits<float>::max(); float myBestDistance2 = numeric_limits<float>::max(); int myBestTrainIdx1 = -1; 
int myBestTrainIdx2 = -1; int myBestImgIdx1 = -1; int myBestImgIdx2 = -1; Mask m = mask; for (int imgIdx = 0; imgIdx < n; ++imgIdx) { const DevMem2D_<T> train = trains[imgIdx]; m.next(); loop<BLOCK_SIZE, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestImgIdx1, myBestImgIdx2); } __syncthreads(); float* s_distance = (float*)(smem); int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE); int* s_imgIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE); findBestMatch<BLOCK_SIZE>(myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestImgIdx1, myBestImgIdx2, s_distance, s_trainIdx, s_imgIdx); if (queryIdx < query.rows && threadIdx.x == 0) { bestTrainIdx[queryIdx] = make_int2(myBestTrainIdx1, myBestTrainIdx2); bestImgIdx[queryIdx] = make_int2(myBestImgIdx1, myBestImgIdx2); bestDistance[queryIdx] = make_float2(myBestDistance1, myBestDistance2); } } template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> void match(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, const Mask& mask, const DevMem2D_<int2>& trainIdx, const DevMem2D_<int2>& imgIdx, const DevMem2D_<float2>& distance, cudaStream_t stream) { const dim3 block(BLOCK_SIZE, BLOCK_SIZE); const dim3 grid(divUp(query.rows, BLOCK_SIZE)); const size_t smemSize = (3 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int); match<BLOCK_SIZE, Dist><<<grid, block, smemSize, stream>>>(query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } /////////////////////////////////////////////////////////////////////////////// // knnMatch 2 dispatcher template <typename Dist, typename T, typename Mask> void match2Dispatcher(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask, const DevMem2Db& trainIdx, const DevMem2Db& distance, int cc, cudaStream_t stream) { (void)cc; if (query.cols <= 64) { matchUnrolledCached<16, 64, Dist>(query, train, mask, static_cast< DevMem2D_<int2> >(trainIdx), static_cast< DevMem2D_<float2> > (distance), stream); } else if (query.cols <= 128) { matchUnrolledCached<16, 128, Dist>(query, train, mask, static_cast< DevMem2D_<int2> >(trainIdx), static_cast< DevMem2D_<float2> > (distance), stream); } /*else if (query.cols <= 256) { matchUnrolled<16, 256, Dist>(query, train, mask, static_cast< DevMem2D_<int2> >(trainIdx), static_cast< DevMem2D_<float2> > (distance), stream); } else if (query.cols <= 512) { matchUnrolled<16, 512, Dist>(query, train, mask, static_cast< DevMem2D_<int2> >(trainIdx), static_cast< DevMem2D_<float2> > (distance), stream); } else if (query.cols <= 1024) { matchUnrolled<16, 1024, Dist>(query, train, mask, static_cast< DevMem2D_<int2> >(trainIdx), static_cast< DevMem2D_<float2> > (distance), stream); }*/ else { match<16, Dist>(query, train, mask, static_cast< DevMem2D_<int2> >(trainIdx), static_cast< DevMem2D_<float2> > (distance), stream); } } template <typename Dist, typename T, typename Mask> void match2Dispatcher(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, const Mask& mask, const DevMem2Db& trainIdx, const DevMem2Db& imgIdx, const DevMem2Db& distance, int cc, cudaStream_t stream) { (void)cc; if (query.cols <= 64) { matchUnrolledCached<16, 64, Dist>(query, trains, n, mask, static_cast< DevMem2D_<int2> >(trainIdx), static_cast< DevMem2D_<int2> >(imgIdx), static_cast< DevMem2D_<float2> > (distance), stream); } else if (query.cols <= 128) { matchUnrolledCached<16, 128, 
Dist>(query, trains, n, mask, static_cast< DevMem2D_<int2> >(trainIdx), static_cast< DevMem2D_<int2> >(imgIdx), static_cast< DevMem2D_<float2> > (distance), stream); } /*else if (query.cols <= 256) { matchUnrolled<16, 256, Dist>(query, trains, n, mask, static_cast< DevMem2D_<int2> >(trainIdx), static_cast< DevMem2D_<int2> >(imgIdx), static_cast< DevMem2D_<float2> > (distance), stream); } else if (query.cols <= 512) { matchUnrolled<16, 512, Dist>(query, trains, n, mask, static_cast< DevMem2D_<int2> >(trainIdx), static_cast< DevMem2D_<int2> >(imgIdx), static_cast< DevMem2D_<float2> > (distance), stream); } else if (query.cols <= 1024) { matchUnrolled<16, 1024, Dist>(query, trains, n, mask, static_cast< DevMem2D_<int2> >(trainIdx), static_cast< DevMem2D_<int2> >(imgIdx), static_cast< DevMem2D_<float2> > (distance), stream); }*/ else { match<16, Dist>(query, trains, n, mask, static_cast< DevMem2D_<int2> >(trainIdx), static_cast< DevMem2D_<int2> >(imgIdx), static_cast< DevMem2D_<float2> > (distance), stream); } } /////////////////////////////////////////////////////////////////////////////// // Calc distance kernel template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> __global__ void calcDistanceUnrolled(const DevMem2D_<T> query, const DevMem2D_<T> train, const Mask mask, PtrStepf allDist) { extern __shared__ int smem[]; const int queryIdx = blockIdx.y * BLOCK_SIZE + threadIdx.y; const int trainIdx = blockIdx.x * BLOCK_SIZE + threadIdx.x; typename Dist::value_type* s_query = (typename Dist::value_type*)(smem); typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE); Dist dist; #pragma unroll for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i) { const int loadX = threadIdx.x + i * BLOCK_SIZE; if (loadX < query.cols) { s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = query.ptr(::min(queryIdx, query.rows - 1))[loadX]; s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = train.ptr(::min(blockIdx.x * BLOCK_SIZE + threadIdx.y, train.rows - 1))[loadX]; } else { s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0; s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0; } __syncthreads(); #pragma unroll for (int j = 0; j < BLOCK_SIZE; ++j) dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]); __syncthreads(); } if (queryIdx < query.rows && trainIdx < train.rows) { float distVal = numeric_limits<float>::max(); if (mask(queryIdx, trainIdx)) distVal = (typename Dist::result_type)dist; allDist.ptr(queryIdx)[trainIdx] = distVal; } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> void calcDistanceUnrolled(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask, const DevMem2Df& allDist, cudaStream_t stream) { const dim3 block(BLOCK_SIZE, BLOCK_SIZE); const dim3 grid(divUp(train.rows, BLOCK_SIZE), divUp(query.rows, BLOCK_SIZE)); const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int); calcDistanceUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist><<<grid, block, smemSize, stream>>>(query, train, mask, allDist); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> __global__ void calcDistance(const DevMem2D_<T> query, const DevMem2D_<T> train, const Mask mask, PtrStepf allDist) { extern __shared__ int smem[]; const int queryIdx = blockIdx.y * BLOCK_SIZE + threadIdx.y; const int trainIdx = blockIdx.x * BLOCK_SIZE + threadIdx.x; typename 
Dist::value_type* s_query = (typename Dist::value_type*)(smem); typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE); Dist dist; for (int i = 0, endi = (query.cols + BLOCK_SIZE - 1) / BLOCK_SIZE; i < endi; ++i) { const int loadX = threadIdx.x + i * BLOCK_SIZE; if (loadX < query.cols) { s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = query.ptr(::min(queryIdx, query.rows - 1))[loadX]; s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = train.ptr(::min(blockIdx.x * BLOCK_SIZE + threadIdx.y, train.rows - 1))[loadX]; } else { s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0; s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0; } __syncthreads(); #pragma unroll for (int j = 0; j < BLOCK_SIZE; ++j) dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]); __syncthreads(); } if (queryIdx < query.rows && trainIdx < train.rows) { float distVal = numeric_limits<float>::max(); if (mask(queryIdx, trainIdx)) distVal = (typename Dist::result_type)dist; allDist.ptr(queryIdx)[trainIdx] = distVal; } } template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> void calcDistance(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask, const DevMem2Df& allDist, cudaStream_t stream) { const dim3 block(BLOCK_SIZE, BLOCK_SIZE); const dim3 grid(divUp(train.rows, BLOCK_SIZE), divUp(query.rows, BLOCK_SIZE)); const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int); calcDistance<BLOCK_SIZE, Dist><<<grid, block, smemSize, stream>>>(query, train, mask, allDist); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } /////////////////////////////////////////////////////////////////////////////// // Calc Distance dispatcher template <typename Dist, typename T, typename Mask> void calcDistanceDispatcher(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask, const DevMem2Df& allDist, int cc, cudaStream_t stream) { (void)cc; if (query.cols <= 64) { calcDistanceUnrolled<16, 64, Dist>(query, train, mask, allDist, stream); } else if (query.cols <= 128) { calcDistanceUnrolled<16, 128, Dist>(query, train, mask, allDist, stream); } /*else if (query.cols <= 256) { calcDistanceUnrolled<16, 256, Dist>(query, train, mask, allDist, stream); } else if (query.cols <= 512) { calcDistanceUnrolled<16, 512, Dist>(query, train, mask, allDist, stream); } else if (query.cols <= 1024) { calcDistanceUnrolled<16, 1024, Dist>(query, train, mask, allDist, stream); }*/ else { calcDistance<16, Dist>(query, train, mask, allDist, stream); } } /////////////////////////////////////////////////////////////////////////////// // find knn match kernel template <int BLOCK_SIZE> __global__ void findBestMatch(DevMem2Df allDist, int i, PtrStepi trainIdx, PtrStepf distance) { const int SMEM_SIZE = BLOCK_SIZE > 64 ? 
BLOCK_SIZE : 64; __shared__ float s_dist[SMEM_SIZE]; __shared__ int s_trainIdx[SMEM_SIZE]; const int queryIdx = blockIdx.x; float* allDistRow = allDist.ptr(queryIdx); float dist = numeric_limits<float>::max(); int bestIdx = -1; for (int i = threadIdx.x; i < allDist.cols; i += BLOCK_SIZE) { float reg = allDistRow[i]; if (reg < dist) { dist = reg; bestIdx = i; } } s_dist[threadIdx.x] = dist; s_trainIdx[threadIdx.x] = bestIdx; __syncthreads(); reducePredVal<BLOCK_SIZE>(s_dist, dist, s_trainIdx, bestIdx, threadIdx.x, less<volatile float>()); if (threadIdx.x == 0) { if (dist < numeric_limits<float>::max()) { allDistRow[bestIdx] = numeric_limits<float>::max(); trainIdx.ptr(queryIdx)[i] = bestIdx; distance.ptr(queryIdx)[i] = dist; } } } template <int BLOCK_SIZE> void findKnnMatch(int k, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2Df& allDist, cudaStream_t stream) { const dim3 block(BLOCK_SIZE, 1, 1); const dim3 grid(trainIdx.rows, 1, 1); for (int i = 0; i < k; ++i) { findBestMatch<BLOCK_SIZE><<<grid, block, 0, stream>>>(allDist, i, trainIdx, distance); cudaSafeCall( cudaGetLastError() ); } if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } void findKnnMatchDispatcher(int k, const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist, int cc, cudaStream_t stream) { findKnnMatch<256>(k, static_cast<DevMem2Di>(trainIdx), static_cast<DevMem2Df>(distance), allDist, stream); } /////////////////////////////////////////////////////////////////////////////// // knn match Dispatcher template <typename Dist, typename T, typename Mask> void matchDispatcher(const DevMem2D_<T>& query, const DevMem2D_<T>& train, int k, const Mask& mask, const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist, int cc, cudaStream_t stream) { if (k == 2) { match2Dispatcher<Dist>(query, train, mask, trainIdx, distance, cc, stream); } else { calcDistanceDispatcher<Dist>(query, train, mask, allDist, cc, stream); findKnnMatchDispatcher(k, trainIdx, distance, allDist, cc, stream); } } /////////////////////////////////////////////////////////////////////////////// // knn match caller template <typename T> void matchL1_gpu(const DevMem2Db& query, const DevMem2Db& train, int k, const DevMem2Db& mask, const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist, int cc, cudaStream_t stream) { if (mask.data) matchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), k, SingleMask(mask), trainIdx, distance, allDist, cc, stream); else matchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), k, WithOutMask(), trainIdx, distance, allDist, cc, stream); } template void matchL1_gpu<uchar >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, int k, const DevMem2Db& mask, const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist, int cc, cudaStream_t stream); //template void matchL1_gpu<schar >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, int k, const DevMem2Db& mask, const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist, int cc, cudaStream_t stream); template void matchL1_gpu<ushort>(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, int k, const DevMem2Db& mask, const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist, int cc, cudaStream_t stream); template void matchL1_gpu<short >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, int k, const DevMem2Db& mask, const 
DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist, int cc, cudaStream_t stream); template void matchL1_gpu<int >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, int k, const DevMem2Db& mask, const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist, int cc, cudaStream_t stream); template void matchL1_gpu<float >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, int k, const DevMem2Db& mask, const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist, int cc, cudaStream_t stream); template <typename T> void matchL2_gpu(const DevMem2Db& query, const DevMem2Db& train, int k, const DevMem2Db& mask, const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist, int cc, cudaStream_t stream) { if (mask.data) matchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), k, SingleMask(mask), trainIdx, distance, allDist, cc, stream); else matchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), k, WithOutMask(), trainIdx, distance, allDist, cc, stream); } //template void matchL2_gpu<uchar >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, int k, const DevMem2Db& mask, const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist, int cc, cudaStream_t stream); //template void matchL2_gpu<schar >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, int k, const DevMem2Db& mask, const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist, int cc, cudaStream_t stream); //template void matchL2_gpu<ushort>(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, int k, const DevMem2Db& mask, const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist, int cc, cudaStream_t stream); //template void matchL2_gpu<short >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, int k, const DevMem2Db& mask, const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist, int cc, cudaStream_t stream); //template void matchL2_gpu<int >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, int k, const DevMem2Db& mask, const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist, int cc, cudaStream_t stream); template void matchL2_gpu<float >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, int k, const DevMem2Db& mask, const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist, int cc, cudaStream_t stream); template <typename T> void matchHamming_gpu(const DevMem2Db& query, const DevMem2Db& train, int k, const DevMem2Db& mask, const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist, int cc, cudaStream_t stream) { if (mask.data) matchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), k, SingleMask(mask), trainIdx, distance, allDist, cc, stream); else matchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), k, WithOutMask(), trainIdx, distance, allDist, cc, stream); } template void matchHamming_gpu<uchar >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, int k, const DevMem2Db& mask, const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist, int cc, cudaStream_t stream); //template void matchHamming_gpu<schar >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, int k, const DevMem2Db& mask, const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& 
allDist, int cc, cudaStream_t stream); template void matchHamming_gpu<ushort>(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, int k, const DevMem2Db& mask, const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist, int cc, cudaStream_t stream); //template void matchHamming_gpu<short >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, int k, const DevMem2Db& mask, const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist, int cc, cudaStream_t stream); template void matchHamming_gpu<int >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, int k, const DevMem2Db& mask, const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist, int cc, cudaStream_t stream); template <typename T> void match2L1_gpu(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Db& trainIdx, const DevMem2Db& imgIdx, const DevMem2Db& distance, int cc, cudaStream_t stream) { if (masks.data) match2Dispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data), trainIdx, imgIdx, distance, cc, stream); else match2Dispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, WithOutMask(), trainIdx, imgIdx, distance, cc, stream); } template void match2L1_gpu<uchar >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Db& trainIdx, const DevMem2Db& imgIdx, const DevMem2Db& distance, int cc, cudaStream_t stream); //template void match2L1_gpu<schar >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Db& trainIdx, const DevMem2Db& imgIdx, const DevMem2Db& distance, int cc, cudaStream_t stream); template void match2L1_gpu<ushort>(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Db& trainIdx, const DevMem2Db& imgIdx, const DevMem2Db& distance, int cc, cudaStream_t stream); template void match2L1_gpu<short >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Db& trainIdx, const DevMem2Db& imgIdx, const DevMem2Db& distance, int cc, cudaStream_t stream); template void match2L1_gpu<int >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Db& trainIdx, const DevMem2Db& imgIdx, const DevMem2Db& distance, int cc, cudaStream_t stream); template void match2L1_gpu<float >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Db& trainIdx, const DevMem2Db& imgIdx, const DevMem2Db& distance, int cc, cudaStream_t stream); template <typename T> void match2L2_gpu(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Db& trainIdx, const DevMem2Db& imgIdx, const DevMem2Db& distance, int cc, cudaStream_t stream) { if (masks.data) match2Dispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data), trainIdx, imgIdx, distance, cc, stream); else match2Dispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, WithOutMask(), trainIdx, imgIdx, distance, cc, stream); } //template void match2L2_gpu<uchar >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Db& trainIdx, const DevMem2Db& imgIdx, const DevMem2Db& distance, int cc, cudaStream_t stream); 
//template void match2L2_gpu<schar >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Db& trainIdx, const DevMem2Db& imgIdx, const DevMem2Db& distance, int cc, cudaStream_t stream); //template void match2L2_gpu<ushort>(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Db& trainIdx, const DevMem2Db& imgIdx, const DevMem2Db& distance, int cc, cudaStream_t stream); //template void match2L2_gpu<short >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Db& trainIdx, const DevMem2Db& imgIdx, const DevMem2Db& distance, int cc, cudaStream_t stream); //template void match2L2_gpu<int >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Db& trainIdx, const DevMem2Di& imgIdx, const DevMem2Db& distance, int cc, cudaStream_t stream); template void match2L2_gpu<float >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Db& trainIdx, const DevMem2Db& imgIdx, const DevMem2Db& distance, int cc, cudaStream_t stream); template <typename T> void match2Hamming_gpu(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Db& trainIdx, const DevMem2Db& imgIdx, const DevMem2Db& distance, int cc, cudaStream_t stream) { if (masks.data) match2Dispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data), trainIdx, imgIdx, distance, cc, stream); else match2Dispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, WithOutMask(), trainIdx, imgIdx, distance, cc, stream); } template void match2Hamming_gpu<uchar >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Db& trainIdx, const DevMem2Db& imgIdx, const DevMem2Db& distance, int cc, cudaStream_t stream); //template void match2Hamming_gpu<schar >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Db& trainIdx, const DevMem2Db& imgIdx, const DevMem2Db& distance, int cc, cudaStream_t stream); template void match2Hamming_gpu<ushort>(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Db& trainIdx, const DevMem2Db& imgIdx, const DevMem2Db& distance, int cc, cudaStream_t stream); //template void match2Hamming_gpu<short >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Db& trainIdx, const DevMem2Db& imgIdx, const DevMem2Db& distance, int cc, cudaStream_t stream); template void match2Hamming_gpu<int >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Db& trainIdx, const DevMem2Db& imgIdx, const DevMem2Db& distance, int cc, cudaStream_t stream); } // namespace bf_knnmatch }}} // namespace cv { namespace gpu { namespace device {
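The brute-force kNN matching code above repeatedly applies one small rule: each candidate distance either displaces the current best match (demoting it to second best) or, failing that, competes for second place; findBestMatch() then reduces these per-thread pairs across the block. The standalone CUDA sketch below isolates that best/second-best update outside the OpenCV DevMem2D/Dist machinery. It is a minimal illustration only: the kernel name two_nearest, the single-thread scan, and the toy distances are assumptions, not part of the file above.

// Minimal, self-contained sketch of the best/second-best update used by
// loopUnrolledCached()/loopUnrolled()/loop() above, without the shared-memory tiling.
// Kernel name and data are illustrative only.
#include <cstdio>
#include <cfloat>

__global__ void two_nearest(const float* dist, int n, float* bestOut, int* idxOut)
{
    if (blockIdx.x != 0 || threadIdx.x != 0) return;   // single-thread scan for clarity

    float best1 = FLT_MAX, best2 = FLT_MAX;
    int idx1 = -1, idx2 = -1;

    for (int i = 0; i < n; ++i)
    {
        float d = dist[i];
        if (d < best1)      { best2 = best1; idx2 = idx1; best1 = d; idx1 = i; }
        else if (d < best2) { best2 = d; idx2 = i; }
    }

    bestOut[0] = best1; bestOut[1] = best2;
    idxOut[0]  = idx1;  idxOut[1]  = idx2;
}

int main()
{
    const int n = 8;
    float h_dist[n] = { 5.f, 3.f, 9.f, 1.f, 4.f, 1.5f, 7.f, 6.f };

    float *d_dist, *d_best; int* d_idx;
    cudaMalloc(&d_dist, n * sizeof(float));
    cudaMalloc(&d_best, 2 * sizeof(float));
    cudaMalloc(&d_idx,  2 * sizeof(int));
    cudaMemcpy(d_dist, h_dist, n * sizeof(float), cudaMemcpyHostToDevice);

    two_nearest<<<1, 1>>>(d_dist, n, d_best, d_idx);

    float best[2]; int idx[2];
    cudaMemcpy(best, d_best, sizeof(best), cudaMemcpyDeviceToHost);
    cudaMemcpy(idx,  d_idx,  sizeof(idx),  cudaMemcpyDeviceToHost);
    printf("1-NN: dist %.1f at %d, 2-NN: dist %.1f at %d\n", best[0], idx[0], best[1], idx[1]);
    // expected: 1-NN: dist 1.0 at 3, 2-NN: dist 1.5 at 5

    cudaFree(d_dist); cudaFree(d_best); cudaFree(d_idx);
    return 0;
}

In the full kernels the same two values per query are produced cooperatively by a thread block, and the resulting (best, second-best) pair is what callers commonly feed into a ratio test.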
e802f563e632deb33439ae82b1ccb734f15a6084.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #define N 2048 * 2048 // Number of elements in each vector /* * Optimize this already-accelerated codebase. Work iteratively, * and use nsys to support your work. * * Aim to profile `saxpy` (without modifying `N`) running under * 20us. * * Some bugs have been placed in this codebase for your edification. */ __global__ void saxpy(int * a, int * b, int * c) { int tid = blockIdx.x * blockDim.x * threadIdx.x; if ( tid < N ) c[tid] = 2 * a[tid] + b[tid]; } int main() { int *a, *b, *c; int size = N * sizeof (int); // The total number of bytes per vector hipMallocManaged(&a, size); hipMallocManaged(&b, size); hipMallocManaged(&c, size); // Initialize memory for( int i = 0; i < N; ++i ) { a[i] = 2; b[i] = 1; c[i] = 0; } int deviceId, numberOfSMs; hipGetDevice(&deviceId); hipDeviceGetAttribute(&numberOfSMs, hipDeviceAttributeMultiprocessorCount, deviceId); hipMemPrefetchAsync(a, size, deviceId); hipMemPrefetchAsync(b, size, deviceId); hipMemPrefetchAsync(c, size, deviceId); int threads_per_block = 256; int number_of_blocks = numberOfSMs * 32; hipLaunchKernelGGL(( saxpy) , dim3(number_of_blocks), dim3(threads_per_block) , 0, 0, a, b, c ); // Print out the first and last 5 values of c for a quality check for( int i = 0; i < 5; ++i ) printf("c[%d] = %d, ", i, c[i]); printf ("\n"); for( int i = N-5; i < N; ++i ) printf("c[%d] = %d, ", i, c[i]); printf ("\n"); hipFree( a ); hipFree( b ); hipFree( c ); }
e802f563e632deb33439ae82b1ccb734f15a6084.cu
#include <stdio.h>

#define N 2048 * 2048 // Number of elements in each vector

/*
 * Optimize this already-accelerated codebase. Work iteratively,
 * and use nsys to support your work.
 *
 * Aim to profile `saxpy` (without modifying `N`) running under
 * 20us.
 *
 * Some bugs have been placed in this codebase for your edification.
 */

__global__ void saxpy(int * a, int * b, int * c)
{
    int tid = blockIdx.x * blockDim.x * threadIdx.x;

    if ( tid < N )
        c[tid] = 2 * a[tid] + b[tid];
}

int main()
{
    int *a, *b, *c;
    int size = N * sizeof (int); // The total number of bytes per vector

    cudaMallocManaged(&a, size);
    cudaMallocManaged(&b, size);
    cudaMallocManaged(&c, size);

    // Initialize memory
    for( int i = 0; i < N; ++i )
    {
        a[i] = 2;
        b[i] = 1;
        c[i] = 0;
    }

    int deviceId, numberOfSMs;
    cudaGetDevice(&deviceId);
    cudaDeviceGetAttribute(&numberOfSMs, cudaDevAttrMultiProcessorCount, deviceId);

    cudaMemPrefetchAsync(a, size, deviceId);
    cudaMemPrefetchAsync(b, size, deviceId);
    cudaMemPrefetchAsync(c, size, deviceId);

    int threads_per_block = 256;
    int number_of_blocks = numberOfSMs * 32;

    saxpy <<< number_of_blocks, threads_per_block >>> ( a, b, c );

    // Print out the first and last 5 values of c for a quality check
    for( int i = 0; i < 5; ++i )
        printf("c[%d] = %d, ", i, c[i]);
    printf ("\n");
    for( int i = N-5; i < N; ++i )
        printf("c[%d] = %d, ", i, c[i]);
    printf ("\n");

    cudaFree( a );
    cudaFree( b );
    cudaFree( c );
}
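The saxpy file above states that bugs were placed in it deliberately as an exercise, so they are left untouched here. For reference only, the sketch below shows the conventional pattern the exercise points toward: the global index is formed with an addition (blockIdx.x * blockDim.x + threadIdx.x) rather than a multiplication, a grid-stride loop lets a fixed-size grid cover all N elements, and the host synchronizes before reading managed memory. The kernel name saxpy_ref and the launch configuration are illustrative assumptions, not the exercise's official solution.

// Hedged reference sketch: additive global index plus a grid-stride loop,
// so correctness does not depend on the launch size.
#include <stdio.h>

#define N 2048 * 2048 // Number of elements in each vector, as in the file above

__global__ void saxpy_ref(const int* a, const int* b, int* c)
{
    int stride = gridDim.x * blockDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += stride)
        c[i] = 2 * a[i] + b[i];
}

int main()
{
    int *a, *b, *c;
    size_t size = N * sizeof(int);

    cudaMallocManaged(&a, size);
    cudaMallocManaged(&b, size);
    cudaMallocManaged(&c, size);

    for (int i = 0; i < N; ++i) { a[i] = 2; b[i] = 1; c[i] = 0; }

    saxpy_ref<<<256, 256>>>(a, b, c);
    cudaDeviceSynchronize();   // managed memory: wait for the kernel before reading on the host

    printf("c[0] = %d, c[N-1] = %d (expected 5)\n", c[0], c[N - 1]);

    cudaFree(a); cudaFree(b); cudaFree(c);
    return 0;
}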
a7de5d57988a455a9d85da1a37a90efe53e7d76e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <ATen/DeviceGuard.h> #include <THH/THH.h> #include <THH/THHDeviceUtils.cuh> #include <vector> #include <iostream> #include <cmath> int const threadsPerBlock = sizeof(unsigned long long) * 8; template <typename scalar_t> struct Point { scalar_t x, y; __device__ Point() : x(0), y(0) {} __device__ Point(scalar_t x, scalar_t y) : x(x), y(y) {} __device__ scalar_t dot(const Point<scalar_t>& vec) const { return this->x * vec.x + this->y * vec.y; } __device__ scalar_t cross(const Point<scalar_t>& vec) const { return this->x * vec.y - vec.x * this->y; } __device__ Point<scalar_t> operator-( const Point<scalar_t>& vec) const { return Point(this->x - vec.x, this->y - vec.y); } __device__ Point<scalar_t> operator-=( const Point<scalar_t>& vec) { this->x -= vec.x; this->y -= vec.y; return *this; } __device__ Point<scalar_t> operator+( const Point<scalar_t>& vec) const { return Point(this->x + vec.x, this->y + vec.y); } __device__ Point<scalar_t> operator+=( const Point<scalar_t>& vec) { this->x += vec.x; this->y += vec.y; return *this; } __device__ bool operator<( const Point<scalar_t>& vec) const { if ((this->x == 0 && this->y == 0) && (vec.x != 0 || vec.y != 0)) return true; return this->cross(vec) > 0; } }; template <typename scalar_t> __device__ Point<scalar_t> operator*(scalar_t a, const Point<scalar_t>& p) { return Point<scalar_t>(a * p.x, a * p.y); } template <typename scalar_t> struct LinSeg { Point<scalar_t> x1, x2; __device__ LinSeg() {} __device__ LinSeg(const Point<scalar_t>& x1, const Point<scalar_t>& x2) : x1(x1), x2(x2) {} __device__ int InterSectWith(const LinSeg<scalar_t>& linseg, Point<scalar_t>* ps) { Point<scalar_t> a1 = this->x1, a2 = this->x2, b1 = linseg.x1, b2 = linseg.x2; /* intersection point A=a2-a1, B=b2-b1, C=a1-b1 [C.x] = [-A.x B.x] * [s] [C.y] [-A.y B.y] [t] */ Point<scalar_t> A = a2 - a1, B = b2 - b1, C = a1 - b1; if (C.x == 0 && C.y == 0) { ps[0] = a1; return 1; } scalar_t D = -A.cross(B); if (D != 0) { // not parallel, may intersect. scalar_t s = C.cross(B) / D; scalar_t t = -A.cross(C) / D; if (0 <= s && s < 1 && 0 <= t && t < 1) { // head vertex does not count. ps[0] = a1 + s * A; return 1; } else { return 0; } } else { // check colinearity: |A*C|=0 if (A.cross(C) != 0) { // not colinear return 0; } else { int p_cnt = 0; // colinear overlap: only tail vertices count. 
scalar_t BdtC = B.dot(C); // (b2-b1)*(a1-b1) scalar_t BdtB = B.dot(B); // (b2-b1)*(b2-b1) scalar_t AdtnC = -A.dot(C); // (a2-a1)*(b1-a1) scalar_t AdtA = A.dot(A); // (a2-a1)*(a2-a1) if (BdtC >= 0 && BdtC < BdtB) // a1 between b2 and b1 ps[p_cnt++] = a1; if (AdtnC >= 0 && AdtnC < AdtA) // b1 between a2 and a1 ps[p_cnt++] = b1; return p_cnt; } } } }; template <typename scalar_t> __device__ void rbbox2points(const scalar_t* const rb, Point<scalar_t>* vs) { scalar_t x = rb[0], y = rb[1], w_2 = rb[2] / 2, h_2 = rb[3] / 2, a = rb[4]; scalar_t cosa = cosf(a), sina = sinf(a); scalar_t wx = cosa * w_2, wy = sina * w_2; scalar_t hx = -sina * h_2, hy = cosa * h_2; vs[0] = Point<scalar_t>(x + wx + hx, y + wy + hy); vs[1] = Point<scalar_t>(x - wx + hx, y - wy + hy); vs[2] = Point<scalar_t>(x - wx - hx, y - wy - hy); vs[3] = Point<scalar_t>(x + wx - hx, y + wy - hy); } template <typename scalar_t> __device__ int vertex_in_rbbox(Point<scalar_t>* v1, Point<scalar_t>* v2, Point<scalar_t>* ps) { Point<scalar_t> center = (scalar_t)0.5 * (v2[0] + v2[2]); Point<scalar_t> w_vec = (scalar_t)0.5 * (v2[1] - v2[0]); Point<scalar_t> h_vec = (scalar_t)0.5 * (v2[2] - v2[1]); scalar_t h_vec_2 = h_vec.dot(h_vec); scalar_t w_vec_2 = w_vec.dot(w_vec); int p_cnt = 0; for (int i = 0; i < 4; i++) { Point<scalar_t> pr = v1[i] - center; if (std::abs(pr.dot(h_vec)) < h_vec_2 && std::abs(pr.dot(w_vec)) < w_vec_2) { ps[p_cnt++] = v1[i]; } } return p_cnt; } template <typename scalar_t> __device__ int rbbox_border_intsec(Point<scalar_t>* v1, Point<scalar_t>* v2, Point<scalar_t>* ps) { LinSeg<scalar_t> rb1[4] = { {v1[0], v1[1]}, {v1[1], v1[2]}, {v1[2], v1[3]}, {v1[3], v1[0]} }; LinSeg<scalar_t> rb2[4] = { {v2[0], v2[1]}, {v2[1], v2[2]}, {v2[2], v2[3]}, {v2[3], v2[0]} }; int p_cnt = 0; for (int i = 0; i < 4; i++) for (int j = 0; j < 4; j++) { p_cnt += rb1[i].InterSectWith(rb2[j], ps + p_cnt); } return p_cnt; } template <typename scalar_t> __device__ scalar_t area(Point<scalar_t> *vs_dirty, int p_cnt_dirty) { const scalar_t numthres = (scalar_t) 1e-2; Point<scalar_t> vs[16]; vs[0] = {0, 0}; int p_cnt = 1; // set vs[0] the reference point for (int i = 1; i < p_cnt_dirty; i++) { bool clean = true; vs_dirty[i] -= vs_dirty[0]; for (int j = 0; j < p_cnt; j++) { Point<scalar_t> diff = vs_dirty[i] - vs[j]; if (std::abs(diff.x) < numthres && std::abs(diff.y) < numthres) { clean = false; break; } } if (clean) { vs[p_cnt++] = vs_dirty[i]; } } // sort for (int i = 1; i < p_cnt; i++) { vs[0] = vs[i]; int j; for (j = i - 1; vs[0] < vs[j]; j--) vs[j + 1] = vs[j]; vs[j + 1] = vs[0]; } // calculate area scalar_t a = 0; vs[0] = {0, 0}; for (int i = 1; i < p_cnt; i++) a += vs[i].cross(vs[(i + 1) % p_cnt]); return a / 2; } template <typename scalar_t> __device__ scalar_t devIoU( const scalar_t* const rb1_p, const scalar_t* const rb2_p) { Point<scalar_t> v1[4], v2[4], u[16]; rbbox2points(rb1_p, v1); rbbox2points(rb2_p, v2); int p_cnt = 0; // add rbbox's vertices inside the other one p_cnt += vertex_in_rbbox(v1, v2, u + p_cnt); p_cnt += vertex_in_rbbox(v2, v1, u + p_cnt); // add rect border line segment intersection points p_cnt += rbbox_border_intsec(v1, v2, u + p_cnt); if (p_cnt >= 3) { scalar_t s1 = rb1_p[2] * rb1_p[3]; scalar_t s2 = rb2_p[2] * rb2_p[3]; scalar_t su = area(u, p_cnt); su = min(su, s1); su = min(su, s2); su = max(su, (scalar_t)0); return su / (s1 + s2 - su); } else { return (scalar_t)0; } } __global__ void nmsr_kernel(const int n_boxes, const float nms_overlap_thresh, const float* dev_boxes, unsigned long long* dev_mask) { const int 
row_start = blockIdx.y; const int col_start = blockIdx.x; const int row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); const int col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); __shared__ float block_boxes[threadsPerBlock * 5]; auto block_boxes_p = block_boxes + threadIdx.x * 5; auto dev_boxes_p = dev_boxes + (threadsPerBlock * col_start + threadIdx.x) * 6; if (threadIdx.x < col_size) { block_boxes_p[0] = dev_boxes_p[0]; block_boxes_p[1] = dev_boxes_p[1]; block_boxes_p[2] = dev_boxes_p[2]; block_boxes_p[3] = dev_boxes_p[3]; block_boxes_p[4] = dev_boxes_p[4]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; const float* cur_box = dev_boxes + cur_box_idx * 6; int i = 0; unsigned long long t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) { t |= 1ULL << i; } } const int col_blocks = THCCeilDiv(n_boxes, threadsPerBlock); dev_mask[cur_box_idx * col_blocks + col_start] = t; } } // boxes is a N x 6 tensor at::Tensor nmsr_cuda(const at::Tensor boxes, float nms_overlap_thresh) { // Ensure CUDA uses the input tensor device. at::DeviceGuard guard(boxes.device()); using scalar_t = float; AT_ASSERTM(boxes.type().is_cuda(), "boxes must be a CUDA tensor"); auto scores = boxes.select(1, 5); auto order_t = std::get<1>(scores.sort(0, /* descending=*/true)); auto boxes_sorted = boxes.index_select(0, order_t); int boxes_num = boxes.size(0); const int col_blocks = THCCeilDiv(boxes_num, threadsPerBlock); scalar_t* boxes_dev = boxes_sorted.data<scalar_t>(); THCState* state = at::globalContext().lazyInitCUDA(); // TODO replace with getTHCState unsigned long long* mask_dev = NULL; //THCudaCheck(THCudaMalloc(state, (void**) &mask_dev, // boxes_num * col_blocks * sizeof(unsigned long long))); mask_dev = (unsigned long long*) THCudaMalloc(state, boxes_num * col_blocks * sizeof(unsigned long long)); dim3 blocks(THCCeilDiv(boxes_num, threadsPerBlock), THCCeilDiv(boxes_num, threadsPerBlock)); dim3 threads(threadsPerBlock); hipLaunchKernelGGL(( nmsr_kernel) , dim3(blocks), dim3(threads), 0, 0, boxes_num, nms_overlap_thresh, boxes_dev, mask_dev); std::vector<unsigned long long> mask_host(boxes_num * col_blocks); THCudaCheck(hipMemcpy(&mask_host[0], mask_dev, sizeof(unsigned long long) * boxes_num * col_blocks, hipMemcpyDeviceToHost)); std::vector<unsigned long long> remv(col_blocks); memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks); at::Tensor keep = at::empty({ boxes_num }, boxes.options().dtype(at::kLong).device(at::kCPU)); int64_t* keep_out = keep.data<int64_t>(); int num_to_keep = 0; for (int i = 0; i < boxes_num; i++) { int nblock = i / threadsPerBlock; int inblock = i % threadsPerBlock; if (!(remv[nblock] & (1ULL << inblock))) { keep_out[num_to_keep++] = i; unsigned long long* p = &mask_host[0] + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv[j] |= p[j]; } } } THCudaFree(state, mask_dev); // TODO improve this part return std::get<0>(order_t.index({ keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep).to( order_t.device(), keep.scalar_type()) }).sort(0, false)); }
a7de5d57988a455a9d85da1a37a90efe53e7d76e.cu
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/DeviceGuard.h> #include <THC/THC.h> #include <THC/THCDeviceUtils.cuh> #include <vector> #include <iostream> #include <cmath> int const threadsPerBlock = sizeof(unsigned long long) * 8; template <typename scalar_t> struct Point { scalar_t x, y; __device__ Point() : x(0), y(0) {} __device__ Point(scalar_t x, scalar_t y) : x(x), y(y) {} __device__ scalar_t dot(const Point<scalar_t>& vec) const { return this->x * vec.x + this->y * vec.y; } __device__ scalar_t cross(const Point<scalar_t>& vec) const { return this->x * vec.y - vec.x * this->y; } __device__ Point<scalar_t> operator-( const Point<scalar_t>& vec) const { return Point(this->x - vec.x, this->y - vec.y); } __device__ Point<scalar_t> operator-=( const Point<scalar_t>& vec) { this->x -= vec.x; this->y -= vec.y; return *this; } __device__ Point<scalar_t> operator+( const Point<scalar_t>& vec) const { return Point(this->x + vec.x, this->y + vec.y); } __device__ Point<scalar_t> operator+=( const Point<scalar_t>& vec) { this->x += vec.x; this->y += vec.y; return *this; } __device__ bool operator<( const Point<scalar_t>& vec) const { if ((this->x == 0 && this->y == 0) && (vec.x != 0 || vec.y != 0)) return true; return this->cross(vec) > 0; } }; template <typename scalar_t> __device__ Point<scalar_t> operator*(scalar_t a, const Point<scalar_t>& p) { return Point<scalar_t>(a * p.x, a * p.y); } template <typename scalar_t> struct LinSeg { Point<scalar_t> x1, x2; __device__ LinSeg() {} __device__ LinSeg(const Point<scalar_t>& x1, const Point<scalar_t>& x2) : x1(x1), x2(x2) {} __device__ int InterSectWith(const LinSeg<scalar_t>& linseg, Point<scalar_t>* ps) { Point<scalar_t> a1 = this->x1, a2 = this->x2, b1 = linseg.x1, b2 = linseg.x2; /* intersection point A=a2-a1, B=b2-b1, C=a1-b1 [C.x] = [-A.x B.x] * [s] [C.y] [-A.y B.y] [t] */ Point<scalar_t> A = a2 - a1, B = b2 - b1, C = a1 - b1; if (C.x == 0 && C.y == 0) { ps[0] = a1; return 1; } scalar_t D = -A.cross(B); if (D != 0) { // not parallel, may intersect. scalar_t s = C.cross(B) / D; scalar_t t = -A.cross(C) / D; if (0 <= s && s < 1 && 0 <= t && t < 1) { // head vertex does not count. ps[0] = a1 + s * A; return 1; } else { return 0; } } else { // check colinearity: |A*C|=0 if (A.cross(C) != 0) { // not colinear return 0; } else { int p_cnt = 0; // colinear overlap: only tail vertices count. 
scalar_t BdtC = B.dot(C); // (b2-b1)*(a1-b1) scalar_t BdtB = B.dot(B); // (b2-b1)*(b2-b1) scalar_t AdtnC = -A.dot(C); // (a2-a1)*(b1-a1) scalar_t AdtA = A.dot(A); // (a2-a1)*(a2-a1) if (BdtC >= 0 && BdtC < BdtB) // a1 between b2 and b1 ps[p_cnt++] = a1; if (AdtnC >= 0 && AdtnC < AdtA) // b1 between a2 and a1 ps[p_cnt++] = b1; return p_cnt; } } } }; template <typename scalar_t> __device__ void rbbox2points(const scalar_t* const rb, Point<scalar_t>* vs) { scalar_t x = rb[0], y = rb[1], w_2 = rb[2] / 2, h_2 = rb[3] / 2, a = rb[4]; scalar_t cosa = cosf(a), sina = sinf(a); scalar_t wx = cosa * w_2, wy = sina * w_2; scalar_t hx = -sina * h_2, hy = cosa * h_2; vs[0] = Point<scalar_t>(x + wx + hx, y + wy + hy); vs[1] = Point<scalar_t>(x - wx + hx, y - wy + hy); vs[2] = Point<scalar_t>(x - wx - hx, y - wy - hy); vs[3] = Point<scalar_t>(x + wx - hx, y + wy - hy); } template <typename scalar_t> __device__ int vertex_in_rbbox(Point<scalar_t>* v1, Point<scalar_t>* v2, Point<scalar_t>* ps) { Point<scalar_t> center = (scalar_t)0.5 * (v2[0] + v2[2]); Point<scalar_t> w_vec = (scalar_t)0.5 * (v2[1] - v2[0]); Point<scalar_t> h_vec = (scalar_t)0.5 * (v2[2] - v2[1]); scalar_t h_vec_2 = h_vec.dot(h_vec); scalar_t w_vec_2 = w_vec.dot(w_vec); int p_cnt = 0; for (int i = 0; i < 4; i++) { Point<scalar_t> pr = v1[i] - center; if (std::abs(pr.dot(h_vec)) < h_vec_2 && std::abs(pr.dot(w_vec)) < w_vec_2) { ps[p_cnt++] = v1[i]; } } return p_cnt; } template <typename scalar_t> __device__ int rbbox_border_intsec(Point<scalar_t>* v1, Point<scalar_t>* v2, Point<scalar_t>* ps) { LinSeg<scalar_t> rb1[4] = { {v1[0], v1[1]}, {v1[1], v1[2]}, {v1[2], v1[3]}, {v1[3], v1[0]} }; LinSeg<scalar_t> rb2[4] = { {v2[0], v2[1]}, {v2[1], v2[2]}, {v2[2], v2[3]}, {v2[3], v2[0]} }; int p_cnt = 0; for (int i = 0; i < 4; i++) for (int j = 0; j < 4; j++) { p_cnt += rb1[i].InterSectWith(rb2[j], ps + p_cnt); } return p_cnt; } template <typename scalar_t> __device__ scalar_t area(Point<scalar_t> *vs_dirty, int p_cnt_dirty) { const scalar_t numthres = (scalar_t) 1e-2; Point<scalar_t> vs[16]; vs[0] = {0, 0}; int p_cnt = 1; // set vs[0] the reference point for (int i = 1; i < p_cnt_dirty; i++) { bool clean = true; vs_dirty[i] -= vs_dirty[0]; for (int j = 0; j < p_cnt; j++) { Point<scalar_t> diff = vs_dirty[i] - vs[j]; if (std::abs(diff.x) < numthres && std::abs(diff.y) < numthres) { clean = false; break; } } if (clean) { vs[p_cnt++] = vs_dirty[i]; } } // sort for (int i = 1; i < p_cnt; i++) { vs[0] = vs[i]; int j; for (j = i - 1; vs[0] < vs[j]; j--) vs[j + 1] = vs[j]; vs[j + 1] = vs[0]; } // calculate area scalar_t a = 0; vs[0] = {0, 0}; for (int i = 1; i < p_cnt; i++) a += vs[i].cross(vs[(i + 1) % p_cnt]); return a / 2; } template <typename scalar_t> __device__ scalar_t devIoU( const scalar_t* const rb1_p, const scalar_t* const rb2_p) { Point<scalar_t> v1[4], v2[4], u[16]; rbbox2points(rb1_p, v1); rbbox2points(rb2_p, v2); int p_cnt = 0; // add rbbox's vertices inside the other one p_cnt += vertex_in_rbbox(v1, v2, u + p_cnt); p_cnt += vertex_in_rbbox(v2, v1, u + p_cnt); // add rect border line segment intersection points p_cnt += rbbox_border_intsec(v1, v2, u + p_cnt); if (p_cnt >= 3) { scalar_t s1 = rb1_p[2] * rb1_p[3]; scalar_t s2 = rb2_p[2] * rb2_p[3]; scalar_t su = area(u, p_cnt); su = min(su, s1); su = min(su, s2); su = max(su, (scalar_t)0); return su / (s1 + s2 - su); } else { return (scalar_t)0; } } __global__ void nmsr_kernel(const int n_boxes, const float nms_overlap_thresh, const float* dev_boxes, unsigned long long* dev_mask) { const int 
row_start = blockIdx.y; const int col_start = blockIdx.x; const int row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); const int col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); __shared__ float block_boxes[threadsPerBlock * 5]; auto block_boxes_p = block_boxes + threadIdx.x * 5; auto dev_boxes_p = dev_boxes + (threadsPerBlock * col_start + threadIdx.x) * 6; if (threadIdx.x < col_size) { block_boxes_p[0] = dev_boxes_p[0]; block_boxes_p[1] = dev_boxes_p[1]; block_boxes_p[2] = dev_boxes_p[2]; block_boxes_p[3] = dev_boxes_p[3]; block_boxes_p[4] = dev_boxes_p[4]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; const float* cur_box = dev_boxes + cur_box_idx * 6; int i = 0; unsigned long long t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) { t |= 1ULL << i; } } const int col_blocks = THCCeilDiv(n_boxes, threadsPerBlock); dev_mask[cur_box_idx * col_blocks + col_start] = t; } } // boxes is a N x 6 tensor at::Tensor nmsr_cuda(const at::Tensor boxes, float nms_overlap_thresh) { // Ensure CUDA uses the input tensor device. at::DeviceGuard guard(boxes.device()); using scalar_t = float; AT_ASSERTM(boxes.type().is_cuda(), "boxes must be a CUDA tensor"); auto scores = boxes.select(1, 5); auto order_t = std::get<1>(scores.sort(0, /* descending=*/true)); auto boxes_sorted = boxes.index_select(0, order_t); int boxes_num = boxes.size(0); const int col_blocks = THCCeilDiv(boxes_num, threadsPerBlock); scalar_t* boxes_dev = boxes_sorted.data<scalar_t>(); THCState* state = at::globalContext().lazyInitCUDA(); // TODO replace with getTHCState unsigned long long* mask_dev = NULL; //THCudaCheck(THCudaMalloc(state, (void**) &mask_dev, // boxes_num * col_blocks * sizeof(unsigned long long))); mask_dev = (unsigned long long*) THCudaMalloc(state, boxes_num * col_blocks * sizeof(unsigned long long)); dim3 blocks(THCCeilDiv(boxes_num, threadsPerBlock), THCCeilDiv(boxes_num, threadsPerBlock)); dim3 threads(threadsPerBlock); nmsr_kernel <<<blocks, threads>>> (boxes_num, nms_overlap_thresh, boxes_dev, mask_dev); std::vector<unsigned long long> mask_host(boxes_num * col_blocks); THCudaCheck(cudaMemcpy(&mask_host[0], mask_dev, sizeof(unsigned long long) * boxes_num * col_blocks, cudaMemcpyDeviceToHost)); std::vector<unsigned long long> remv(col_blocks); memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks); at::Tensor keep = at::empty({ boxes_num }, boxes.options().dtype(at::kLong).device(at::kCPU)); int64_t* keep_out = keep.data<int64_t>(); int num_to_keep = 0; for (int i = 0; i < boxes_num; i++) { int nblock = i / threadsPerBlock; int inblock = i % threadsPerBlock; if (!(remv[nblock] & (1ULL << inblock))) { keep_out[num_to_keep++] = i; unsigned long long* p = &mask_host[0] + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv[j] |= p[j]; } } } THCudaFree(state, mask_dev); // TODO improve this part return std::get<0>(order_t.index({ keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep).to( order_t.device(), keep.scalar_type()) }).sort(0, false)); }
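In the rotated-box NMS code above, devIoU() collects each box's vertices that lie inside the other box plus the edge-to-edge intersection points, and then measures the resulting convex overlap with a cross-product ("shoelace") accumulation in area(). The host-side sketch below isolates just that accumulation for a unit square; the struct and function names are illustrative, and the angle sort and deduplication that area() also performs are omitted.

// Minimal host-side sketch of the shoelace accumulation at the heart of area().
// Vertices must be ordered counter-clockwise around the polygon; names are illustrative.
#include <cstdio>

struct Pt { float x, y; };

static float cross(const Pt& a, const Pt& b) { return a.x * b.y - b.x * a.y; }

static float polygonArea(const Pt* v, int n)
{
    float a = 0.f;
    for (int i = 0; i < n; ++i)
        a += cross(v[i], v[(i + 1) % n]);   // sum cross products of consecutive vertices
    return a / 2.f;                         // positive for counter-clockwise ordering
}

int main()
{
    const Pt unitSquare[4] = { {0.f, 0.f}, {1.f, 0.f}, {1.f, 1.f}, {0.f, 1.f} };
    printf("area = %f\n", polygonArea(unitSquare, 4));   // expected 1.000000
    return 0;
}

The IoU itself then follows as intersection / (area1 + area2 - intersection), which is the ratio devIoU() compares against nms_overlap_thresh in nmsr_kernel().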
9173762b19f59b1e43bbb7bc5dc1761237f966d9.hip
// !!! This is a file automatically generated by hipify!!! //=================================================================// // CUDA DC kernel // Topological-Driven: one node per thread, thread_centric, // use atomicAdd instruction //=================================================================// #include <hip/hip_runtime.h> #include <stdint.h> #include <stdio.h> #include "cudaGraph.h" __global__ void initialize(uint32_t * d_graph_property, uint64_t num_vertex) { size_t tid = blockIdx.x * blockDim.x + threadIdx.x; if ( tid < num_vertex ) { d_graph_property[tid] = 0; } } __global__ void kernel(uint32_t * vplist, cudaGraph graph) { uint64_t tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= graph.vertex_cnt) return; uint64_t start, end; start = graph.get_firstedge_index(tid); end = graph.get_edge_index_end(tid); for (uint64_t i=start; i<end; i++) { uint64_t dest = graph.get_edge_dest(i); atomicAdd(&(vplist[dest]), 1); } } void cuda_degree_centr(uint64_t * vertexlist, uint64_t * edgelist, uint32_t * vproplist, uint64_t vertex_cnt, uint64_t edge_cnt) { uint32_t * device_vpl = 0; float h2d_copy_time = 0; // host to device data transfer time float d2h_copy_time = 0; // device to host data transfer time float kernel_time = 0; // kernel execution time int device; hipGetDevice(&device); hipDeviceProp_t devProp; hipGetDeviceProperties(&devProp,device); // Try to use as many threads as possible so that each thread // is processing one vertex. If max thread is reached, // split them into multiple blocks. unsigned int num_thread_per_block = (unsigned int) vertex_cnt; if (num_thread_per_block > devProp.maxThreadsPerBlock) num_thread_per_block = devProp.maxThreadsPerBlock; unsigned int num_block = (unsigned int)ceil( vertex_cnt/(double)num_thread_per_block ); // malloc of gpu side cudaErrCheck( hipMalloc((void**)&device_vpl, vertex_cnt*sizeof(uint32_t)) ); hipEvent_t start_event, stop_event; cudaErrCheck( hipEventCreate(&start_event) ); cudaErrCheck( hipEventCreate(&stop_event) ); // initialization hipLaunchKernelGGL(( initialize), dim3(num_block), dim3(num_thread_per_block), 0, 0, device_vpl, vertex_cnt); // prepare graph struct // one for host side, one for device side cudaGraph h_graph, d_graph; // here copy only the pointers h_graph.read(vertexlist, edgelist, vertex_cnt, edge_cnt); // memcpy from host to device hipEventRecord(start_event, 0); // copy graph data to device h_graph.cudaGraphCopy(&d_graph); hipEventRecord(stop_event, 0); hipEventSynchronize(stop_event); hipEventElapsedTime(&h2d_copy_time, start_event, stop_event); hipEventRecord(start_event, 0); hipLaunchKernelGGL(( kernel), dim3(num_block), dim3(num_thread_per_block), 0, 0, device_vpl, d_graph); hipEventRecord(stop_event, 0); hipEventSynchronize(stop_event); hipEventElapsedTime(&kernel_time, start_event, stop_event); hipEventRecord(start_event, 0); cudaErrCheck( hipMemcpy(vproplist, device_vpl, vertex_cnt*sizeof(uint32_t), hipMemcpyDeviceToHost) ); hipEventRecord(stop_event, 0); hipEventSynchronize(stop_event); hipEventElapsedTime(&d2h_copy_time, start_event, stop_event); #ifndef ENABLE_VERIFY printf("== host->device copy time: %f ms\n", h2d_copy_time); printf("== device->host copy time: %f ms\n", d2h_copy_time); printf("== kernel time: %f ms\n", kernel_time); #endif hipEventDestroy(start_event); hipEventDestroy(stop_event); // free graph struct on device side d_graph.cudaGraphFree(); cudaErrCheck( hipFree(device_vpl) ); }
9173762b19f59b1e43bbb7bc5dc1761237f966d9.cu
//=================================================================// // CUDA DC kernel // Topological-Driven: one node per thread, thread_centric, // use atomicAdd instruction //=================================================================// #include <cuda.h> #include <stdint.h> #include <stdio.h> #include "cudaGraph.h" __global__ void initialize(uint32_t * d_graph_property, uint64_t num_vertex) { size_t tid = blockIdx.x * blockDim.x + threadIdx.x; if ( tid < num_vertex ) { d_graph_property[tid] = 0; } } __global__ void kernel(uint32_t * vplist, cudaGraph graph) { uint64_t tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= graph.vertex_cnt) return; uint64_t start, end; start = graph.get_firstedge_index(tid); end = graph.get_edge_index_end(tid); for (uint64_t i=start; i<end; i++) { uint64_t dest = graph.get_edge_dest(i); atomicAdd(&(vplist[dest]), 1); } } void cuda_degree_centr(uint64_t * vertexlist, uint64_t * edgelist, uint32_t * vproplist, uint64_t vertex_cnt, uint64_t edge_cnt) { uint32_t * device_vpl = 0; float h2d_copy_time = 0; // host to device data transfer time float d2h_copy_time = 0; // device to host data transfer time float kernel_time = 0; // kernel execution time int device; cudaGetDevice(&device); cudaDeviceProp devProp; cudaGetDeviceProperties(&devProp,device); // Try to use as many threads as possible so that each thread // is processing one vertex. If max thread is reached, // split them into multiple blocks. unsigned int num_thread_per_block = (unsigned int) vertex_cnt; if (num_thread_per_block > devProp.maxThreadsPerBlock) num_thread_per_block = devProp.maxThreadsPerBlock; unsigned int num_block = (unsigned int)ceil( vertex_cnt/(double)num_thread_per_block ); // malloc of gpu side cudaErrCheck( cudaMalloc((void**)&device_vpl, vertex_cnt*sizeof(uint32_t)) ); cudaEvent_t start_event, stop_event; cudaErrCheck( cudaEventCreate(&start_event) ); cudaErrCheck( cudaEventCreate(&stop_event) ); // initialization initialize<<<num_block, num_thread_per_block>>>(device_vpl, vertex_cnt); // prepare graph struct // one for host side, one for device side cudaGraph h_graph, d_graph; // here copy only the pointers h_graph.read(vertexlist, edgelist, vertex_cnt, edge_cnt); // memcpy from host to device cudaEventRecord(start_event, 0); // copy graph data to device h_graph.cudaGraphCopy(&d_graph); cudaEventRecord(stop_event, 0); cudaEventSynchronize(stop_event); cudaEventElapsedTime(&h2d_copy_time, start_event, stop_event); cudaEventRecord(start_event, 0); kernel<<<num_block, num_thread_per_block>>>(device_vpl, d_graph); cudaEventRecord(stop_event, 0); cudaEventSynchronize(stop_event); cudaEventElapsedTime(&kernel_time, start_event, stop_event); cudaEventRecord(start_event, 0); cudaErrCheck( cudaMemcpy(vproplist, device_vpl, vertex_cnt*sizeof(uint32_t), cudaMemcpyDeviceToHost) ); cudaEventRecord(stop_event, 0); cudaEventSynchronize(stop_event); cudaEventElapsedTime(&d2h_copy_time, start_event, stop_event); #ifndef ENABLE_VERIFY printf("== host->device copy time: %f ms\n", h2d_copy_time); printf("== device->host copy time: %f ms\n", d2h_copy_time); printf("== kernel time: %f ms\n", kernel_time); #endif cudaEventDestroy(start_event); cudaEventDestroy(stop_event); // free graph struct on device side d_graph.cudaGraphFree(); cudaErrCheck( cudaFree(device_vpl) ); }
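// --- Hedged verification sketch (not part of the original pair): a host-side
// --- reference for cuda_degree_centr(), assuming the CSR convention that
// --- vertexlist[v] .. vertexlist[v+1] bounds the out-edges of vertex v (which is
// --- what get_firstedge_index / get_edge_index_end appear to expose).
void degree_centr_reference(const uint64_t* vertexlist, const uint64_t* edgelist,
                            uint32_t* ref, uint64_t vertex_cnt)
{
    for (uint64_t v = 0; v < vertex_cnt; v++) ref[v] = 0;
    for (uint64_t v = 0; v < vertex_cnt; v++)
    {
        for (uint64_t e = vertexlist[v]; e < vertexlist[v + 1]; e++)
            ref[edgelist[e]] += 1;   // one increment per incoming edge, mirroring the atomicAdd
    }
}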
3842609378fb284be87ef3c4c0e4486cb0b84f07.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <time.h> #include <hip/hip_runtime.h> unsigned int getmax(unsigned int *, unsigned int); #define TPB 1024 __global__ void get_cuda_max(unsigned int* dev_num, unsigned int size){ unsigned int id = (blockDim.x * blockIdx.x) + threadIdx.x; unsigned int size_cp = size; unsigned int ten = size_cp/10; if(id < ten){ for(unsigned int i = 1; i < 10; i++){ if(dev_num[ten*i + id] > dev_num[id]) dev_num[id] = dev_num[ten*i + id]; } } } int main(int argc, char *argv[]) { unsigned int size = 0; // The size of the array unsigned int i; // loop index unsigned int * numbers; //pointer to the array if(argc !=2) { printf("usage: maxseq num\n"); printf("num = size of the array\n"); exit(1); } size = atol(argv[1]); numbers = (unsigned int *)malloc(size * sizeof(unsigned int)); if( !numbers ) { printf("Unable to allocate mem for an array of size %u\n", size); exit(1); } srand(time(NULL)); // setting a seed for the random number generator // Fill-up the array with random numbers from 0 to size-1 for( i = 0; i < size; i++) numbers[i] = rand() % size; // for( i = 0; i < size; i++) // printf("%u\n", numbers[i]); unsigned int num_blocks = (size + TPB - 1)/TPB; unsigned int* dev_num; hipMalloc((void**) &dev_num, size*sizeof(unsigned int)); hipMemcpy(dev_num, numbers, size*sizeof(unsigned int), hipMemcpyHostToDevice); unsigned int size_cp = size; while(size_cp > 1){ hipLaunchKernelGGL(( get_cuda_max), dim3(num_blocks), dim3(TPB), 0, 0, dev_num, size_cp); size_cp = size_cp/10; } hipMemcpy(numbers, dev_num, size*sizeof(unsigned int), hipMemcpyDeviceToHost); unsigned int ans = numbers[0]; hipFree(dev_num); printf(" The maximum number in the array is: %u\n", ans); printf("The max num sequentially is: %u\n", getmax(numbers, size)); free(numbers); exit(0); } /* input: pointer to an array of long int number of elements in the array output: the maximum number of the array */ unsigned int getmax(unsigned int num[], unsigned int size) { unsigned int i; unsigned int max = num[0]; for(i = 1; i < size; i++) if(num[i] > max) max = num[i]; return( max ); }
3842609378fb284be87ef3c4c0e4486cb0b84f07.cu
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <cuda.h> unsigned int getmax(unsigned int *, unsigned int); #define TPB 1024 __global__ void get_cuda_max(unsigned int* dev_num, unsigned int size){ unsigned int id = (blockDim.x * blockIdx.x) + threadIdx.x; unsigned int size_cp = size; unsigned int ten = size_cp/10; if(id < ten){ for(unsigned int i = 1; i < 10; i++){ if(dev_num[ten*i + id] > dev_num[id]) dev_num[id] = dev_num[ten*i + id]; } } } int main(int argc, char *argv[]) { unsigned int size = 0; // The size of the array unsigned int i; // loop index unsigned int * numbers; //pointer to the array if(argc !=2) { printf("usage: maxseq num\n"); printf("num = size of the array\n"); exit(1); } size = atol(argv[1]); numbers = (unsigned int *)malloc(size * sizeof(unsigned int)); if( !numbers ) { printf("Unable to allocate mem for an array of size %u\n", size); exit(1); } srand(time(NULL)); // setting a seed for the random number generator // Fill-up the array with random numbers from 0 to size-1 for( i = 0; i < size; i++) numbers[i] = rand() % size; // for( i = 0; i < size; i++) // printf("%u\n", numbers[i]); unsigned int num_blocks = (size + TPB - 1)/TPB; unsigned int* dev_num; cudaMalloc((void**) &dev_num, size*sizeof(unsigned int)); cudaMemcpy(dev_num, numbers, size*sizeof(unsigned int), cudaMemcpyHostToDevice); unsigned int size_cp = size; while(size_cp > 1){ get_cuda_max<<<num_blocks, TPB>>>(dev_num, size_cp); size_cp = size_cp/10; } cudaMemcpy(numbers, dev_num, size*sizeof(unsigned int), cudaMemcpyDeviceToHost); unsigned int ans = numbers[0]; cudaFree(dev_num); printf(" The maximum number in the array is: %u\n", ans); printf("The max num sequentially is: %u\n", getmax(numbers, size)); free(numbers); exit(0); } /* input: pointer to an array of long int number of elements in the array output: the maximum number of the array */ unsigned int getmax(unsigned int num[], unsigned int size) { unsigned int i; unsigned int max = num[0]; for(i = 1; i < size; i++) if(num[i] > max) max = num[i]; return( max ); }
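// --- Alternative sketch (not in the original file): the divide-by-ten reduction
// --- above only covers every element when size is a power of ten; a conventional
// --- shared-memory tree reduction works for any size. Launch with TPB threads per
// --- block; one launch shrinks n values to gridDim.x per-block maxima in out[],
// --- then relaunch on out[] until a single value remains.
__global__ void block_max_sketch(const unsigned int* in, unsigned int* out, unsigned int n)
{
    __shared__ unsigned int smax[TPB];
    unsigned int tid = threadIdx.x;
    unsigned int gid = blockIdx.x * blockDim.x + threadIdx.x;
    smax[tid] = (gid < n) ? in[gid] : 0u;     // 0 is the identity for max of unsigned values
    __syncthreads();
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
    {
        if (tid < s && smax[tid + s] > smax[tid]) smax[tid] = smax[tid + s];
        __syncthreads();
    }
    if (tid == 0) out[blockIdx.x] = smax[0];
}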
dc0f80708a9ccfba83807b79b04bab5b46f9d24e.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>

// Scale every element of vector by *alpha. The stride is blockDim.x only,
// so this kernel is meant to be launched with a single block.
__global__ void s_scal(int n, float *alpha, float *vector) {
    int stride = blockDim.x;
    int start = threadIdx.x;
    for (int i = start; i < n; i += stride) {
        vector[i] = vector[i] * (*alpha);
    }
}

int main() {
    int N = 2000000;
    int threads_per_block = 512;
    int blocks_per_grid = 1;

    float alpha, *vector, *d_alpha, *d_vector;
    alpha = 12.0;
    vector = new float[N];
    for (int i = 0; i < N; i += 1) {
        vector[i] = 1.0f;
    }

    hipMalloc(&d_alpha, sizeof(float));
    hipMalloc(&d_vector, N * sizeof(float));
    hipMemcpy(d_alpha, &alpha, sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(d_vector, vector, N * sizeof(float), hipMemcpyHostToDevice);

    hipLaunchKernelGGL(( s_scal), dim3(blocks_per_grid), dim3(threads_per_block), 0, 0, N, d_alpha, d_vector);

    hipMemcpy(vector, d_vector, N * sizeof(float), hipMemcpyDeviceToHost);

    float maxError = 0.0f;
    for (int i = 0; i < N; i++)
        maxError = fmax(maxError, fabs(vector[i]-alpha));
    std::cout << "Max error: " << maxError << std::endl;

    hipFree(d_alpha);
    hipFree(d_vector);
    delete[] vector; // allocated with new[], so it must be released with delete[], not free()
    hipDeviceReset();
    return 0;
}
dc0f80708a9ccfba83807b79b04bab5b46f9d24e.cu
#include <iostream>
#include <math.h>

// Scale every element of vector by *alpha. The stride is blockDim.x only,
// so this kernel is meant to be launched with a single block.
__global__ void s_scal(int n, float *alpha, float *vector) {
    int stride = blockDim.x;
    int start = threadIdx.x;
    for (int i = start; i < n; i += stride) {
        vector[i] = vector[i] * (*alpha);
    }
}

int main() {
    int N = 2000000;
    int threads_per_block = 512;
    int blocks_per_grid = 1;

    float alpha, *vector, *d_alpha, *d_vector;
    alpha = 12.0;
    vector = new float[N];
    for (int i = 0; i < N; i += 1) {
        vector[i] = 1.0f;
    }

    cudaMalloc(&d_alpha, sizeof(float));
    cudaMalloc(&d_vector, N * sizeof(float));
    cudaMemcpy(d_alpha, &alpha, sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_vector, vector, N * sizeof(float), cudaMemcpyHostToDevice);

    s_scal<<<blocks_per_grid, threads_per_block>>>(N, d_alpha, d_vector);

    cudaMemcpy(vector, d_vector, N * sizeof(float), cudaMemcpyDeviceToHost);

    float maxError = 0.0f;
    for (int i = 0; i < N; i++)
        maxError = fmax(maxError, fabs(vector[i]-alpha));
    std::cout << "Max error: " << maxError << std::endl;

    cudaFree(d_alpha);
    cudaFree(d_vector);
    delete[] vector; // allocated with new[], so it must be released with delete[], not free()
    cudaDeviceReset();
    return 0;
}
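// --- Hedged variant sketch (not in the original file): s_scal strides by
// --- blockDim.x only, which is correct for the single-block launch used above but
// --- would rescale elements repeatedly with more blocks; a grid-stride loop makes
// --- the same kernel safe for any <<<blocks, threads>>> configuration.
__global__ void s_scal_grid_stride(int n, const float *alpha, float *vector)
{
    int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride)
    {
        vector[i] = vector[i] * (*alpha);
    }
}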
e2cf3459ab445ac498fe0b855e4bb3c7cbddbfb1.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

#define B 2 /* */

__global__ void cudaAcc_GetPowerSpectrum_kernel( int NumDataPoints, float2* FreqData, float* PowerSpectrum) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;

//  if (i < NumDataPoints) {
        float ax = FreqData[i].x;
        float ay = FreqData[i].y;
        // PowerSpectrum[i] = freqData.x * freqData.x + freqData.y * freqData.y;
        PowerSpectrum[i] = __fadd_rn( __fmul_rn(ax,ax),__fmul_rn(ay,ay));
//  }
}
e2cf3459ab445ac498fe0b855e4bb3c7cbddbfb1.cu
#include "includes.h" #define B 2 /* */ __global__ void cudaAcc_GetPowerSpectrum_kernel( int NumDataPoints, float2* FreqData, float* PowerSpectrum) { const int i = blockIdx.x * blockDim.x + threadIdx.x; // if (i < NumDataPoints) { float ax = FreqData[i].x; float ay = FreqData[i].y; // PowerSpectrum[i] = freqData.x * freqData.x + freqData.y * freqData.y; PowerSpectrum[i] = __fadd_rn( __fmul_rn(ax,ax),__fmul_rn(ay,ay)); // } }
00a2bc8f3b5ede0df641a50a9be6a62ddcde3122.hip
// !!! This is a file automatically generated by hipify!!! // incrementArray.cu #include <stdio.h> #include <assert.h> #include <hip/hip_runtime.h> void incrementArrayOnHost(int *a, int N) { int i; for (i=0; i < N; i++) a[i] = a[i]+1.f; } __global__ void incrementArrayOnDevice(int *a, int N) { int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx<N) a[idx] = a[idx]+1.f; } int main(void) { int *a_h, *b_h; // pointers to host memory int *a_d; // pointer to device memory int i, N = 10; size_t size = N*sizeof(int); // allocate arrays on host a_h = (int *)malloc(size); b_h = (int *)malloc(size); // allocate array on device hipMalloc((void **) &a_d, size); // initialization of host data for (i=0; i<N; i++) a_h[i] = i; printf("%s\n", "These are on the host"); for(i = 0; i < N; ++i) { printf("%d\n", a_h[i]); } // copy data from host to device hipMemcpy(a_d, a_h, sizeof(int)*N, hipMemcpyHostToDevice); // do calculation on host incrementArrayOnHost(a_h, N); // do calculation on device: // Part 1 of 2. Compute execution configuration int blockSize = 4; int nBlocks = N/blockSize + (N%blockSize == 0?0:1); // Part 2 of 2. Call incrementArrayOnDevice kernel hipLaunchKernelGGL(( incrementArrayOnDevice) , dim3(nBlocks), dim3(blockSize) , 0, 0, a_d, N); // Retrieve result from device and store in b_h hipMemcpy(b_h, a_d, sizeof(int)*N, hipMemcpyDeviceToHost); // check results // for (i=0; i<N; i++) assert(a_h[i] == b_h[i]); // printf("%s\n", "These are on the host"); // for(i = 0; i < N; ++i) // { // printf("%d\n", b_h[i]); // } // cleanup free(a_h); free(b_h); hipFree(a_d); }
00a2bc8f3b5ede0df641a50a9be6a62ddcde3122.cu
// incrementArray.cu #include <stdio.h> #include <assert.h> #include <cuda.h> void incrementArrayOnHost(int *a, int N) { int i; for (i=0; i < N; i++) a[i] = a[i]+1.f; } __global__ void incrementArrayOnDevice(int *a, int N) { int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx<N) a[idx] = a[idx]+1.f; } int main(void) { int *a_h, *b_h; // pointers to host memory int *a_d; // pointer to device memory int i, N = 10; size_t size = N*sizeof(int); // allocate arrays on host a_h = (int *)malloc(size); b_h = (int *)malloc(size); // allocate array on device cudaMalloc((void **) &a_d, size); // initialization of host data for (i=0; i<N; i++) a_h[i] = i; printf("%s\n", "These are on the host"); for(i = 0; i < N; ++i) { printf("%d\n", a_h[i]); } // copy data from host to device cudaMemcpy(a_d, a_h, sizeof(int)*N, cudaMemcpyHostToDevice); // do calculation on host incrementArrayOnHost(a_h, N); // do calculation on device: // Part 1 of 2. Compute execution configuration int blockSize = 4; int nBlocks = N/blockSize + (N%blockSize == 0?0:1); // Part 2 of 2. Call incrementArrayOnDevice kernel incrementArrayOnDevice <<< nBlocks, blockSize >>> (a_d, N); // Retrieve result from device and store in b_h cudaMemcpy(b_h, a_d, sizeof(int)*N, cudaMemcpyDeviceToHost); // check results // for (i=0; i<N; i++) assert(a_h[i] == b_h[i]); // printf("%s\n", "These are on the host"); // for(i = 0; i < N; ++i) // { // printf("%d\n", b_h[i]); // } // cleanup free(a_h); free(b_h); cudaFree(a_d); }
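// --- Hedged variant sketch (not in the original file): the same increment driven
// --- through unified (managed) memory, which drops the explicit cudaMemcpy calls;
// --- run_managed() is a hypothetical helper, returning 0 when the kernel result
// --- matches the expected i + 1 for every element.
int run_managed(int N)
{
    int *a = NULL;
    cudaMallocManaged(&a, N * sizeof(int));               // visible to both host and device
    for (int i = 0; i < N; i++) a[i] = i;
    int blockSize = 4;
    int nBlocks = N/blockSize + (N%blockSize == 0?0:1);   // same rounding as main()
    incrementArrayOnDevice<<<nBlocks, blockSize>>>(a, N);
    cudaDeviceSynchronize();                              // wait before the host reads a[]
    int errors = 0;
    for (int i = 0; i < N; i++) if (a[i] != i + 1) errors++;
    cudaFree(a);
    return errors;
}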
97c225af0e1060bc49311d5d43f9c3a1e2b4d0bb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/device/cuda_util.h" #include "oneflow/core/framework/framework.h" #include "oneflow/core/cuda/atomic.cuh" #include <float.h> namespace oneflow { namespace { // NOTE(Liang Depeng): refer to // https://stackoverflow.com/questions/17371275/implementing-max-reduce-in-cuda template<typename T> __global__ void ReduceMaxMinPerLayer(const T *input_ptr, const int64_t elements, T *max_ptr, T *min_ptr) { extern __shared__ unsigned char shared_max_min_memory[]; T *shared_max = reinterpret_cast<T *>(shared_max_min_memory); T *shared_min = shared_max + blockDim.x; int64_t tid = threadIdx.x; int64_t gid = (blockDim.x * blockIdx.x) + tid; shared_max[tid] = -FLT_MAX; shared_min[tid] = -FLT_MAX; while (gid < elements) { shared_max[tid] = max(shared_max[tid], input_ptr[gid]); shared_min[tid] = max(shared_min[tid], -input_ptr[gid]); gid += gridDim.x * blockDim.x; } __syncthreads(); gid = (blockDim.x * blockIdx.x) + tid; for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (tid < s && gid < elements) { shared_max[tid] = max(shared_max[tid], shared_max[tid + s]); shared_min[tid] = max(shared_min[tid], shared_min[tid + s]); } __syncthreads(); } if (tid == 0) { cuda::atomic::Max(max_ptr, shared_max[0]); cuda::atomic::Max(min_ptr, shared_min[0]); } } template<typename T> __global__ void InitMaxMin(const int64_t elements, T *max_ptr, T *min_ptr) { int64_t tid = threadIdx.x; int64_t gid = (blockDim.x * blockIdx.x) + tid; while (gid < elements) { max_ptr[gid] = -FLT_MAX; min_ptr[gid] = -FLT_MAX; gid += gridDim.x * blockDim.x; } } template<typename T> __global__ void CalScaleZeroPointSymmetric(const int64_t elements, const double quantization_bit, const float momentum, const T *max_ptr, const T *min_ptr, T *moving_max_ptr, T *moving_min_ptr, T *scale, T *zero_point) { int64_t tid = threadIdx.x; int64_t gid = (blockDim.x * blockIdx.x) + tid; while (gid < elements) { T activation_max = max(fabs(max_ptr[gid]), fabs(min_ptr[gid])); T denominator = static_cast<T>(pow(2.0, quantization_bit - 1)) - 1; if (moving_max_ptr[gid] == 0) moving_max_ptr[gid] = activation_max; else moving_max_ptr[gid] = moving_max_ptr[gid] * momentum + activation_max * (1 - momentum); // NOTE(Liang Depeng): symmetric quantization only use moving_max to calculate the scale moving_min_ptr[gid] = moving_max_ptr[gid]; scale[gid] = moving_max_ptr[gid] / denominator; zero_point[gid] = 0; gid += gridDim.x * blockDim.x; } } template<typename T> __global__ void CalFreezeScaleZeroPointSymmetric(const int64_t elements, const double quantization_bit, const float momentum, const T *moving_max_ptr, T *scale, T *zero_point) { int64_t tid = threadIdx.x; int64_t gid = (blockDim.x * blockIdx.x) + tid; while (gid < elements) { T denominator = static_cast<T>(pow(2.0, quantization_bit - 1)) - 1; scale[gid] = moving_max_ptr[gid] / denominator; zero_point[gid] = 0; gid += gridDim.x * 
blockDim.x; } } template<typename T> __global__ void CalScaleZeroPointAffine(const int64_t elements, const double quantization_bit, const float momentum, const T *max_ptr, const T *min_ptr, T *moving_max_ptr, T *moving_min_ptr, T *scale, T *zero_point) { int64_t tid = threadIdx.x; int64_t gid = (blockDim.x * blockIdx.x) + tid; while (gid < elements) { T denominator = static_cast<T>(pow(2.0, quantization_bit)) - 1; if (moving_max_ptr[gid] == 0) moving_max_ptr[gid] = max_ptr[gid]; else moving_max_ptr[gid] = moving_max_ptr[gid] * momentum + max_ptr[gid] * (1 - momentum); if (moving_min_ptr[gid] == 0) moving_min_ptr[gid] = -min_ptr[gid]; else moving_min_ptr[gid] = moving_min_ptr[gid] * momentum + -min_ptr[gid] * (1 - momentum); T min = moving_min_ptr[gid]; T s = (moving_max_ptr[gid] - min) / denominator; scale[gid] = s; zero_point[gid] = -min / s; gid += gridDim.x * blockDim.x; } } template<typename T> __global__ void CalFreezeScaleZeroPointAffine(const int64_t elements, const double quantization_bit, const float momentum, const T *moving_max_ptr, const T *moving_min_ptr, T *scale, T *zero_point) { int64_t tid = threadIdx.x; int64_t gid = (blockDim.x * blockIdx.x) + tid; while (gid < elements) { T denominator = static_cast<T>(pow(2.0, quantization_bit)) - 1; T min = moving_min_ptr[gid]; T s = (moving_max_ptr[gid] - min) / denominator; scale[gid] = s; zero_point[gid] = -min / s; gid += gridDim.x * blockDim.x; } } template<typename T> __global__ void CalScaleZeroPointCambricon(const int64_t elements, const double quantization_bit, const float momentum, const T *max_ptr, const T *min_ptr, T *moving_max_ptr, T *moving_min_ptr, T *scale, T *zero_point) { int64_t tid = threadIdx.x; int64_t gid = (blockDim.x * blockIdx.x) + tid; while (gid < elements) { T activation_max = max(fabs(max_ptr[gid]), fabs(min_ptr[gid])); if (moving_max_ptr[gid] == 0) moving_max_ptr[gid] = activation_max; else moving_max_ptr[gid] = moving_max_ptr[gid] * momentum + activation_max * (1 - momentum); // NOTE(Liang Depeng): cambricon quantization only use moving_max to calculate the scale moving_min_ptr[gid] = moving_max_ptr[gid]; scale[gid] = floor(log2(moving_max_ptr[gid])) - (quantization_bit - 2); zero_point[gid] = 0; gid += gridDim.x * blockDim.x; } } template<typename T> __global__ void CalFreezeScaleZeroPointCambricon(const int64_t elements, const double quantization_bit, const float momentum, const T *moving_max_ptr, T *scale, T *zero_point) { int64_t tid = threadIdx.x; int64_t gid = (blockDim.x * blockIdx.x) + tid; while (gid < elements) { T denominator = static_cast<T>(pow(2.0, quantization_bit - 1)) - 1; scale[gid] = floor(log2(moving_max_ptr[gid])) - (quantization_bit - 2); zero_point[gid] = 0; gid += gridDim.x * blockDim.x; } } } // namespace #define LAUNCH_CUDA_KERNEL(func, device_ctx_ptr, thread_num, shared_mem_size, ...) 
\ hipLaunchKernelGGL(( func), dim3(SMBlocksNum4ThreadsNum(thread_num)), dim3(kCudaThreadsNumPerBlock), shared_mem_size, \ (device_ctx_ptr)->cuda_stream(), __VA_ARGS__) template<typename T> class GpuMovingAverageMinMaxObserverKernel final : public user_op::OpKernel { public: GpuMovingAverageMinMaxObserverKernel() = default; ~GpuMovingAverageMinMaxObserverKernel() = default; private: void Compute(user_op::KernelComputeContext *ctx) const override { const user_op::Tensor *in = ctx->Tensor4ArgNameAndIndex("in", 0); const user_op::Tensor *current_train_step = ctx->Tensor4ArgNameAndIndex("current_train_step", 0); user_op::Tensor *moving_max = ctx->Tensor4ArgNameAndIndex("moving_max", 0); user_op::Tensor *moving_min = ctx->Tensor4ArgNameAndIndex("moving_min", 0); user_op::Tensor *scale = ctx->Tensor4ArgNameAndIndex("scale", 0); user_op::Tensor *zero_point = ctx->Tensor4ArgNameAndIndex("zero_point", 0); user_op::Tensor *tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0); const bool is_training = ctx->Attr<bool>("training"); const int64_t stop_update_after_iters = ctx->Attr<int64_t>("stop_update_after_iters"); const std::string quantization_scheme = ctx->Attr<std::string>("quantization_scheme"); const int32_t quantization_bit = ctx->Attr<int32_t>("quantization_bit"); const float momentum = ctx->Attr<float>("momentum"); const std::string quantization_formula = ctx->Attr<std::string>("quantization_formula"); int64_t elements = in->shape().elem_cnt(); T *max_ptr = tmp_buffer->mut_dptr<T>(); T *min_ptr = max_ptr + 1; int64_t *host_current_train_step_ptr = new int64_t[current_train_step->shape().elem_cnt()]; OF_CUDA_CHECK(hipMemcpy(host_current_train_step_ptr, current_train_step->dptr<int64_t>(), current_train_step->shape().elem_cnt() * sizeof(int64_t), hipMemcpyDefault)); if (*host_current_train_step_ptr <= stop_update_after_iters && is_training) { LAUNCH_CUDA_KERNEL((InitMaxMin<T>), ctx->device_ctx(), 1, 0, 1, max_ptr, min_ptr); LAUNCH_CUDA_KERNEL((ReduceMaxMinPerLayer<T>), ctx->device_ctx(), elements, kCudaThreadsNumPerBlock * 2 * sizeof(T), in->dptr<T>(), elements, max_ptr, min_ptr); } if (quantization_formula == "google") { if (quantization_scheme == "symmetric") { if (*host_current_train_step_ptr <= stop_update_after_iters) { LAUNCH_CUDA_KERNEL((CalScaleZeroPointSymmetric<T>), ctx->device_ctx(), 1, 0, 1, static_cast<double>(quantization_bit), momentum, max_ptr, min_ptr, moving_max->mut_dptr<T>(), moving_min->mut_dptr<T>(), scale->mut_dptr<T>(), zero_point->mut_dptr<T>()); } else { LAUNCH_CUDA_KERNEL((CalFreezeScaleZeroPointSymmetric<T>), ctx->device_ctx(), 1, 0, 1, static_cast<double>(quantization_bit), momentum, moving_max->dptr<T>(), scale->mut_dptr<T>(), zero_point->mut_dptr<T>()); } } else { // quantization_scheme == "affine" if (*host_current_train_step_ptr <= stop_update_after_iters) { LAUNCH_CUDA_KERNEL((CalScaleZeroPointAffine<T>), ctx->device_ctx(), 1, 0, 1, static_cast<double>(quantization_bit), momentum, max_ptr, min_ptr, moving_max->mut_dptr<T>(), moving_min->mut_dptr<T>(), scale->mut_dptr<T>(), zero_point->mut_dptr<T>()); } else { LAUNCH_CUDA_KERNEL((CalFreezeScaleZeroPointAffine<T>), ctx->device_ctx(), 1, 0, 1, static_cast<double>(quantization_bit), momentum, moving_max->dptr<T>(), moving_min->dptr<T>(), scale->mut_dptr<T>(), zero_point->mut_dptr<T>()); } } } else if (quantization_formula == "cambricon") { if (*host_current_train_step_ptr <= stop_update_after_iters) { LAUNCH_CUDA_KERNEL((CalScaleZeroPointCambricon<T>), ctx->device_ctx(), 1, 0, 1, 
static_cast<double>(quantization_bit), momentum, max_ptr, min_ptr, moving_max->mut_dptr<T>(), moving_min->mut_dptr<T>(), scale->mut_dptr<T>(), zero_point->mut_dptr<T>()); } else { LAUNCH_CUDA_KERNEL((CalFreezeScaleZeroPointCambricon<T>), ctx->device_ctx(), 1, 0, 1, static_cast<double>(quantization_bit), momentum, moving_max->dptr<T>(), scale->mut_dptr<T>(), zero_point->mut_dptr<T>()); } } else { UNIMPLEMENTED(); } delete[] host_current_train_step_ptr; } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; #define REGISTER_MOVING_AVERAGE_MIN_MAX_OBSERVER_KERNEL(dtype) \ REGISTER_USER_KERNEL("moving_average_min_max_observer") \ .SetCreateFn<GpuMovingAverageMinMaxObserverKernel<dtype>>() \ .SetIsMatchedHob((user_op::HobDeviceTag() == DeviceType::kGPU) \ & (user_op::HobDataType("in", 0) == GetDataType<dtype>::value)) \ .SetInferTmpSizeFn([](user_op::InferContext *ctx) -> size_t { return 2 * sizeof(dtype); }) REGISTER_MOVING_AVERAGE_MIN_MAX_OBSERVER_KERNEL(float); REGISTER_MOVING_AVERAGE_MIN_MAX_OBSERVER_KERNEL(double); } // namespace oneflow
97c225af0e1060bc49311d5d43f9c3a1e2b4d0bb.cu
/* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/device/cuda_util.h" #include "oneflow/core/framework/framework.h" #include "oneflow/core/cuda/atomic.cuh" #include <float.h> namespace oneflow { namespace { // NOTE(Liang Depeng): refer to // https://stackoverflow.com/questions/17371275/implementing-max-reduce-in-cuda template<typename T> __global__ void ReduceMaxMinPerLayer(const T *input_ptr, const int64_t elements, T *max_ptr, T *min_ptr) { extern __shared__ unsigned char shared_max_min_memory[]; T *shared_max = reinterpret_cast<T *>(shared_max_min_memory); T *shared_min = shared_max + blockDim.x; int64_t tid = threadIdx.x; int64_t gid = (blockDim.x * blockIdx.x) + tid; shared_max[tid] = -FLT_MAX; shared_min[tid] = -FLT_MAX; while (gid < elements) { shared_max[tid] = max(shared_max[tid], input_ptr[gid]); shared_min[tid] = max(shared_min[tid], -input_ptr[gid]); gid += gridDim.x * blockDim.x; } __syncthreads(); gid = (blockDim.x * blockIdx.x) + tid; for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (tid < s && gid < elements) { shared_max[tid] = max(shared_max[tid], shared_max[tid + s]); shared_min[tid] = max(shared_min[tid], shared_min[tid + s]); } __syncthreads(); } if (tid == 0) { cuda::atomic::Max(max_ptr, shared_max[0]); cuda::atomic::Max(min_ptr, shared_min[0]); } } template<typename T> __global__ void InitMaxMin(const int64_t elements, T *max_ptr, T *min_ptr) { int64_t tid = threadIdx.x; int64_t gid = (blockDim.x * blockIdx.x) + tid; while (gid < elements) { max_ptr[gid] = -FLT_MAX; min_ptr[gid] = -FLT_MAX; gid += gridDim.x * blockDim.x; } } template<typename T> __global__ void CalScaleZeroPointSymmetric(const int64_t elements, const double quantization_bit, const float momentum, const T *max_ptr, const T *min_ptr, T *moving_max_ptr, T *moving_min_ptr, T *scale, T *zero_point) { int64_t tid = threadIdx.x; int64_t gid = (blockDim.x * blockIdx.x) + tid; while (gid < elements) { T activation_max = max(fabs(max_ptr[gid]), fabs(min_ptr[gid])); T denominator = static_cast<T>(pow(2.0, quantization_bit - 1)) - 1; if (moving_max_ptr[gid] == 0) moving_max_ptr[gid] = activation_max; else moving_max_ptr[gid] = moving_max_ptr[gid] * momentum + activation_max * (1 - momentum); // NOTE(Liang Depeng): symmetric quantization only use moving_max to calculate the scale moving_min_ptr[gid] = moving_max_ptr[gid]; scale[gid] = moving_max_ptr[gid] / denominator; zero_point[gid] = 0; gid += gridDim.x * blockDim.x; } } template<typename T> __global__ void CalFreezeScaleZeroPointSymmetric(const int64_t elements, const double quantization_bit, const float momentum, const T *moving_max_ptr, T *scale, T *zero_point) { int64_t tid = threadIdx.x; int64_t gid = (blockDim.x * blockIdx.x) + tid; while (gid < elements) { T denominator = static_cast<T>(pow(2.0, quantization_bit - 1)) - 1; scale[gid] = moving_max_ptr[gid] / denominator; zero_point[gid] = 0; gid += gridDim.x * blockDim.x; } } template<typename T> __global__ void CalScaleZeroPointAffine(const int64_t 
elements, const double quantization_bit, const float momentum, const T *max_ptr, const T *min_ptr, T *moving_max_ptr, T *moving_min_ptr, T *scale, T *zero_point) { int64_t tid = threadIdx.x; int64_t gid = (blockDim.x * blockIdx.x) + tid; while (gid < elements) { T denominator = static_cast<T>(pow(2.0, quantization_bit)) - 1; if (moving_max_ptr[gid] == 0) moving_max_ptr[gid] = max_ptr[gid]; else moving_max_ptr[gid] = moving_max_ptr[gid] * momentum + max_ptr[gid] * (1 - momentum); if (moving_min_ptr[gid] == 0) moving_min_ptr[gid] = -min_ptr[gid]; else moving_min_ptr[gid] = moving_min_ptr[gid] * momentum + -min_ptr[gid] * (1 - momentum); T min = moving_min_ptr[gid]; T s = (moving_max_ptr[gid] - min) / denominator; scale[gid] = s; zero_point[gid] = -min / s; gid += gridDim.x * blockDim.x; } } template<typename T> __global__ void CalFreezeScaleZeroPointAffine(const int64_t elements, const double quantization_bit, const float momentum, const T *moving_max_ptr, const T *moving_min_ptr, T *scale, T *zero_point) { int64_t tid = threadIdx.x; int64_t gid = (blockDim.x * blockIdx.x) + tid; while (gid < elements) { T denominator = static_cast<T>(pow(2.0, quantization_bit)) - 1; T min = moving_min_ptr[gid]; T s = (moving_max_ptr[gid] - min) / denominator; scale[gid] = s; zero_point[gid] = -min / s; gid += gridDim.x * blockDim.x; } } template<typename T> __global__ void CalScaleZeroPointCambricon(const int64_t elements, const double quantization_bit, const float momentum, const T *max_ptr, const T *min_ptr, T *moving_max_ptr, T *moving_min_ptr, T *scale, T *zero_point) { int64_t tid = threadIdx.x; int64_t gid = (blockDim.x * blockIdx.x) + tid; while (gid < elements) { T activation_max = max(fabs(max_ptr[gid]), fabs(min_ptr[gid])); if (moving_max_ptr[gid] == 0) moving_max_ptr[gid] = activation_max; else moving_max_ptr[gid] = moving_max_ptr[gid] * momentum + activation_max * (1 - momentum); // NOTE(Liang Depeng): cambricon quantization only use moving_max to calculate the scale moving_min_ptr[gid] = moving_max_ptr[gid]; scale[gid] = floor(log2(moving_max_ptr[gid])) - (quantization_bit - 2); zero_point[gid] = 0; gid += gridDim.x * blockDim.x; } } template<typename T> __global__ void CalFreezeScaleZeroPointCambricon(const int64_t elements, const double quantization_bit, const float momentum, const T *moving_max_ptr, T *scale, T *zero_point) { int64_t tid = threadIdx.x; int64_t gid = (blockDim.x * blockIdx.x) + tid; while (gid < elements) { T denominator = static_cast<T>(pow(2.0, quantization_bit - 1)) - 1; scale[gid] = floor(log2(moving_max_ptr[gid])) - (quantization_bit - 2); zero_point[gid] = 0; gid += gridDim.x * blockDim.x; } } } // namespace #define LAUNCH_CUDA_KERNEL(func, device_ctx_ptr, thread_num, shared_mem_size, ...) 
\ func<<<SMBlocksNum4ThreadsNum(thread_num), kCudaThreadsNumPerBlock, shared_mem_size, \ (device_ctx_ptr)->cuda_stream()>>>(__VA_ARGS__) template<typename T> class GpuMovingAverageMinMaxObserverKernel final : public user_op::OpKernel { public: GpuMovingAverageMinMaxObserverKernel() = default; ~GpuMovingAverageMinMaxObserverKernel() = default; private: void Compute(user_op::KernelComputeContext *ctx) const override { const user_op::Tensor *in = ctx->Tensor4ArgNameAndIndex("in", 0); const user_op::Tensor *current_train_step = ctx->Tensor4ArgNameAndIndex("current_train_step", 0); user_op::Tensor *moving_max = ctx->Tensor4ArgNameAndIndex("moving_max", 0); user_op::Tensor *moving_min = ctx->Tensor4ArgNameAndIndex("moving_min", 0); user_op::Tensor *scale = ctx->Tensor4ArgNameAndIndex("scale", 0); user_op::Tensor *zero_point = ctx->Tensor4ArgNameAndIndex("zero_point", 0); user_op::Tensor *tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0); const bool is_training = ctx->Attr<bool>("training"); const int64_t stop_update_after_iters = ctx->Attr<int64_t>("stop_update_after_iters"); const std::string quantization_scheme = ctx->Attr<std::string>("quantization_scheme"); const int32_t quantization_bit = ctx->Attr<int32_t>("quantization_bit"); const float momentum = ctx->Attr<float>("momentum"); const std::string quantization_formula = ctx->Attr<std::string>("quantization_formula"); int64_t elements = in->shape().elem_cnt(); T *max_ptr = tmp_buffer->mut_dptr<T>(); T *min_ptr = max_ptr + 1; int64_t *host_current_train_step_ptr = new int64_t[current_train_step->shape().elem_cnt()]; OF_CUDA_CHECK(cudaMemcpy(host_current_train_step_ptr, current_train_step->dptr<int64_t>(), current_train_step->shape().elem_cnt() * sizeof(int64_t), cudaMemcpyDefault)); if (*host_current_train_step_ptr <= stop_update_after_iters && is_training) { LAUNCH_CUDA_KERNEL((InitMaxMin<T>), ctx->device_ctx(), 1, 0, 1, max_ptr, min_ptr); LAUNCH_CUDA_KERNEL((ReduceMaxMinPerLayer<T>), ctx->device_ctx(), elements, kCudaThreadsNumPerBlock * 2 * sizeof(T), in->dptr<T>(), elements, max_ptr, min_ptr); } if (quantization_formula == "google") { if (quantization_scheme == "symmetric") { if (*host_current_train_step_ptr <= stop_update_after_iters) { LAUNCH_CUDA_KERNEL((CalScaleZeroPointSymmetric<T>), ctx->device_ctx(), 1, 0, 1, static_cast<double>(quantization_bit), momentum, max_ptr, min_ptr, moving_max->mut_dptr<T>(), moving_min->mut_dptr<T>(), scale->mut_dptr<T>(), zero_point->mut_dptr<T>()); } else { LAUNCH_CUDA_KERNEL((CalFreezeScaleZeroPointSymmetric<T>), ctx->device_ctx(), 1, 0, 1, static_cast<double>(quantization_bit), momentum, moving_max->dptr<T>(), scale->mut_dptr<T>(), zero_point->mut_dptr<T>()); } } else { // quantization_scheme == "affine" if (*host_current_train_step_ptr <= stop_update_after_iters) { LAUNCH_CUDA_KERNEL((CalScaleZeroPointAffine<T>), ctx->device_ctx(), 1, 0, 1, static_cast<double>(quantization_bit), momentum, max_ptr, min_ptr, moving_max->mut_dptr<T>(), moving_min->mut_dptr<T>(), scale->mut_dptr<T>(), zero_point->mut_dptr<T>()); } else { LAUNCH_CUDA_KERNEL((CalFreezeScaleZeroPointAffine<T>), ctx->device_ctx(), 1, 0, 1, static_cast<double>(quantization_bit), momentum, moving_max->dptr<T>(), moving_min->dptr<T>(), scale->mut_dptr<T>(), zero_point->mut_dptr<T>()); } } } else if (quantization_formula == "cambricon") { if (*host_current_train_step_ptr <= stop_update_after_iters) { LAUNCH_CUDA_KERNEL((CalScaleZeroPointCambricon<T>), ctx->device_ctx(), 1, 0, 1, static_cast<double>(quantization_bit), momentum, max_ptr, 
min_ptr, moving_max->mut_dptr<T>(), moving_min->mut_dptr<T>(), scale->mut_dptr<T>(), zero_point->mut_dptr<T>()); } else { LAUNCH_CUDA_KERNEL((CalFreezeScaleZeroPointCambricon<T>), ctx->device_ctx(), 1, 0, 1, static_cast<double>(quantization_bit), momentum, moving_max->dptr<T>(), scale->mut_dptr<T>(), zero_point->mut_dptr<T>()); } } else { UNIMPLEMENTED(); } delete[] host_current_train_step_ptr; } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; #define REGISTER_MOVING_AVERAGE_MIN_MAX_OBSERVER_KERNEL(dtype) \ REGISTER_USER_KERNEL("moving_average_min_max_observer") \ .SetCreateFn<GpuMovingAverageMinMaxObserverKernel<dtype>>() \ .SetIsMatchedHob((user_op::HobDeviceTag() == DeviceType::kGPU) \ & (user_op::HobDataType("in", 0) == GetDataType<dtype>::value)) \ .SetInferTmpSizeFn([](user_op::InferContext *ctx) -> size_t { return 2 * sizeof(dtype); }) REGISTER_MOVING_AVERAGE_MIN_MAX_OBSERVER_KERNEL(float); REGISTER_MOVING_AVERAGE_MIN_MAX_OBSERVER_KERNEL(double); } // namespace oneflow
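// --- Hedged host-side sketch (not part of the kernels above): the scale /
// --- zero-point arithmetic of the "google" branches on plain scalars, convenient
// --- for unit-testing CalFreezeScaleZeroPointSymmetric / ...Affine on the CPU.
// --- The helper name and the use of integer shifts in place of pow(2.0, ...) are
// --- this sketch's own choices.
void google_scale_zero_point(bool symmetric, int quantization_bit,
                             float moving_max, float moving_min,
                             float* scale, float* zero_point)
{
    if (symmetric) {
        float denominator = (float)((1 << (quantization_bit - 1)) - 1);  // e.g. 127 for 8 bits
        *scale = moving_max / denominator;
        *zero_point = 0.0f;
    } else {  // affine
        float denominator = (float)((1 << quantization_bit) - 1);        // e.g. 255 for 8 bits
        float s = (moving_max - moving_min) / denominator;
        *scale = s;
        *zero_point = -moving_min / s;
    }
}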
30a30a8e0ea6c6729f8c82a647dddf0e05bca924.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "cuConvertC3ToC4Kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const float3 *src = NULL; hipMalloc(&src, XSIZE*YSIZE); size_t src_stride = 2; float4 *dst = NULL; hipMalloc(&dst, XSIZE*YSIZE); size_t dst_stride = 2; int width = XSIZE; int height = YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( cuConvertC3ToC4Kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, src,src_stride,dst,dst_stride,width,height); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( cuConvertC3ToC4Kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, src,src_stride,dst,dst_stride,width,height); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( cuConvertC3ToC4Kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, src,src_stride,dst,dst_stride,width,height); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
30a30a8e0ea6c6729f8c82a647dddf0e05bca924.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "cuConvertC3ToC4Kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const float3 *src = NULL; cudaMalloc(&src, XSIZE*YSIZE); size_t src_stride = 2; float4 *dst = NULL; cudaMalloc(&dst, XSIZE*YSIZE); size_t dst_stride = 2; int width = XSIZE; int height = YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); cuConvertC3ToC4Kernel<<<gridBlock,threadBlock>>>(src,src_stride,dst,dst_stride,width,height); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { cuConvertC3ToC4Kernel<<<gridBlock,threadBlock>>>(src,src_stride,dst,dst_stride,width,height); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { cuConvertC3ToC4Kernel<<<gridBlock,threadBlock>>>(src,src_stride,dst,dst_stride,width,height); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
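// --- Hedged timing sketch (not in the original file): kernel launches are
// --- asynchronous, so stopping steady_clock without synchronizing can under-count
// --- the GPU work; cudaEvent timestamps recorded around the same 1000 launches
// --- measure it directly. Parameter types mirror the variables declared in main().
float time_1000_launches(dim3 gridBlock, dim3 threadBlock,
                         const float3* src, size_t src_stride,
                         float4* dst, size_t dst_stride, int width, int height)
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    for (int i = 0; i < 1000; i++)
        cuConvertC3ToC4Kernel<<<gridBlock, threadBlock>>>(src, src_stride, dst, dst_stride, width, height);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);        // blocks until the last kernel has finished
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms;                         // total milliseconds for the 1000 launches
}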
545a4d66fe8bb80a8f8cb352fe1ea19855c05b97.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // #include <hip/hip_vector_types.h> #include <optix_device.h> #include "optixWhitted.h" #include "helpers.h" extern "C" { __constant__ Params params; } static __device__ __inline__ RadiancePRD getRadiancePRD() { RadiancePRD prd; prd.result.x = int_as_float( optixGetPayload_0() ); prd.result.y = int_as_float( optixGetPayload_1() ); prd.result.z = int_as_float( optixGetPayload_2() ); prd.importance = int_as_float( optixGetPayload_3() ); prd.depth = optixGetPayload_4(); return prd; } static __device__ __inline__ void setRadiancePRD( const RadiancePRD &prd ) { optixSetPayload_0( float_as_int(prd.result.x) ); optixSetPayload_1( float_as_int(prd.result.y) ); optixSetPayload_2( float_as_int(prd.result.z) ); optixSetPayload_3( float_as_int(prd.importance) ); optixSetPayload_4( prd.depth ); } static __device__ __inline__ OcclusionPRD getOcclusionPRD() { OcclusionPRD prd; prd.attenuation.x = int_as_float( optixGetPayload_0() ); prd.attenuation.y = int_as_float( optixGetPayload_1() ); prd.attenuation.z = int_as_float( optixGetPayload_2() ); return prd; } static __device__ __inline__ void setOcclusionPRD( const OcclusionPRD &prd ) { optixSetPayload_0( float_as_int(prd.attenuation.x) ); optixSetPayload_1( float_as_int(prd.attenuation.y) ); optixSetPayload_2( float_as_int(prd.attenuation.z) ); } static __device__ __inline__ float3 traceRadianceRay( float3 origin, float3 direction, int depth, float importance) { RadiancePRD prd; prd.depth = depth; prd.importance = importance; optixTrace( params.handle, origin, direction, params.scene_epsilon, 1e16f, 0.0f, OptixVisibilityMask( 1 ), OPTIX_RAY_FLAG_NONE, RAY_TYPE_RADIANCE, RAY_TYPE_COUNT, RAY_TYPE_RADIANCE, float3_as_args(prd.result), /* Can't use float_as_int() because it returns rvalue but payload requires a lvalue */ reinterpret_cast<unsigned int&>(prd.importance), reinterpret_cast<unsigned int&>(prd.depth) ); return prd.result; } static __device__ void 
phongShadowed() { // this material is opaque, so it fully attenuates all shadow rays OcclusionPRD prd; prd.attenuation = make_float3(0.f); setOcclusionPRD(prd); } static __device__ void phongShade( float3 p_Kd, float3 p_Ka, float3 p_Ks, float3 p_Kr, float p_phong_exp, float3 p_normal ) { const float3 ray_orig = optixGetWorldRayOrigin(); const float3 ray_dir = optixGetWorldRayDirection(); const float ray_t = optixGetRayTmax(); RadiancePRD prd = getRadiancePRD(); float3 hit_point = ray_orig + ray_t * ray_dir; // ambient contribution float3 result = p_Ka * params.ambient_light_color; // compute direct lighting BasicLight light = params.light; float Ldist = length(light.pos - hit_point); float3 L = normalize(light.pos - hit_point); float nDl = dot( p_normal, L); // cast shadow ray float3 light_attenuation = make_float3(static_cast<float>( nDl > 0.0f )); if ( nDl > 0.0f ) { OcclusionPRD shadow_prd; shadow_prd.attenuation = make_float3(1.0f); optixTrace( params.handle, hit_point, L, 0.01f, Ldist, 0.0f, OptixVisibilityMask( 1 ), OPTIX_RAY_FLAG_NONE, RAY_TYPE_OCCLUSION, RAY_TYPE_COUNT, RAY_TYPE_OCCLUSION, float3_as_args(shadow_prd.attenuation) ); light_attenuation = shadow_prd.attenuation; } // If not completely shadowed, light the hit point if( fmaxf(light_attenuation) > 0.0f ) { float3 Lc = light.color * light_attenuation; result += p_Kd * nDl * Lc; float3 H = normalize(L - ray_dir); float nDh = dot( p_normal, H ); if(nDh > 0) { float power = pow(nDh, p_phong_exp); result += p_Ks * power * Lc; } } if( fmaxf( p_Kr ) > 0 ) { // ray tree attenuation float new_importance = prd.importance * luminance( p_Kr ); int new_depth = prd.depth + 1; // reflection ray // compare new_depth to max_depth - 1 to leave room for a potential shadow ray trace if( new_importance >= 0.01f && new_depth <= params.max_depth - 1) { float3 R = reflect( ray_dir, p_normal ); result += p_Kr * traceRadianceRay( hit_point, R, new_depth, new_importance); } } // pass the color back prd.result = result; setRadiancePRD(prd); } extern "C" __global__ void __closesthit__checker_radiance() { const HitGroupData* sbt_data = (HitGroupData*) optixGetSbtDataPointer(); const CheckerPhong &checker = sbt_data->shading.checker; float3 Kd, Ka, Ks, Kr; float phong_exp; float2 texcoord = make_float2( int_as_float( optixGetAttribute_3() ), int_as_float( optixGetAttribute_4() ) ); float2 t = texcoord * checker.inv_checker_size; t.x = floorf(t.x); t.y = floorf(t.y); int which_check = ( static_cast<int>( t.x ) + static_cast<int>( t.y ) ) & 1; if ( which_check ) { Kd = checker.Kd1; Ka = checker.Ka1; Ks = checker.Ks1; Kr = checker.Kr1; phong_exp = checker.phong_exp1; } else { Kd = checker.Kd2; Ka = checker.Ka2; Ks = checker.Ks2; Kr = checker.Kr2; phong_exp = checker.phong_exp2; } float3 object_normal = make_float3( int_as_float( optixGetAttribute_0() ), int_as_float( optixGetAttribute_1() ), int_as_float( optixGetAttribute_2() )); float3 world_normal = normalize( optixTransformNormalFromObjectToWorldSpace(object_normal) ); float3 ffnormal = faceforward( world_normal, -optixGetWorldRayDirection(), world_normal ); phongShade( Kd, Ka, Ks, Kr, phong_exp, ffnormal ); } extern "C" __global__ void __closesthit__metal_radiance() { const HitGroupData* sbt_data = (HitGroupData*) optixGetSbtDataPointer(); const Phong &phong = sbt_data->shading.metal; float3 object_normal = make_float3( int_as_float( optixGetAttribute_0() ), int_as_float( optixGetAttribute_1() ), int_as_float( optixGetAttribute_2() )); float3 world_normal = normalize( 
optixTransformNormalFromObjectToWorldSpace( object_normal ) ); float3 ffnormal = faceforward( world_normal, -optixGetWorldRayDirection(), world_normal ); phongShade( phong.Kd, phong.Ka, phong.Ks, phong.Kr, phong.phong_exp, ffnormal ); } extern "C" __global__ void __closesthit__full_occlusion() { phongShadowed(); } extern "C" __global__ void __closesthit__glass_radiance() { const HitGroupData* sbt_data = (HitGroupData*) optixGetSbtDataPointer(); const Glass &glass = sbt_data->shading.glass; RadiancePRD prd_radiance = getRadiancePRD(); float3 object_normal = make_float3( int_as_float( optixGetAttribute_0() ), int_as_float( optixGetAttribute_1() ), int_as_float( optixGetAttribute_2() )); object_normal = normalize( object_normal ); // intersection vectors const float3 n = normalize( optixTransformNormalFromObjectToWorldSpace( object_normal) ); // normal const float3 ray_orig = optixGetWorldRayOrigin(); const float3 ray_dir = optixGetWorldRayDirection(); // incident direction const float ray_t = optixGetRayTmax(); float3 t; // transmission direction float3 r; // reflection direction float3 hit_point = ray_orig + ray_t * ray_dir; SphereShellHitType hit_type = (SphereShellHitType) optixGetHitKind(); float3 front_hit_point = hit_point, back_hit_point = hit_point; if (hit_type & HIT_OUTSIDE_FROM_OUTSIDE || hit_type & HIT_INSIDE_FROM_INSIDE) { front_hit_point += params.scene_epsilon * object_normal; back_hit_point -= params.scene_epsilon * object_normal; } else { front_hit_point -= params.scene_epsilon * object_normal; back_hit_point += params.scene_epsilon * object_normal; } const float3 fhp = optixTransformPointFromObjectToWorldSpace( front_hit_point ); const float3 bhp = optixTransformPointFromObjectToWorldSpace( back_hit_point ); float reflection = 1.0f; float3 result = make_float3(0.0f); const int depth = prd_radiance.depth; float3 beer_attenuation; if(dot(n, ray_dir) > 0) { // Beer's law attenuation beer_attenuation = exp(glass.extinction_constant * ray_t); } else { beer_attenuation = make_float3(1); } // refraction // compare depth to max_depth - 1 to leave room for a potential shadow ray trace if (depth < min(glass.refraction_maxdepth, params.max_depth - 1)) { if ( refract(t, ray_dir, n, glass.refraction_index) ) { // check for external or internal reflection float cos_theta = dot(ray_dir, n); if (cos_theta < 0.0f) cos_theta = -cos_theta; else cos_theta = dot(t, n); reflection = fresnel_schlick( cos_theta, glass.fresnel_exponent, glass.fresnel_minimum, glass.fresnel_maximum); float importance = prd_radiance.importance * (1.0f-reflection) * luminance( glass.refraction_color * beer_attenuation ); float3 color = glass.cutoff_color; if ( importance > glass.importance_cutoff ) { color = traceRadianceRay(bhp, t, depth+1, importance); } result += (1.0f - reflection) * glass.refraction_color * color; } // else TIR } // else reflection==1 so refraction has 0 weight // reflection // compare depth to max_depth - 1 to leave room for a potential shadow ray trace float3 color = glass.cutoff_color; if (depth < min(glass.reflection_maxdepth, params.max_depth - 1)) { r = reflect(ray_dir, n); float importance = prd_radiance.importance * reflection * luminance( glass.reflection_color * beer_attenuation ); if ( importance > glass.importance_cutoff ) { color = traceRadianceRay( fhp, r, depth+1, importance ); } } result += reflection * glass.reflection_color * color; result = result * beer_attenuation; prd_radiance.result = result; setRadiancePRD(prd_radiance); } extern "C" __global__ void 
__anyhit__glass_occlusion() { const HitGroupData* sbt_data = (HitGroupData*) optixGetSbtDataPointer(); const Glass &glass = sbt_data->shading.glass; float3 object_normal = make_float3( int_as_float( optixGetAttribute_0() ), int_as_float( optixGetAttribute_1() ), int_as_float( optixGetAttribute_2() )); OcclusionPRD shadow_prd = getOcclusionPRD(); float3 world_normal = normalize( optixTransformNormalFromObjectToWorldSpace( object_normal ) ); float nDi = fabs(dot(world_normal, optixGetWorldRayDirection())); shadow_prd.attenuation *= 1-fresnel_schlick(nDi, 5, 1-glass.shadow_attenuation, make_float3(1)); setOcclusionPRD(shadow_prd); // Test the attenuation of the light from the glass shell if(luminance(shadow_prd.attenuation) < glass.importance_cutoff) // The attenuation is so high, > 99% blocked, that we can consider testing to be done. optixTerminateRay(); else // There is still some light coming through the glass shell that we should test other occluders. // We "ignore" the intersection with the glass shell, meaning that shadow testing will continue. // If the ray does not hit another occluder, the light's attenuation from this glass shell // (along with other glass shells) is then used. optixIgnoreIntersection(); } extern "C" __global__ void __miss__constant_bg() { const MissData* sbt_data = (MissData*) optixGetSbtDataPointer(); RadiancePRD prd = getRadiancePRD(); prd.result = sbt_data->bg_color; setRadiancePRD(prd); }
545a4d66fe8bb80a8f8cb352fe1ea19855c05b97.cu
// // Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // #include <vector_types.h> #include <optix_device.h> #include "optixWhitted.h" #include "helpers.h" extern "C" { __constant__ Params params; } static __device__ __inline__ RadiancePRD getRadiancePRD() { RadiancePRD prd; prd.result.x = int_as_float( optixGetPayload_0() ); prd.result.y = int_as_float( optixGetPayload_1() ); prd.result.z = int_as_float( optixGetPayload_2() ); prd.importance = int_as_float( optixGetPayload_3() ); prd.depth = optixGetPayload_4(); return prd; } static __device__ __inline__ void setRadiancePRD( const RadiancePRD &prd ) { optixSetPayload_0( float_as_int(prd.result.x) ); optixSetPayload_1( float_as_int(prd.result.y) ); optixSetPayload_2( float_as_int(prd.result.z) ); optixSetPayload_3( float_as_int(prd.importance) ); optixSetPayload_4( prd.depth ); } static __device__ __inline__ OcclusionPRD getOcclusionPRD() { OcclusionPRD prd; prd.attenuation.x = int_as_float( optixGetPayload_0() ); prd.attenuation.y = int_as_float( optixGetPayload_1() ); prd.attenuation.z = int_as_float( optixGetPayload_2() ); return prd; } static __device__ __inline__ void setOcclusionPRD( const OcclusionPRD &prd ) { optixSetPayload_0( float_as_int(prd.attenuation.x) ); optixSetPayload_1( float_as_int(prd.attenuation.y) ); optixSetPayload_2( float_as_int(prd.attenuation.z) ); } static __device__ __inline__ float3 traceRadianceRay( float3 origin, float3 direction, int depth, float importance) { RadiancePRD prd; prd.depth = depth; prd.importance = importance; optixTrace( params.handle, origin, direction, params.scene_epsilon, 1e16f, 0.0f, OptixVisibilityMask( 1 ), OPTIX_RAY_FLAG_NONE, RAY_TYPE_RADIANCE, RAY_TYPE_COUNT, RAY_TYPE_RADIANCE, float3_as_args(prd.result), /* Can't use float_as_int() because it returns rvalue but payload requires a lvalue */ reinterpret_cast<unsigned int&>(prd.importance), reinterpret_cast<unsigned int&>(prd.depth) ); return prd.result; } static __device__ void phongShadowed() { // this material is opaque, so it fully attenuates all shadow rays OcclusionPRD 
prd; prd.attenuation = make_float3(0.f); setOcclusionPRD(prd); } static __device__ void phongShade( float3 p_Kd, float3 p_Ka, float3 p_Ks, float3 p_Kr, float p_phong_exp, float3 p_normal ) { const float3 ray_orig = optixGetWorldRayOrigin(); const float3 ray_dir = optixGetWorldRayDirection(); const float ray_t = optixGetRayTmax(); RadiancePRD prd = getRadiancePRD(); float3 hit_point = ray_orig + ray_t * ray_dir; // ambient contribution float3 result = p_Ka * params.ambient_light_color; // compute direct lighting BasicLight light = params.light; float Ldist = length(light.pos - hit_point); float3 L = normalize(light.pos - hit_point); float nDl = dot( p_normal, L); // cast shadow ray float3 light_attenuation = make_float3(static_cast<float>( nDl > 0.0f )); if ( nDl > 0.0f ) { OcclusionPRD shadow_prd; shadow_prd.attenuation = make_float3(1.0f); optixTrace( params.handle, hit_point, L, 0.01f, Ldist, 0.0f, OptixVisibilityMask( 1 ), OPTIX_RAY_FLAG_NONE, RAY_TYPE_OCCLUSION, RAY_TYPE_COUNT, RAY_TYPE_OCCLUSION, float3_as_args(shadow_prd.attenuation) ); light_attenuation = shadow_prd.attenuation; } // If not completely shadowed, light the hit point if( fmaxf(light_attenuation) > 0.0f ) { float3 Lc = light.color * light_attenuation; result += p_Kd * nDl * Lc; float3 H = normalize(L - ray_dir); float nDh = dot( p_normal, H ); if(nDh > 0) { float power = pow(nDh, p_phong_exp); result += p_Ks * power * Lc; } } if( fmaxf( p_Kr ) > 0 ) { // ray tree attenuation float new_importance = prd.importance * luminance( p_Kr ); int new_depth = prd.depth + 1; // reflection ray // compare new_depth to max_depth - 1 to leave room for a potential shadow ray trace if( new_importance >= 0.01f && new_depth <= params.max_depth - 1) { float3 R = reflect( ray_dir, p_normal ); result += p_Kr * traceRadianceRay( hit_point, R, new_depth, new_importance); } } // pass the color back prd.result = result; setRadiancePRD(prd); } extern "C" __global__ void __closesthit__checker_radiance() { const HitGroupData* sbt_data = (HitGroupData*) optixGetSbtDataPointer(); const CheckerPhong &checker = sbt_data->shading.checker; float3 Kd, Ka, Ks, Kr; float phong_exp; float2 texcoord = make_float2( int_as_float( optixGetAttribute_3() ), int_as_float( optixGetAttribute_4() ) ); float2 t = texcoord * checker.inv_checker_size; t.x = floorf(t.x); t.y = floorf(t.y); int which_check = ( static_cast<int>( t.x ) + static_cast<int>( t.y ) ) & 1; if ( which_check ) { Kd = checker.Kd1; Ka = checker.Ka1; Ks = checker.Ks1; Kr = checker.Kr1; phong_exp = checker.phong_exp1; } else { Kd = checker.Kd2; Ka = checker.Ka2; Ks = checker.Ks2; Kr = checker.Kr2; phong_exp = checker.phong_exp2; } float3 object_normal = make_float3( int_as_float( optixGetAttribute_0() ), int_as_float( optixGetAttribute_1() ), int_as_float( optixGetAttribute_2() )); float3 world_normal = normalize( optixTransformNormalFromObjectToWorldSpace(object_normal) ); float3 ffnormal = faceforward( world_normal, -optixGetWorldRayDirection(), world_normal ); phongShade( Kd, Ka, Ks, Kr, phong_exp, ffnormal ); } extern "C" __global__ void __closesthit__metal_radiance() { const HitGroupData* sbt_data = (HitGroupData*) optixGetSbtDataPointer(); const Phong &phong = sbt_data->shading.metal; float3 object_normal = make_float3( int_as_float( optixGetAttribute_0() ), int_as_float( optixGetAttribute_1() ), int_as_float( optixGetAttribute_2() )); float3 world_normal = normalize( optixTransformNormalFromObjectToWorldSpace( object_normal ) ); float3 ffnormal = faceforward( world_normal, 
-optixGetWorldRayDirection(), world_normal ); phongShade( phong.Kd, phong.Ka, phong.Ks, phong.Kr, phong.phong_exp, ffnormal ); } extern "C" __global__ void __closesthit__full_occlusion() { phongShadowed(); } extern "C" __global__ void __closesthit__glass_radiance() { const HitGroupData* sbt_data = (HitGroupData*) optixGetSbtDataPointer(); const Glass &glass = sbt_data->shading.glass; RadiancePRD prd_radiance = getRadiancePRD(); float3 object_normal = make_float3( int_as_float( optixGetAttribute_0() ), int_as_float( optixGetAttribute_1() ), int_as_float( optixGetAttribute_2() )); object_normal = normalize( object_normal ); // intersection vectors const float3 n = normalize( optixTransformNormalFromObjectToWorldSpace( object_normal) ); // normal const float3 ray_orig = optixGetWorldRayOrigin(); const float3 ray_dir = optixGetWorldRayDirection(); // incident direction const float ray_t = optixGetRayTmax(); float3 t; // transmission direction float3 r; // reflection direction float3 hit_point = ray_orig + ray_t * ray_dir; SphereShellHitType hit_type = (SphereShellHitType) optixGetHitKind(); float3 front_hit_point = hit_point, back_hit_point = hit_point; if (hit_type & HIT_OUTSIDE_FROM_OUTSIDE || hit_type & HIT_INSIDE_FROM_INSIDE) { front_hit_point += params.scene_epsilon * object_normal; back_hit_point -= params.scene_epsilon * object_normal; } else { front_hit_point -= params.scene_epsilon * object_normal; back_hit_point += params.scene_epsilon * object_normal; } const float3 fhp = optixTransformPointFromObjectToWorldSpace( front_hit_point ); const float3 bhp = optixTransformPointFromObjectToWorldSpace( back_hit_point ); float reflection = 1.0f; float3 result = make_float3(0.0f); const int depth = prd_radiance.depth; float3 beer_attenuation; if(dot(n, ray_dir) > 0) { // Beer's law attenuation beer_attenuation = exp(glass.extinction_constant * ray_t); } else { beer_attenuation = make_float3(1); } // refraction // compare depth to max_depth - 1 to leave room for a potential shadow ray trace if (depth < min(glass.refraction_maxdepth, params.max_depth - 1)) { if ( refract(t, ray_dir, n, glass.refraction_index) ) { // check for external or internal reflection float cos_theta = dot(ray_dir, n); if (cos_theta < 0.0f) cos_theta = -cos_theta; else cos_theta = dot(t, n); reflection = fresnel_schlick( cos_theta, glass.fresnel_exponent, glass.fresnel_minimum, glass.fresnel_maximum); float importance = prd_radiance.importance * (1.0f-reflection) * luminance( glass.refraction_color * beer_attenuation ); float3 color = glass.cutoff_color; if ( importance > glass.importance_cutoff ) { color = traceRadianceRay(bhp, t, depth+1, importance); } result += (1.0f - reflection) * glass.refraction_color * color; } // else TIR } // else reflection==1 so refraction has 0 weight // reflection // compare depth to max_depth - 1 to leave room for a potential shadow ray trace float3 color = glass.cutoff_color; if (depth < min(glass.reflection_maxdepth, params.max_depth - 1)) { r = reflect(ray_dir, n); float importance = prd_radiance.importance * reflection * luminance( glass.reflection_color * beer_attenuation ); if ( importance > glass.importance_cutoff ) { color = traceRadianceRay( fhp, r, depth+1, importance ); } } result += reflection * glass.reflection_color * color; result = result * beer_attenuation; prd_radiance.result = result; setRadiancePRD(prd_radiance); } extern "C" __global__ void __anyhit__glass_occlusion() { const HitGroupData* sbt_data = (HitGroupData*) optixGetSbtDataPointer(); const Glass &glass = 
sbt_data->shading.glass; float3 object_normal = make_float3( int_as_float( optixGetAttribute_0() ), int_as_float( optixGetAttribute_1() ), int_as_float( optixGetAttribute_2() )); OcclusionPRD shadow_prd = getOcclusionPRD(); float3 world_normal = normalize( optixTransformNormalFromObjectToWorldSpace( object_normal ) ); float nDi = fabs(dot(world_normal, optixGetWorldRayDirection())); shadow_prd.attenuation *= 1-fresnel_schlick(nDi, 5, 1-glass.shadow_attenuation, make_float3(1)); setOcclusionPRD(shadow_prd); // Test the attenuation of the light from the glass shell if(luminance(shadow_prd.attenuation) < glass.importance_cutoff) // The attenuation is so high, > 99% blocked, that we can consider testing to be done. optixTerminateRay(); else // There is still some light coming through the glass shell that we should test other occluders. // We "ignore" the intersection with the glass shell, meaning that shadow testing will continue. // If the ray does not hit another occluder, the light's attenuation from this glass shell // (along with other glass shells) is then used. optixIgnoreIntersection(); } extern "C" __global__ void __miss__constant_bg() { const MissData* sbt_data = (MissData*) optixGetSbtDataPointer(); RadiancePRD prd = getRadiancePRD(); prd.result = sbt_data->bg_color; setRadiancePRD(prd); }
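The closest-hit and miss programs above shuttle float results through 32-bit integer payload registers by reinterpreting bits rather than converting values. Below is a minimal stand-alone round trip of that packing written with plain CUDA intrinsics; the kernel name and test values are illustrative and not part of the OptiX sample.

// Sketch: round-tripping floats through 32-bit payload slots via bit reinterpretation.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void payload_roundtrip(const float3 in, float3 *out) {
    // Pack: reinterpret each float's bits as an unsigned int (what the
    // optixSetPayload_* calls carry), then unpack them again.
    unsigned int p0 = __float_as_uint(in.x);
    unsigned int p1 = __float_as_uint(in.y);
    unsigned int p2 = __float_as_uint(in.z);
    out->x = __uint_as_float(p0);
    out->y = __uint_as_float(p1);
    out->z = __uint_as_float(p2);
}

int main() {
    float3 *d_out, h_out;
    cudaMalloc(&d_out, sizeof(float3));
    payload_roundtrip<<<1, 1>>>(make_float3(0.25f, -3.5f, 1e-6f), d_out);
    cudaMemcpy(&h_out, d_out, sizeof(float3), cudaMemcpyDeviceToHost);
    printf("%g %g %g\n", h_out.x, h_out.y, h_out.z);   // expect 0.25 -3.5 1e-06
    cudaFree(d_out);
    return 0;
}

Because only the bit pattern is copied, the floats come back exactly, including signs, denormals, and infinities.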
8ccbea056d7af9a45607502b77fad5d01ea9f644.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <fstream> #include <iomanip> #include <iostream> #include <cstdio> hipEvent_t start, stop; float elapsedTime = 0.0; constexpr int N = 1000; constexpr int BlkNum = 100; __global__ void dot(double *a, double *b, double *c) { int i = blockIdx.x * blockDim.x + threadIdx.x; c[i] = 0; for (int k = 0; k < N; k++) c[i] += a[i * N + k] * b[k]; } int main() { std::ios::sync_with_stdio(false); std::ifstream in("in.txt"); if (!in) { std::cerr << "Err: input\n"; return -2; } auto a = new double[N * N], b = new double[N], res = new double[N]; for (int i = 0; i < N; i++) for (int j = 0; j < N; j++) in >> a[i * N + j]; for (int i = 0; i < N; i++) in >> b[i]; in.close(); double *a1, *b1, *res1; hipMalloc(&a1, sizeof(double) * N * N); hipMemcpy(a1, a, sizeof(double) * N * N, hipMemcpyHostToDevice); hipMalloc(&b1, sizeof(double) * N); hipMemcpy(b1, b, sizeof(double) * N, hipMemcpyHostToDevice); hipMalloc(&res1, sizeof(double) * N); hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); hipLaunchKernelGGL(( dot), dim3(BlkNum), dim3(N / BlkNum), 0, 0, a1, b1, res1); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); hipEventDestroy(start); hipEventDestroy(stop); hipMemcpy(res, res1, sizeof(double) * N, hipMemcpyDeviceToHost); std::cout << "Running Time: " << elapsedTime << "s" << std::endl; std::ofstream out("out.txt"); if (!out) { std::cerr << "Err: output\n"; return -1; } for (int i = 0; i < N; i++) out << std::setprecision(15) << res[i] << "\n"; out.close(); free(a); free(b); free(res); hipFree(a1); hipFree(b1); hipFree(res1); return 0; }
8ccbea056d7af9a45607502b77fad5d01ea9f644.cu
#include <fstream> #include <iomanip> #include <iostream> #include <cstdio> cudaEvent_t start, stop; float elapsedTime = 0.0; constexpr int N = 1000; constexpr int BlkNum = 100; __global__ void dot(double *a, double *b, double *c) { int i = blockIdx.x * blockDim.x + threadIdx.x; c[i] = 0; for (int k = 0; k < N; k++) c[i] += a[i * N + k] * b[k]; } int main() { std::ios::sync_with_stdio(false); std::ifstream in("in.txt"); if (!in) { std::cerr << "Err: input\n"; return -2; } auto a = new double[N * N], b = new double[N], res = new double[N]; for (int i = 0; i < N; i++) for (int j = 0; j < N; j++) in >> a[i * N + j]; for (int i = 0; i < N; i++) in >> b[i]; in.close(); double *a1, *b1, *res1; cudaMalloc(&a1, sizeof(double) * N * N); cudaMemcpy(a1, a, sizeof(double) * N * N, cudaMemcpyHostToDevice); cudaMalloc(&b1, sizeof(double) * N); cudaMemcpy(b1, b, sizeof(double) * N, cudaMemcpyHostToDevice); cudaMalloc(&res1, sizeof(double) * N); cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); dot<<<BlkNum, N / BlkNum>>>(a1, b1, res1); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start, stop); cudaEventDestroy(start); cudaEventDestroy(stop); cudaMemcpy(res, res1, sizeof(double) * N, cudaMemcpyDeviceToHost); std::cout << "Running Time: " << elapsedTime << "s" << std::endl; std::ofstream out("out.txt"); if (!out) { std::cerr << "Err: output\n"; return -1; } for (int i = 0; i < N; i++) out << std::setprecision(15) << res[i] << "\n"; out.close(); free(a); free(b); free(res); cudaFree(a1); cudaFree(b1); cudaFree(res1); return 0; }
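The kernel above assigns one thread per output row and assumes N is an exact multiple of BlkNum; two further details are that cudaEventElapsedTime reports milliseconds rather than seconds, and that memory obtained with new[] is released with delete[] rather than free(). A bounds-checked variant with a host-side reference check follows (an illustrative sketch, not part of the file above).

// Sketch: row-per-thread matrix-vector product with a tail-block guard and CPU check.
#include <cstdio>
#include <cuda_runtime.h>

constexpr int N = 1000;

__global__ void matvec(const double *a, const double *b, double *c, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;                       // guard the final partial block
    double sum = 0.0;
    for (int k = 0; k < n; k++) sum += a[i * n + k] * b[k];
    c[i] = sum;
}

int main() {
    double *a = new double[N * N], *b = new double[N], *res = new double[N];
    for (int i = 0; i < N * N; i++) a[i] = 1.0 / (1 + i % 17);
    for (int i = 0; i < N; i++) b[i] = 0.5 * i;

    double *a1, *b1, *c1;
    cudaMalloc(&a1, sizeof(double) * N * N);
    cudaMalloc(&b1, sizeof(double) * N);
    cudaMalloc(&c1, sizeof(double) * N);
    cudaMemcpy(a1, a, sizeof(double) * N * N, cudaMemcpyHostToDevice);
    cudaMemcpy(b1, b, sizeof(double) * N, cudaMemcpyHostToDevice);

    int block = 256;
    int grid = (N + block - 1) / block;       // round up; no divisibility assumption
    matvec<<<grid, block>>>(a1, b1, c1, N);
    cudaMemcpy(res, c1, sizeof(double) * N, cudaMemcpyDeviceToHost);

    double max_err = 0.0;
    for (int i = 0; i < N; i++) {             // host reference check
        double ref = 0.0;
        for (int k = 0; k < N; k++) ref += a[i * N + k] * b[k];
        double err = ref > res[i] ? ref - res[i] : res[i] - ref;
        if (err > max_err) max_err = err;
    }
    printf("max abs error: %g\n", max_err);

    delete[] a; delete[] b; delete[] res;     // new[] pairs with delete[]
    cudaFree(a1); cudaFree(b1); cudaFree(c1);
    return 0;
}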
d38b6c9cbdc19a56775d4c58e6d1016bf092144e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <string.h> #include <stdlib.h> #include <math.h> #include <stdint.h> #include <stdio.h> #include <unistd.h> // ********************************************** // For floats vector on device // ********************************************** typedef struct { float x; float y; float z; } Vec; __device__ Vec vec_add(Vec v1, Vec v2) { Vec res; res.x = v1.x + v2.x; res.y = v1.y + v2.y; res.z = v1.z + v2.z; return res; } __device__ Vec vec_sub(Vec v1, Vec v2) { Vec res; res.x = v1.x - v2.x; res.y = v1.y - v2.y; res.z = v1.z - v2.z; return res; } __device__ Vec vec_scale(Vec v, float i) { Vec res; res.x = i * v.x; res.y = i * v.y; res.z = i * v.z; return res; } __device__ float random_float(uint64_t * seed){ const uint64_t m = 9223372036854775808ULL; const uint64_t a = 2806196910506780709ULL; const uint64_t c = 1ULL; *seed = (a * (*seed) + c)%m; float res = (float) (*seed)/(float)m; return res; } __device__ uint64_t forward(uint64_t seed, uint64_t n){ const uint64_t m = 9223372036854775808ULL; uint64_t a = 2806196910506780709ULL; uint64_t c = 1ULL; n = n % m; uint64_t a_new = 1; uint64_t c_new = 0; while(n>0){ if(n & 1){ a_new *= a; c_new = c_new *a + c; } c *= (a + 1); a *= a; n >>= 1; } return (a_new * seed + c_new) % m; } __device__ Vec vec_sample_unit(uint64_t i) { Vec res; uint64_t seed = forward(i, i*200); float pho = random_float(&seed) * 2 * 3.141592653; float cos_theta = random_float(&seed) * 2 - 1; float sin_theta = sqrt( 1 - cos_theta * cos_theta); res.x = sin_theta * cos(pho); res.y = sin_theta * sin(pho); res.z = cos_theta; return res; } __device__ float vec_dot_product(Vec v1, Vec v2) { float res; res = v1.x * v2.x + v1.y * v2.y + v1.z * v2.z; return res; } __device__ float vec_norm(Vec v) { float res; res = sqrt(v.x * v.x + v.y * v.y + v.z * v.z); return res; } // ********************************************** // ray_trace_kernel // ********************************************** inline void cuchk(hipError_t err){ if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } } __global__ void ray_trace_kernel(float* d_window, int width, int ray_num) { //tried to use the shared mem, but it's too small to store the whole window 1000*1000*4bytes = 4 mb //issue the threads to do the work __shared__ int ray_chunck; __shared__ Vec C; __shared__ Vec L; __shared__ float w_y; __shared__ float w_max; __shared__ float R; int tx = blockIdx.x * gridDim.x + threadIdx.x; ray_chunck = ray_num/gridDim.x; C.x = 0.0; C.y = 12.0; C.z = 0.0; L.x = 4.0; L.y = 4.0; L.z = -1; w_y = 10; w_max = 10; R = 6.0; for (int i = 0; i < ray_chunck; i+=blockDim.x*gridDim.x){ Vec V, W; uint64_t seed = tx; while(1){ seed += ray_num; V = vec_sample_unit(seed); if (V.y == 0) continue; W = vec_scale(V, (w_y/V.y)); float temp = vec_dot_product(V, C); if (fabs(W.x) < w_max && fabs(W.z) < w_max && temp * temp + R * R - vec_dot_product(C,C) > 0) break; } float temp2 = vec_dot_product(V,C); float t = temp2 - sqrt(temp2 * temp2 + R * R - vec_dot_product(C,C)); Vec II = vec_scale(V, t); Vec N = vec_scale(vec_sub(II,C), 1.0/vec_norm(vec_sub(II,C))); Vec S = vec_scale(vec_sub(L,II), 1.0/vec_norm(vec_sub(L,II))); float b = vec_dot_product(S,N); b = 0 >= b ? 
0 : b; int x = floor((W.x + w_max) /(2 * w_max) * (width - 1)); int z = floor((W.z + w_max) /(2 * w_max) * (width - 1)); atomicAdd(&d_window[x*width + z], b); } } // ********************************************** // get the args // ********************************************** void get_input(int argc, char *argv[], int* num_rays, int* len, int *grid_dim, int *block_dim){ *num_rays = 1000000; *len = 1000; *grid_dim = -1; *block_dim = 256; int opt; while((opt = getopt(argc, argv, "r:l:g:b:")) != -1) { switch (opt) { case 'r': *num_rays = atoi(optarg); break; case 'l': *len = atoi(optarg); break; case 'g': *grid_dim = atoi(optarg); break; case 'b': *block_dim = atoi(optarg); break; default:break; } } if (*grid_dim == -1) { *grid_dim = (*num_rays + *block_dim-1) / *block_dim; } } int main(int argc, char* argv[]) { int num_rays; int len; int grid_dim; int block_dim; get_input(argc, argv, &num_rays, &len, &grid_dim, &block_dim); size_t size = len * len * sizeof(float); float* d_window; hipMalloc((void **) &d_window, size); hipMemset((void *) d_window, 0.0, size); hipEvent_t start ,end; hipEventCreate(&start); hipEventCreate(&end); hipEventRecord(start, 0); hipLaunchKernelGGL(( ray_trace_kernel), dim3(grid_dim),dim3(block_dim), 0, 0, d_window, len, num_rays); hipEventRecord(end, 0); float time; hipEventSynchronize(end); hipEventElapsedTime(&time, start, end); time = time / 1000; printf("ray_num\tgrid_dim\tblock_dim\ttime\n"); printf("%d\t%d\t%d\t%f\n", num_rays, grid_dim, block_dim, time); float* window = (float*) malloc(size); hipMemcpy(window, d_window, size, hipMemcpyDeviceToHost); char filename[] = "ball.dat"; FILE *f = fopen(filename, "wb"); if (f != NULL) { fwrite(window, sizeof(float), len * len, f); fclose(f); } else { fprintf(stderr, "Error opening %s: ", filename); perror(""); free(window); cuchk(hipFree(d_window)); exit(EXIT_FAILURE); } free(window); cuchk(hipFree(d_window)); return 0; }
d38b6c9cbdc19a56775d4c58e6d1016bf092144e.cu
#include <string.h> #include <stdlib.h> #include <math.h> #include <stdint.h> #include <stdio.h> #include <unistd.h> // ********************************************** // For floats vector on device // ********************************************** typedef struct { float x; float y; float z; } Vec; __device__ Vec vec_add(Vec v1, Vec v2) { Vec res; res.x = v1.x + v2.x; res.y = v1.y + v2.y; res.z = v1.z + v2.z; return res; } __device__ Vec vec_sub(Vec v1, Vec v2) { Vec res; res.x = v1.x - v2.x; res.y = v1.y - v2.y; res.z = v1.z - v2.z; return res; } __device__ Vec vec_scale(Vec v, float i) { Vec res; res.x = i * v.x; res.y = i * v.y; res.z = i * v.z; return res; } __device__ float random_float(uint64_t * seed){ const uint64_t m = 9223372036854775808ULL; const uint64_t a = 2806196910506780709ULL; const uint64_t c = 1ULL; *seed = (a * (*seed) + c)%m; float res = (float) (*seed)/(float)m; return res; } __device__ uint64_t forward(uint64_t seed, uint64_t n){ const uint64_t m = 9223372036854775808ULL; uint64_t a = 2806196910506780709ULL; uint64_t c = 1ULL; n = n % m; uint64_t a_new = 1; uint64_t c_new = 0; while(n>0){ if(n & 1){ a_new *= a; c_new = c_new *a + c; } c *= (a + 1); a *= a; n >>= 1; } return (a_new * seed + c_new) % m; } __device__ Vec vec_sample_unit(uint64_t i) { Vec res; uint64_t seed = forward(i, i*200); float pho = random_float(&seed) * 2 * 3.141592653; float cos_theta = random_float(&seed) * 2 - 1; float sin_theta = sqrt( 1 - cos_theta * cos_theta); res.x = sin_theta * cos(pho); res.y = sin_theta * sin(pho); res.z = cos_theta; return res; } __device__ float vec_dot_product(Vec v1, Vec v2) { float res; res = v1.x * v2.x + v1.y * v2.y + v1.z * v2.z; return res; } __device__ float vec_norm(Vec v) { float res; res = sqrt(v.x * v.x + v.y * v.y + v.z * v.z); return res; } // ********************************************** // ray_trace_kernel // ********************************************** inline void cuchk(cudaError_t err){ if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } } __global__ void ray_trace_kernel(float* d_window, int width, int ray_num) { //tried to use the shared mem, but it's too small to store the whole window 1000*1000*4bytes = 4 mb //issue the threads to do the work __shared__ int ray_chunck; __shared__ Vec C; __shared__ Vec L; __shared__ float w_y; __shared__ float w_max; __shared__ float R; int tx = blockIdx.x * gridDim.x + threadIdx.x; ray_chunck = ray_num/gridDim.x; C.x = 0.0; C.y = 12.0; C.z = 0.0; L.x = 4.0; L.y = 4.0; L.z = -1; w_y = 10; w_max = 10; R = 6.0; for (int i = 0; i < ray_chunck; i+=blockDim.x*gridDim.x){ Vec V, W; uint64_t seed = tx; while(1){ seed += ray_num; V = vec_sample_unit(seed); if (V.y == 0) continue; W = vec_scale(V, (w_y/V.y)); float temp = vec_dot_product(V, C); if (fabs(W.x) < w_max && fabs(W.z) < w_max && temp * temp + R * R - vec_dot_product(C,C) > 0) break; } float temp2 = vec_dot_product(V,C); float t = temp2 - sqrt(temp2 * temp2 + R * R - vec_dot_product(C,C)); Vec II = vec_scale(V, t); Vec N = vec_scale(vec_sub(II,C), 1.0/vec_norm(vec_sub(II,C))); Vec S = vec_scale(vec_sub(L,II), 1.0/vec_norm(vec_sub(L,II))); float b = vec_dot_product(S,N); b = 0 >= b ? 
0 : b; int x = floor((W.x + w_max) /(2 * w_max) * (width - 1)); int z = floor((W.z + w_max) /(2 * w_max) * (width - 1)); atomicAdd(&d_window[x*width + z], b); } } // ********************************************** // get the args // ********************************************** void get_input(int argc, char *argv[], int* num_rays, int* len, int *grid_dim, int *block_dim){ *num_rays = 1000000; *len = 1000; *grid_dim = -1; *block_dim = 256; int opt; while((opt = getopt(argc, argv, "r:l:g:b:")) != -1) { switch (opt) { case 'r': *num_rays = atoi(optarg); break; case 'l': *len = atoi(optarg); break; case 'g': *grid_dim = atoi(optarg); break; case 'b': *block_dim = atoi(optarg); break; default:break; } } if (*grid_dim == -1) { *grid_dim = (*num_rays + *block_dim-1) / *block_dim; } } int main(int argc, char* argv[]) { int num_rays; int len; int grid_dim; int block_dim; get_input(argc, argv, &num_rays, &len, &grid_dim, &block_dim); size_t size = len * len * sizeof(float); float* d_window; cudaMalloc((void **) &d_window, size); cudaMemset((void *) d_window, 0.0, size); cudaEvent_t start ,end; cudaEventCreate(&start); cudaEventCreate(&end); cudaEventRecord(start, 0); ray_trace_kernel<<<grid_dim,block_dim>>>(d_window, len, num_rays); cudaEventRecord(end, 0); float time; cudaEventSynchronize(end); cudaEventElapsedTime(&time, start, end); time = time / 1000; printf("ray_num\tgrid_dim\tblock_dim\ttime\n"); printf("%d\t%d\t%d\t%f\n", num_rays, grid_dim, block_dim, time); float* window = (float*) malloc(size); cudaMemcpy(window, d_window, size, cudaMemcpyDeviceToHost); char filename[] = "ball.dat"; FILE *f = fopen(filename, "wb"); if (f != NULL) { fwrite(window, sizeof(float), len * len, f); fclose(f); } else { fprintf(stderr, "Error opening %s: ", filename); perror(""); free(window); cuchk(cudaFree(d_window)); exit(EXIT_FAILURE); } free(window); cuchk(cudaFree(d_window)); return 0; }
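forward() above is the standard log-time skip-ahead for a linear congruential generator: it jumps a seed n states forward by repeated squaring, so every ray can derive an independent, reproducible stream from its index. The host-only check below (illustrative, reusing the same constants as the kernel) confirms the closed form matches stepping the generator one state at a time.

// Sketch: verify LCG skip-ahead against naive iteration.
#include <cstdio>
#include <cstdint>
#include <initializer_list>

static const uint64_t M = 9223372036854775808ULL;   // 2^63
static const uint64_t A = 2806196910506780709ULL;
static const uint64_t C = 1ULL;

uint64_t step_n_times(uint64_t seed, uint64_t n) {
    for (uint64_t i = 0; i < n; i++) seed = (A * seed + C) % M;
    return seed;
}

uint64_t skip_ahead(uint64_t seed, uint64_t n) {
    // A^n and the accumulated increment via repeated squaring. Intermediate
    // products wrap mod 2^64; reducing mod 2^63 at the end is still exact
    // because 2^63 divides 2^64.
    uint64_t a = A, c = C, a_new = 1, c_new = 0;
    while (n > 0) {
        if (n & 1) { a_new *= a; c_new = c_new * a + c; }
        c *= (a + 1);
        a *= a;
        n >>= 1;
    }
    return (a_new * seed + c_new) % M;
}

int main() {
    const uint64_t seed = 12345;
    for (uint64_t n : {0ULL, 1ULL, 2ULL, 1000ULL, 123456789ULL}) {
        printf("n=%-10llu step=%-20llu skip=%llu\n",
               (unsigned long long)n,
               (unsigned long long)step_n_times(seed, n),
               (unsigned long long)skip_ahead(seed, n));
    }
    return 0;
}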
7e9bbed11e687be2e8f3ab742124c32ce3dd6cd1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by op2.py // //user function __device__ void updateUR_gpu( double *u, double *r, const double *p, double *v, const double *alpha) { *u += (*alpha) * (*p); *r -= (*alpha) * (*v); *v = 0.0f; } // CUDA kernel function __global__ void op_cuda_updateUR( double *arg0, double *arg1, const double *__restrict arg2, double *arg3, const double *arg4, int set_size ) { //process set elements for ( int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x ){ //user-supplied kernel call updateUR_gpu(arg0+n*1, arg1+n*1, arg2+n*1, arg3+n*1, arg4); } } //host stub function void op_par_loop_updateUR(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3, op_arg arg4){ double*arg4h = (double *)arg4.data; int nargs = 5; op_arg args[5]; args[0] = arg0; args[1] = arg1; args[2] = arg2; args[3] = arg3; args[4] = arg4; // initialise timers double cpu_t1, cpu_t2, wall_t1, wall_t2; op_timing_realloc(5); op_timers_core(&cpu_t1, &wall_t1); OP_kernels[5].name = name; OP_kernels[5].count += 1; if (OP_diags>2) { printf(" kernel routine w/o indirection: updateUR"); } op_mpi_halo_exchanges_cuda(set, nargs, args); if (set->size > 0) { //transfer constants to GPU int consts_bytes = 0; consts_bytes += ROUND_UP(1*sizeof(double)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg4.data = OP_consts_h + consts_bytes; arg4.data_d = OP_consts_d + consts_bytes; for ( int d=0; d<1; d++ ){ ((double *)arg4.data)[d] = arg4h[d]; } consts_bytes += ROUND_UP(1*sizeof(double)); mvConstArraysToDevice(consts_bytes); //set CUDA execution parameters #ifdef OP_BLOCK_SIZE_5 int nthread = OP_BLOCK_SIZE_5; #else int nthread = OP_block_size; // int nthread = 128; #endif int nblocks = 200; hipLaunchKernelGGL(( op_cuda_updateUR), dim3(nblocks),dim3(nthread), 0, 0, (double *) arg0.data_d, (double *) arg1.data_d, (double *) arg2.data_d, (double *) arg3.data_d, (double *) arg4.data_d, set->size ); } op_mpi_set_dirtybit_cuda(nargs, args); cutilSafeCall(hipDeviceSynchronize()); //update kernel record op_timers_core(&cpu_t2, &wall_t2); OP_kernels[5].time += wall_t2 - wall_t1; OP_kernels[5].transfer += (float)set->size * arg0.size * 2.0f; OP_kernels[5].transfer += (float)set->size * arg1.size * 2.0f; OP_kernels[5].transfer += (float)set->size * arg2.size; OP_kernels[5].transfer += (float)set->size * arg3.size * 2.0f; }
7e9bbed11e687be2e8f3ab742124c32ce3dd6cd1.cu
// // auto-generated by op2.py // //user function __device__ void updateUR_gpu( double *u, double *r, const double *p, double *v, const double *alpha) { *u += (*alpha) * (*p); *r -= (*alpha) * (*v); *v = 0.0f; } // CUDA kernel function __global__ void op_cuda_updateUR( double *arg0, double *arg1, const double *__restrict arg2, double *arg3, const double *arg4, int set_size ) { //process set elements for ( int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x ){ //user-supplied kernel call updateUR_gpu(arg0+n*1, arg1+n*1, arg2+n*1, arg3+n*1, arg4); } } //host stub function void op_par_loop_updateUR(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3, op_arg arg4){ double*arg4h = (double *)arg4.data; int nargs = 5; op_arg args[5]; args[0] = arg0; args[1] = arg1; args[2] = arg2; args[3] = arg3; args[4] = arg4; // initialise timers double cpu_t1, cpu_t2, wall_t1, wall_t2; op_timing_realloc(5); op_timers_core(&cpu_t1, &wall_t1); OP_kernels[5].name = name; OP_kernels[5].count += 1; if (OP_diags>2) { printf(" kernel routine w/o indirection: updateUR"); } op_mpi_halo_exchanges_cuda(set, nargs, args); if (set->size > 0) { //transfer constants to GPU int consts_bytes = 0; consts_bytes += ROUND_UP(1*sizeof(double)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg4.data = OP_consts_h + consts_bytes; arg4.data_d = OP_consts_d + consts_bytes; for ( int d=0; d<1; d++ ){ ((double *)arg4.data)[d] = arg4h[d]; } consts_bytes += ROUND_UP(1*sizeof(double)); mvConstArraysToDevice(consts_bytes); //set CUDA execution parameters #ifdef OP_BLOCK_SIZE_5 int nthread = OP_BLOCK_SIZE_5; #else int nthread = OP_block_size; // int nthread = 128; #endif int nblocks = 200; op_cuda_updateUR<<<nblocks,nthread>>>( (double *) arg0.data_d, (double *) arg1.data_d, (double *) arg2.data_d, (double *) arg3.data_d, (double *) arg4.data_d, set->size ); } op_mpi_set_dirtybit_cuda(nargs, args); cutilSafeCall(cudaDeviceSynchronize()); //update kernel record op_timers_core(&cpu_t2, &wall_t2); OP_kernels[5].time += wall_t2 - wall_t1; OP_kernels[5].transfer += (float)set->size * arg0.size * 2.0f; OP_kernels[5].transfer += (float)set->size * arg1.size * 2.0f; OP_kernels[5].transfer += (float)set->size * arg2.size; OP_kernels[5].transfer += (float)set->size * arg3.size * 2.0f; }
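Stripped of the OP2 plan, constant-staging, and timing machinery, the generated stub boils down to a grid-stride elementwise update. A self-contained sketch of that core is given below; the names, sizes, and use of managed memory are illustrative and not part of the generated code.

// Sketch: grid-stride elementwise update (u += alpha*p; r -= alpha*v; v = 0).
#include <cstdio>
#include <cuda_runtime.h>

__global__ void updateUR(double *u, double *r, const double *p, double *v,
                         double alpha, int n) {
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < n;
         i += blockDim.x * gridDim.x) {
        u[i] += alpha * p[i];
        r[i] -= alpha * v[i];
        v[i] = 0.0;
    }
}

int main() {
    const int n = 1 << 20;
    const double alpha = 0.5;
    double *u, *r, *p, *v;
    cudaMallocManaged(&u, n * sizeof(double));
    cudaMallocManaged(&r, n * sizeof(double));
    cudaMallocManaged(&p, n * sizeof(double));
    cudaMallocManaged(&v, n * sizeof(double));
    for (int i = 0; i < n; i++) { u[i] = 1.0; r[i] = 2.0; p[i] = 3.0; v[i] = 4.0; }

    // Fixed grid of 200 blocks, as in the generated stub; the stride loop
    // still covers all n elements.
    updateUR<<<200, 128>>>(u, r, p, v, alpha, n);
    cudaDeviceSynchronize();
    printf("u[0]=%g r[0]=%g v[0]=%g\n", u[0], r[0], v[0]);   // expect 2.5 0 0

    cudaFree(u); cudaFree(r); cudaFree(p); cudaFree(v);
    return 0;
}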
105eac6fbbf21c15b4747608eb6e2eba1460998a.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************************************** * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are permitted * provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used * to endorse or promote products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /** In the normal GEMM, the fast changing dimension of a matrix always has stride equals to 1, e.g. ColumnMajor and RowMajor matrix. Affine2 matrix can have larger than 1 stride in both dimensions. To support such layout, we need to change to method to visit the global memory: 1. We can only visit 1 element a time because elements are not stored consecutively anymore. Vectorized load/store is not possible. 2. One extra multiplication is needed in calculating the global memory address addr = base_pointer + coord1 * stride1 + coord2 * stride2 The rest part of GEMM which includes shared memory load/store, mma comutation is the same. This example uses Ampere fp64 tensore core Affine2 GEMM as an example. SIMT (e.g. sgemm, dgemm) has support Affine2 layout. 
*/ #include <iostream> #include <sstream> #include "cutlass/cutlass.h" #include "cutlass/gemm/device/gemm_universal_adapter.h" #include "cutlass/gemm/kernel/default_gemm_with_k_reduction.h" #include "cutlass/reduction/device/reduce_split_k.h" #include "cutlass/reduction/kernel/reduce_split_k.h" #include "cutlass/reduction/thread/reduction_operators.h" #include "cutlass/matrix_coord.h" #include "cutlass/util/command_line.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/reference/device/gemm.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "helper.h" // The code section below describes datatype for input, output tensors and computation between // elements using ElementAccumulator = double; // Data type of accumulator using ElementComputeEpilogue = ElementAccumulator; // Data type of epilogue computation using ElementInputA = double; // Data type of elements in input tensor using ElementInputB = double; // Data type of elements in input tensor using ElementOutput = double; // Data type of elements in output tensor // Since Affine2 explicitly lists the strides of both dimensions, it does not really matter if // it is columnmajor and rowmajor. However, it helps CUTLASS to improve the load locality if // CUTLASS can know which dimension of A/B operand has smaller stride or more dense. // // Affine2 ColumnMajor means the row stride is smaller and Affine2 RowMajor means the column // stride is smaller. // // The Affine2 epilogue reuses AffineN epilogue so it does not need to specify column majore // or row major. using LayoutInputA = cutlass::layout::AffineRank2ColumnMajor; using LayoutInputB = cutlass::layout::AffineRank2RowMajor; using LayoutOutput = cutlass::layout::AffineRankN<2>; // This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM using MMAOp = cutlass::arch::OpClassTensorOp; // This code section describes CUDA SM architecture number using SmArch = cutlass::arch::Sm80; // This code section describes the tile size a thread block will compute using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 16>; // Threadblock tile shape // This code section describes tile size a warp will compute using WarpShape = cutlass::gemm::GemmShape<64, 32, 16>; // Warp tile shape // This code section describes the size of MMA op using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; // TensorCore instruction shape // This code section describes how threadblocks are scheduled on GPU using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>; // Number of pipelines you want to use constexpr int NumStages = 3; // This code section describes the epilogue part of the kernel, we use default value using EpilogueOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, // Data type of output matrix. 1, // The number of elements per memory // access has. It has to be 1 for // affine2. 
ElementComputeEpilogue>; using GemmKernel = typename cutlass::gemm::kernel::DefaultGemmUniversal< ElementInputA, LayoutInputA, cutlass::ComplexTransform::kNone, 1, // AlignmentA has to be 1 ElementInputB, LayoutInputB, cutlass::ComplexTransform::kNone, 1, // AlignmentB has to be 1 ElementOutput, LayoutOutput, ElementAccumulator, MMAOp, SmArch, ThreadblockShape, WarpShape, InstructionShape, EpilogueOp, SwizzleThreadBlock, NumStages, cutlass::arch::OpMultiplyAdd >::GemmKernel; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; ///////////////////////////////////////////////////////////////////////////////////////////////// int run() { // Construct Gemm ProblemSize with user defined output size cutlass::gemm::GemmCoord problem_size = {1024, 512, 1024}; // Stride factor shows the distance between two elements in the differnet dimensions. The // first data is the logical distance between two rows, the second is between two columns. // CUTLASS has a utility tool cutlass::layout::Affine2Layout_Factory<Layout>::layout_factory // to help to convert stride_factor to the two strides. // // It is also totally fine to compute the strides directly without using the utility to // construct the affine2 layout. typename LayoutInputA::Stride::Index stride_factor_A[] = {3, 4}; typename LayoutInputB::Stride::Index stride_factor_B[] = {5, 6}; typename LayoutOutput::Stride::Index stride_factor_C[] = {7, 8}; // Initialize tensors using CUTLASS helper functions cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(problem_size.mk(), cutlass::layout::Affine2Layout_Factory<LayoutInputA>::layout_factory(problem_size.mk(), stride_factor_A)); cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(problem_size.kn(), cutlass::layout::Affine2Layout_Factory<LayoutInputB>::layout_factory(problem_size.kn(), stride_factor_B)); // Create matrix C used to load for bias addition. 
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(problem_size.mn(), cutlass::layout::Affine2Layout_Factory<LayoutOutput>::layout_factory(problem_size.mn(), stride_factor_C)); // Create matrix D used to store output from CUTLASS kernel cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(problem_size.mn(), cutlass::layout::Affine2Layout_Factory<LayoutOutput>::layout_factory(problem_size.mn(), stride_factor_C)); // Create matrix D with dimensions M x N used to store output from reference // kernel cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(problem_size.mn(), cutlass::layout::Affine2Layout_Factory<LayoutOutput>::layout_factory(problem_size.mn(), stride_factor_C)); // Fill input and output matrices on host using CUTLASS helper functions cutlass::reference::host::TensorFillRandomUniform( tensor_a.host_view(), 1, ElementInputA(4), ElementInputA(-4), 0); // <- Fill matrix A on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_b.host_view(), 1, ElementInputB(4), ElementInputB(-4), 0); // <- Fill matrix B on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_c.host_view(), 1, ElementOutput(4), ElementOutput(-4), 0); // <- Fill matrix C on host with uniform-distribution random data cutlass::reference::host::TensorFill( tensor_d.host_view()); // <- fill matrix D on host with zeros cutlass::reference::host::TensorFill( tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros // Copy data from host to GPU tensor_a.sync_device(); tensor_b.sync_device(); tensor_c.sync_device(); tensor_d.sync_device(); tensor_ref_d.sync_device(); // Initialize alpha for dot product computation ElementComputeEpilogue alpha = ElementComputeEpilogue(1); ElementComputeEpilogue beta = ElementComputeEpilogue(1); cutlass::gemm::GemmUniversalMode mode = cutlass::gemm::GemmUniversalMode::kGemm; int batch_count = 1; // Create a tuple of gemm kernel arguments. 
This is later passed as arguments to launch // instantiated CUTLASS kernel typename Gemm::Arguments arguments{ mode, problem_size, batch_count, {alpha, beta}, tensor_a.device_ref().data(), // <- reference to matrix A on device tensor_b.device_ref().data(), // <- reference to matrix B on device tensor_c.device_ref().data(), // <- reference to matrix C on device tensor_d.device_ref().data(), // <- reference to matrix D on device tensor_a.layout().capacity(problem_size.mn()), tensor_b.layout().capacity(problem_size.kn()), tensor_c.layout().capacity(problem_size.mn()), tensor_d.layout().capacity(problem_size.mn()), tensor_a.layout().stride(), tensor_b.layout().stride(), tensor_c.layout().stride(), tensor_d.layout().stride() }; // Instantiate CUTLASS kernel depending on templates Gemm gemm_op; // Using the arguments, query for extra workspace required for matrix multiplication computation size_t workspace_size = Gemm::get_workspace_size(arguments); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); // Check the problem size is supported or not cutlass::Status status = gemm_op.can_implement(arguments); CUTLASS_CHECK(status); // Initialize CUTLASS kernel with arguments and workspace pointer status = gemm_op.initialize(arguments, workspace.get()); CUTLASS_CHECK(status); // Launch initialized CUTLASS kernel status = gemm_op(); CUTLASS_CHECK(status); // // Create instantiation for device reference gemm kernel // // Launch device reference to compute strictly the product A * B cutlass::reference::device::Gemm< ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementComputeEpilogue, ElementAccumulator> gemm_device; gemm_device ( problem_size, alpha, tensor_a.device_ref(), tensor_b.device_ref(), beta, tensor_c.device_ref(), tensor_ref_d.device_ref() ); // Wait for kernels to finish hipDeviceSynchronize(); // Copy output data from CUTLASS and reference kernel to host for comparison tensor_d.sync_host(); tensor_ref_d.sync_host(); bool pass = cutlass::reference::host::TensorEquals(tensor_d.host_view(), tensor_ref_d.host_view()); // Check if output from CUTLASS kernel and reference kernel are equal or not std::cout << (pass ? "Passed" : "Failed") << std::endl; CUTLASS_CHECK(status); return 0; } int main(int argc, char const **args) { bool notSupported = false; // Ampere Tensor Core operations exposed with mma.sync are first available in CUDA 11.0. // // CUTLASS must be compiled with CUDA 11 Toolkit to run Conv2dFprop examples. if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 0))) { std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl; notSupported = true; } hipDeviceProp_t props; CUDA_CHECK(hipGetDeviceProperties(&props, 0)); if (!(props.major > 8 || (props.major == 8 && props.minor >= 0))) { std::cerr << "Ampere Tensor Ops must be run on a machine with compute capability at least 80." << std::endl; notSupported = true; } if (notSupported) { return 0; } return run(); } /////////////////////////////////////////////////////////////////////////////////////////////////
105eac6fbbf21c15b4747608eb6e2eba1460998a.cu
/*************************************************************************************************** * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are permitted * provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used * to endorse or promote products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /** In the normal GEMM, the fast changing dimension of a matrix always has stride equals to 1, e.g. ColumnMajor and RowMajor matrix. Affine2 matrix can have larger than 1 stride in both dimensions. To support such layout, we need to change to method to visit the global memory: 1. We can only visit 1 element a time because elements are not stored consecutively anymore. Vectorized load/store is not possible. 2. One extra multiplication is needed in calculating the global memory address addr = base_pointer + coord1 * stride1 + coord2 * stride2 The rest part of GEMM which includes shared memory load/store, mma comutation is the same. This example uses Ampere fp64 tensore core Affine2 GEMM as an example. SIMT (e.g. sgemm, dgemm) has support Affine2 layout. 
*/ #include <iostream> #include <sstream> #include "cutlass/cutlass.h" #include "cutlass/gemm/device/gemm_universal_adapter.h" #include "cutlass/gemm/kernel/default_gemm_with_k_reduction.h" #include "cutlass/reduction/device/reduce_split_k.h" #include "cutlass/reduction/kernel/reduce_split_k.h" #include "cutlass/reduction/thread/reduction_operators.h" #include "cutlass/matrix_coord.h" #include "cutlass/util/command_line.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/reference/device/gemm.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "helper.h" // The code section below describes datatype for input, output tensors and computation between // elements using ElementAccumulator = double; // Data type of accumulator using ElementComputeEpilogue = ElementAccumulator; // Data type of epilogue computation using ElementInputA = double; // Data type of elements in input tensor using ElementInputB = double; // Data type of elements in input tensor using ElementOutput = double; // Data type of elements in output tensor // Since Affine2 explicitly lists the strides of both dimensions, it does not really matter if // it is columnmajor and rowmajor. However, it helps CUTLASS to improve the load locality if // CUTLASS can know which dimension of A/B operand has smaller stride or more dense. // // Affine2 ColumnMajor means the row stride is smaller and Affine2 RowMajor means the column // stride is smaller. // // The Affine2 epilogue reuses AffineN epilogue so it does not need to specify column majore // or row major. using LayoutInputA = cutlass::layout::AffineRank2ColumnMajor; using LayoutInputB = cutlass::layout::AffineRank2RowMajor; using LayoutOutput = cutlass::layout::AffineRankN<2>; // This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM using MMAOp = cutlass::arch::OpClassTensorOp; // This code section describes CUDA SM architecture number using SmArch = cutlass::arch::Sm80; // This code section describes the tile size a thread block will compute using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 16>; // Threadblock tile shape // This code section describes tile size a warp will compute using WarpShape = cutlass::gemm::GemmShape<64, 32, 16>; // Warp tile shape // This code section describes the size of MMA op using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; // TensorCore instruction shape // This code section describes how threadblocks are scheduled on GPU using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>; // Number of pipelines you want to use constexpr int NumStages = 3; // This code section describes the epilogue part of the kernel, we use default value using EpilogueOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, // Data type of output matrix. 1, // The number of elements per memory // access has. It has to be 1 for // affine2. 
ElementComputeEpilogue>; using GemmKernel = typename cutlass::gemm::kernel::DefaultGemmUniversal< ElementInputA, LayoutInputA, cutlass::ComplexTransform::kNone, 1, // AlignmentA has to be 1 ElementInputB, LayoutInputB, cutlass::ComplexTransform::kNone, 1, // AlignmentB has to be 1 ElementOutput, LayoutOutput, ElementAccumulator, MMAOp, SmArch, ThreadblockShape, WarpShape, InstructionShape, EpilogueOp, SwizzleThreadBlock, NumStages, cutlass::arch::OpMultiplyAdd >::GemmKernel; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; ///////////////////////////////////////////////////////////////////////////////////////////////// int run() { // Construct Gemm ProblemSize with user defined output size cutlass::gemm::GemmCoord problem_size = {1024, 512, 1024}; // Stride factor shows the distance between two elements in the differnet dimensions. The // first data is the logical distance between two rows, the second is between two columns. // CUTLASS has a utility tool cutlass::layout::Affine2Layout_Factory<Layout>::layout_factory // to help to convert stride_factor to the two strides. // // It is also totally fine to compute the strides directly without using the utility to // construct the affine2 layout. typename LayoutInputA::Stride::Index stride_factor_A[] = {3, 4}; typename LayoutInputB::Stride::Index stride_factor_B[] = {5, 6}; typename LayoutOutput::Stride::Index stride_factor_C[] = {7, 8}; // Initialize tensors using CUTLASS helper functions cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(problem_size.mk(), cutlass::layout::Affine2Layout_Factory<LayoutInputA>::layout_factory(problem_size.mk(), stride_factor_A)); cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(problem_size.kn(), cutlass::layout::Affine2Layout_Factory<LayoutInputB>::layout_factory(problem_size.kn(), stride_factor_B)); // Create matrix C used to load for bias addition. 
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(problem_size.mn(), cutlass::layout::Affine2Layout_Factory<LayoutOutput>::layout_factory(problem_size.mn(), stride_factor_C)); // Create matrix D used to store output from CUTLASS kernel cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(problem_size.mn(), cutlass::layout::Affine2Layout_Factory<LayoutOutput>::layout_factory(problem_size.mn(), stride_factor_C)); // Create matrix D with dimensions M x N used to store output from reference // kernel cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(problem_size.mn(), cutlass::layout::Affine2Layout_Factory<LayoutOutput>::layout_factory(problem_size.mn(), stride_factor_C)); // Fill input and output matrices on host using CUTLASS helper functions cutlass::reference::host::TensorFillRandomUniform( tensor_a.host_view(), 1, ElementInputA(4), ElementInputA(-4), 0); // <- Fill matrix A on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_b.host_view(), 1, ElementInputB(4), ElementInputB(-4), 0); // <- Fill matrix B on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_c.host_view(), 1, ElementOutput(4), ElementOutput(-4), 0); // <- Fill matrix C on host with uniform-distribution random data cutlass::reference::host::TensorFill( tensor_d.host_view()); // <- fill matrix D on host with zeros cutlass::reference::host::TensorFill( tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros // Copy data from host to GPU tensor_a.sync_device(); tensor_b.sync_device(); tensor_c.sync_device(); tensor_d.sync_device(); tensor_ref_d.sync_device(); // Initialize alpha for dot product computation ElementComputeEpilogue alpha = ElementComputeEpilogue(1); ElementComputeEpilogue beta = ElementComputeEpilogue(1); cutlass::gemm::GemmUniversalMode mode = cutlass::gemm::GemmUniversalMode::kGemm; int batch_count = 1; // Create a tuple of gemm kernel arguments. 
This is later passed as arguments to launch // instantiated CUTLASS kernel typename Gemm::Arguments arguments{ mode, problem_size, batch_count, {alpha, beta}, tensor_a.device_ref().data(), // <- reference to matrix A on device tensor_b.device_ref().data(), // <- reference to matrix B on device tensor_c.device_ref().data(), // <- reference to matrix C on device tensor_d.device_ref().data(), // <- reference to matrix D on device tensor_a.layout().capacity(problem_size.mn()), tensor_b.layout().capacity(problem_size.kn()), tensor_c.layout().capacity(problem_size.mn()), tensor_d.layout().capacity(problem_size.mn()), tensor_a.layout().stride(), tensor_b.layout().stride(), tensor_c.layout().stride(), tensor_d.layout().stride() }; // Instantiate CUTLASS kernel depending on templates Gemm gemm_op; // Using the arguments, query for extra workspace required for matrix multiplication computation size_t workspace_size = Gemm::get_workspace_size(arguments); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); // Check the problem size is supported or not cutlass::Status status = gemm_op.can_implement(arguments); CUTLASS_CHECK(status); // Initialize CUTLASS kernel with arguments and workspace pointer status = gemm_op.initialize(arguments, workspace.get()); CUTLASS_CHECK(status); // Launch initialized CUTLASS kernel status = gemm_op(); CUTLASS_CHECK(status); // // Create instantiation for device reference gemm kernel // // Launch device reference to compute strictly the product A * B cutlass::reference::device::Gemm< ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementComputeEpilogue, ElementAccumulator> gemm_device; gemm_device ( problem_size, alpha, tensor_a.device_ref(), tensor_b.device_ref(), beta, tensor_c.device_ref(), tensor_ref_d.device_ref() ); // Wait for kernels to finish cudaDeviceSynchronize(); // Copy output data from CUTLASS and reference kernel to host for comparison tensor_d.sync_host(); tensor_ref_d.sync_host(); bool pass = cutlass::reference::host::TensorEquals(tensor_d.host_view(), tensor_ref_d.host_view()); // Check if output from CUTLASS kernel and reference kernel are equal or not std::cout << (pass ? "Passed" : "Failed") << std::endl; CUTLASS_CHECK(status); return 0; } int main(int argc, char const **args) { bool notSupported = false; // Ampere Tensor Core operations exposed with mma.sync are first available in CUDA 11.0. // // CUTLASS must be compiled with CUDA 11 Toolkit to run Conv2dFprop examples. if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 0))) { std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl; notSupported = true; } cudaDeviceProp props; CUDA_CHECK(cudaGetDeviceProperties(&props, 0)); if (!(props.major > 8 || (props.major == 8 && props.minor >= 0))) { std::cerr << "Ampere Tensor Ops must be run on a machine with compute capability at least 80." << std::endl; notSupported = true; } if (notSupported) { return 0; } return run(); } /////////////////////////////////////////////////////////////////////////////////////////////////
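The key difference from an ordinary GEMM lies only in addressing: with an Affine2 layout every element access computes base + i*stride_i + j*stride_j, so loads and stores stay scalar. The naive CUDA kernel below makes that addressing explicit; it is a sketch for illustration, not CUTLASS code, and the row-major strides chosen in main are just one possible pair.

// Sketch: naive GEMM with explicit two-dimensional strides for A, B, C/D.
#include <cuda_runtime.h>

__device__ __forceinline__ double ld2(const double *base, int i, int j,
                                      long long si, long long sj) {
    return base[i * si + j * sj];   // addr = base + i*stride_i + j*stride_j
}

__global__ void gemm_affine2(int M, int N, int K, double alpha, double beta,
                             const double *A, long long lda0, long long lda1,
                             const double *B, long long ldb0, long long ldb1,
                             const double *C, long long ldc0, long long ldc1,
                             double *D) {
    int i = blockIdx.y * blockDim.y + threadIdx.y;
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= M || j >= N) return;
    double acc = 0.0;
    for (int k = 0; k < K; k++)
        acc += ld2(A, i, k, lda0, lda1) * ld2(B, k, j, ldb0, ldb1);
    // Scalar (alignment-1) store, matching the example's constraint.
    D[i * ldc0 + j * ldc1] = alpha * acc + beta * ld2(C, i, j, ldc0, ldc1);
}

int main() {
    const int M = 64, N = 64, K = 64;
    double *A, *B, *C, *D;
    cudaMallocManaged(&A, M * K * sizeof(double));
    cudaMallocManaged(&B, K * N * sizeof(double));
    cudaMallocManaged(&C, M * N * sizeof(double));
    cudaMallocManaged(&D, M * N * sizeof(double));
    for (int i = 0; i < M * K; i++) A[i] = 1.0;
    for (int i = 0; i < K * N; i++) B[i] = 2.0;
    for (int i = 0; i < M * N; i++) C[i] = 3.0;
    dim3 block(16, 16), grid((N + 15) / 16, (M + 15) / 16);
    // Dense row-major corresponds to strides (cols, 1); any other pair works too.
    gemm_affine2<<<grid, block>>>(M, N, K, 1.0, 1.0,
                                  A, K, 1, B, N, 1, C, N, 1, D);
    cudaDeviceSynchronize();
    return D[0] == 1.0 * K * 2.0 + 3.0 ? 0 : 1;   // expect 131 for K = 64
}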
c5db4b0d9b208569406b649fad3ba8ccf1c5b330.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../../cpp/kernels.cu" #include "common.h" namespace hexcuda { const uint BLOCK = 8; enum { EMPTY, BLACK, WHITE, TOP, BOT, LEFT, RIGHT }; __device__ void flood(C3D::PTA board, int row, int col, uint8_t new_val) { const uint S = board.size(1); const int b = blockIdx.x*blockDim.x + threadIdx.x; const int neighbours[6][2] = {{-1, 0}, {-1, +1}, {0, -1}, {0, +1}, {+1, -1}, {+1, 0}}; // If we don't need to flood the value, break if (new_val < TOP) { return; } uint8_t old_val = board[b][row][col]; extern __shared__ uint8_t colors[]; // Set up a queue to keep track of which cells need exploring int start = 0; int end = 1; uint8_t *queue = (uint8_t*)&colors[(3*threadIdx.x+0)*S*S]; queue[0] = row; queue[1] = col; // Set up a mask to keep track of which cells we've already seen uint8_t *seen = (uint8_t*)&colors[(3*threadIdx.x+2)*S*S]; for (int s=0; s<S*S; s++) { seen[s] = 0; } while (true) { // See if there's anything left in the queue if (start == end) { break; } // Pull a value out of the queue int r0 = queue[2*start+0]; int c0 = queue[2*start+1]; start += 1; uint8_t cell_val = board[b][r0][c0]; // If the old and new vals are the same, continue flooding! if (cell_val == old_val) { // Put the new value into place board[b][r0][c0] = new_val; // and add the neighbours to the queue for (int n=0; n<6; n++) { int r = r0 + neighbours[n][0]; int c = c0 + neighbours[n][1]; // but only if they're not over the edge if ((0 <= r) && (r < S) && (0 <= c) && (c < S)) { // and we haven't seen them already if (!seen[r*S+c]) { queue[2*end+0] = r; queue[2*end+1] = c; end += 1; seen[r*S+c] = 1; } } } } } } __global__ void step_kernel( C3D::PTA board, I1D::PTA seats, I1D::PTA actions, F2D::PTA results) { const uint B = board.size(0); const uint S = board.size(1); const int b = blockIdx.x*blockDim.x + threadIdx.x; if (b >= B) return; const int seat = seats[b]; // Swap rows and cols if we're playing white const int action = actions[b]; int row, col; if (seat == 0) { row = action / S, col = action % S; } else { row = action % S, col = action / S; } // Set up the adjacency indicator bool adj[9]; for (int a=0; a<9; a++) { adj[a] = false; } const int neighbours[6][2] = {{-1, 0}, {-1, +1}, {0, -1}, {0, +1}, {+1, -1}, {+1, 0}}; // Populate the adjacency indicator for (int n=0; n<6; n++) { int r = row + neighbours[n][0]; int c = col + neighbours[n][1]; if (r < 0) { adj[TOP] = true; } else if (r >= S) { adj[BOT] = true; } else if (c < 0) { adj[LEFT] = true; } else if (c >= S) { adj[RIGHT] = true; } else { adj[board[b][r][c]] = true; } } // Use the adjacency to decide what the new cell should be char new_val; if (seat) { if (adj[LEFT] && adj[RIGHT]) { results[b][0] = -1.f; results[b][1] = +1.f; } if (adj[LEFT]) { new_val = LEFT; } else if (adj[RIGHT]) { new_val = RIGHT; } else { new_val = WHITE; } } else { if (adj[TOP] && adj[BOT]) { results[b][0] = +1.f; results[b][1] = -1.f; } if (adj[TOP]) { new_val = TOP; } else if (adj[BOT]) { new_val = BOT; } else { new_val = BLACK; } } board[b][row][col] = seat? 
WHITE : BLACK; flood(board, row, col, new_val); } __host__ TT step(TT board, TT seats, TT actions) { c10::hip::HIPGuardMasqueradingAsCUDA g(board.device()); const uint B = board.size(0); const uint S = board.size(1); TT results = board.new_zeros({B, 2}, at::kFloat); const uint n_blocks = (B + BLOCK - 1)/BLOCK; hipLaunchKernelGGL(( step_kernel), dim3({n_blocks}), dim3({BLOCK}), BLOCK*S*S*3*sizeof(uint8_t), stream(), C3D(board).pta(), I1D(seats).pta(), I1D(actions).pta(), F2D(results).pta()); C10_HIP_CHECK(hipGetLastError()); return results; } __global__ void observe_kernel(C3D::PTA board, I1D::PTA seats, F4D::PTA obs) { const uint B = board.size(0); const uint S = board.size(1); const int b = blockIdx.x*blockDim.x + threadIdx.x; if (b >= B) return; extern __shared__ uint8_t shared[]; uint8_t* colors = (uint8_t*)&shared[threadIdx.x*S*S]; // Copy the color into shared memory for (int i=0; i<S; i++) { for (int j=0; j<S; j++) { auto c = board[b][i][j]; uint8_t color = 2; if ((c == BLACK) | (c == TOP) | (c == BOT)) { color = 0; } else if ((c == WHITE) | (c == LEFT) | (c == RIGHT)) { color = 1; } colors[i*S+j] = color; } } // Copy the colors to the obs auto flip = seats[b] == 1; for (int i=0; i<S; i++) { for (int j=0; j<S; j++) { auto idx = flip? (j*S+i) : (i*S+j); auto c = colors[idx]; // printf("%d/%d %d/%d %d/%d %d/%d\n", b, B, i, S, j, S, c, 2); if (c < 2) { if (flip){ obs[b][i][j][1-c] = 1.f; } else { obs[b][i][j][c] = 1.f; } } } } } __host__ TT observe(TT board, TT seats) { c10::hip::HIPGuardMasqueradingAsCUDA g(board.device()); auto flatboard = board.clone().view({-1, board.size(-1), board.size(-1)}); auto flatseats = seats.clone().view({-1}).to(at::kInt); const uint B = flatboard.size(0); const uint S = flatboard.size(1); auto obs = flatboard.new_zeros({B, S, S, 2}, at::kFloat); const uint n_blocks = (B + BLOCK - 1)/BLOCK; hipLaunchKernelGGL(( observe_kernel), dim3({n_blocks}), dim3({BLOCK}), BLOCK*S*S*sizeof(uint8_t), stream(), C3D(flatboard).pta(), I1D(flatseats).pta(), F4D(obs).pta()); C10_HIP_CHECK(hipGetLastError()); auto sizes = board.sizes().vec(); sizes.push_back(2); return obs.view(sizes); } }
c5db4b0d9b208569406b649fad3ba8ccf1c5b330.cu
#include "../../cpp/kernels.cu" #include "common.h" namespace hexcuda { const uint BLOCK = 8; enum { EMPTY, BLACK, WHITE, TOP, BOT, LEFT, RIGHT }; __device__ void flood(C3D::PTA board, int row, int col, uint8_t new_val) { const uint S = board.size(1); const int b = blockIdx.x*blockDim.x + threadIdx.x; const int neighbours[6][2] = {{-1, 0}, {-1, +1}, {0, -1}, {0, +1}, {+1, -1}, {+1, 0}}; // If we don't need to flood the value, break if (new_val < TOP) { return; } uint8_t old_val = board[b][row][col]; extern __shared__ uint8_t colors[]; // Set up a queue to keep track of which cells need exploring int start = 0; int end = 1; uint8_t *queue = (uint8_t*)&colors[(3*threadIdx.x+0)*S*S]; queue[0] = row; queue[1] = col; // Set up a mask to keep track of which cells we've already seen uint8_t *seen = (uint8_t*)&colors[(3*threadIdx.x+2)*S*S]; for (int s=0; s<S*S; s++) { seen[s] = 0; } while (true) { // See if there's anything left in the queue if (start == end) { break; } // Pull a value out of the queue int r0 = queue[2*start+0]; int c0 = queue[2*start+1]; start += 1; uint8_t cell_val = board[b][r0][c0]; // If the old and new vals are the same, continue flooding! if (cell_val == old_val) { // Put the new value into place board[b][r0][c0] = new_val; // and add the neighbours to the queue for (int n=0; n<6; n++) { int r = r0 + neighbours[n][0]; int c = c0 + neighbours[n][1]; // but only if they're not over the edge if ((0 <= r) && (r < S) && (0 <= c) && (c < S)) { // and we haven't seen them already if (!seen[r*S+c]) { queue[2*end+0] = r; queue[2*end+1] = c; end += 1; seen[r*S+c] = 1; } } } } } } __global__ void step_kernel( C3D::PTA board, I1D::PTA seats, I1D::PTA actions, F2D::PTA results) { const uint B = board.size(0); const uint S = board.size(1); const int b = blockIdx.x*blockDim.x + threadIdx.x; if (b >= B) return; const int seat = seats[b]; // Swap rows and cols if we're playing white const int action = actions[b]; int row, col; if (seat == 0) { row = action / S, col = action % S; } else { row = action % S, col = action / S; } // Set up the adjacency indicator bool adj[9]; for (int a=0; a<9; a++) { adj[a] = false; } const int neighbours[6][2] = {{-1, 0}, {-1, +1}, {0, -1}, {0, +1}, {+1, -1}, {+1, 0}}; // Populate the adjacency indicator for (int n=0; n<6; n++) { int r = row + neighbours[n][0]; int c = col + neighbours[n][1]; if (r < 0) { adj[TOP] = true; } else if (r >= S) { adj[BOT] = true; } else if (c < 0) { adj[LEFT] = true; } else if (c >= S) { adj[RIGHT] = true; } else { adj[board[b][r][c]] = true; } } // Use the adjacency to decide what the new cell should be char new_val; if (seat) { if (adj[LEFT] && adj[RIGHT]) { results[b][0] = -1.f; results[b][1] = +1.f; } if (adj[LEFT]) { new_val = LEFT; } else if (adj[RIGHT]) { new_val = RIGHT; } else { new_val = WHITE; } } else { if (adj[TOP] && adj[BOT]) { results[b][0] = +1.f; results[b][1] = -1.f; } if (adj[TOP]) { new_val = TOP; } else if (adj[BOT]) { new_val = BOT; } else { new_val = BLACK; } } board[b][row][col] = seat? 
WHITE : BLACK; flood(board, row, col, new_val); } __host__ TT step(TT board, TT seats, TT actions) { c10::cuda::CUDAGuard g(board.device()); const uint B = board.size(0); const uint S = board.size(1); TT results = board.new_zeros({B, 2}, at::kFloat); const uint n_blocks = (B + BLOCK - 1)/BLOCK; step_kernel<<<{n_blocks}, {BLOCK}, BLOCK*S*S*3*sizeof(uint8_t), stream()>>>( C3D(board).pta(), I1D(seats).pta(), I1D(actions).pta(), F2D(results).pta()); C10_CUDA_CHECK(cudaGetLastError()); return results; } __global__ void observe_kernel(C3D::PTA board, I1D::PTA seats, F4D::PTA obs) { const uint B = board.size(0); const uint S = board.size(1); const int b = blockIdx.x*blockDim.x + threadIdx.x; if (b >= B) return; extern __shared__ uint8_t shared[]; uint8_t* colors = (uint8_t*)&shared[threadIdx.x*S*S]; // Copy the color into shared memory for (int i=0; i<S; i++) { for (int j=0; j<S; j++) { auto c = board[b][i][j]; uint8_t color = 2; if ((c == BLACK) | (c == TOP) | (c == BOT)) { color = 0; } else if ((c == WHITE) | (c == LEFT) | (c == RIGHT)) { color = 1; } colors[i*S+j] = color; } } // Copy the colors to the obs auto flip = seats[b] == 1; for (int i=0; i<S; i++) { for (int j=0; j<S; j++) { auto idx = flip? (j*S+i) : (i*S+j); auto c = colors[idx]; // printf("%d/%d %d/%d %d/%d %d/%d\n", b, B, i, S, j, S, c, 2); if (c < 2) { if (flip){ obs[b][i][j][1-c] = 1.f; } else { obs[b][i][j][c] = 1.f; } } } } } __host__ TT observe(TT board, TT seats) { c10::cuda::CUDAGuard g(board.device()); auto flatboard = board.clone().view({-1, board.size(-1), board.size(-1)}); auto flatseats = seats.clone().view({-1}).to(at::kInt); const uint B = flatboard.size(0); const uint S = flatboard.size(1); auto obs = flatboard.new_zeros({B, S, S, 2}, at::kFloat); const uint n_blocks = (B + BLOCK - 1)/BLOCK; observe_kernel<<<{n_blocks}, {BLOCK}, BLOCK*S*S*sizeof(uint8_t), stream()>>>( C3D(flatboard).pta(), I1D(flatseats).pta(), F4D(obs).pta()); C10_CUDA_CHECK(cudaGetLastError()); auto sizes = board.sizes().vec(); sizes.push_back(2); return obs.view(sizes); } }
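// A minimal host-side sketch of driving the step/observe entry points defined above,
// assuming TT (from the common.h included above, but not shown in this dump) is an
// alias for at::Tensor and that EMPTY (0) encodes an empty cell; both are assumptions,
// not facts taken from the file. Shapes follow the kernels: board is (B, S, S) uint8,
// seats and actions are (B,) int32, all resident on the CUDA device.
#include <torch/torch.h>

void hexcuda_demo() {
    const int64_t B = 4;   // batch of four games (illustrative)
    const int64_t S = 11;  // 11x11 Hex board (illustrative)

    auto u8  = torch::TensorOptions().dtype(torch::kUInt8).device(torch::kCUDA);
    auto i32 = torch::TensorOptions().dtype(torch::kInt).device(torch::kCUDA);

    auto board   = torch::zeros({B, S, S}, u8);        // all cells EMPTY
    auto seats   = torch::zeros({B}, i32);             // black to move in every game
    auto actions = torch::full({B}, 5 * S + 5, i32);   // play the centre cell

    // step() mutates the board in place and returns a (B, 2) float tensor of
    // rewards (+1/-1 once a side connects its two edges).
    auto results = hexcuda::step(board, seats, actions);

    // observe() returns (B, S, S, 2) one-hot colour planes from the mover's view.
    auto obs = hexcuda::observe(board, seats);
}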
af054cded735b45bc4b34f87efcb6c17e2e206a6.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "CycleTimer.h" #include "matrix.h" // Integer division, rounding up static inline int updiv(int n, int d) { return (n+d-1)/d; } /* Transpose matrix */ __global__ void cudaTransposeKernel(int N, const float *dmatS, float *dmatD) { int i = blockIdx.y * blockDim.y + threadIdx.y; int j = blockIdx.x * blockDim.x + threadIdx.x; if (i >= N || j >= N) return; dmatD[CM(i,j,N)] = dmatS[RM(i,j,N)]; } __global__ void cudaSimpleKernelOld(int N, float *dmatA, float *dmatB, float *dmatC) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i >= N || j >= N) return; float sum = 0.0; for (int k = 0; k < N; k++) { sum += dmatA[RM(i,k,N)] * dmatB[RM(k,j,N)]; } dmatC[RM(i,j,N)] = sum; } __global__ void cudaSimpleKernel(int N, float* dmatA, float* dmatB, float * dmatC) { int i = blockIdx.y * blockDim.y + threadIdx.y; int j = blockIdx.x * blockDim.x + threadIdx.x; if (i >= N || j >= N) return; float sum = 0.0; for (int k = 0; k < N; k++) { sum += dmatA[RM(i,k,N)] * dmatB[RM(k,j,N)]; } dmatC[RM(i,j,N)] = sum; } __global__ void cudaTransposedKernel(int N, float *dmatA, float *dmatB, float *dmatC) { int i = blockIdx.y * blockDim.y + threadIdx.y; int j = blockIdx.x * blockDim.x + threadIdx.x; if (i >= N || j >= N) return; float sum = 0.0; for (int k = 0; k < N; k++) { sum += dmatA[RM(i,k,N)] * dmatB[CM(k,j,N)]; } dmatC[RM(i,j,N)] = sum; } __global__ void cudaBlockKernelOld(int N, float *dmatA, float *dmatB, float *dmatC) { // Assume that thread block contains submatrix of size LBLK x LBLK int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int bi = threadIdx.x; int bj = threadIdx.y; float sum = 0.0; // Accumulate result for C[i][j] // Shared space for two submatrices of A and B __shared__ float subA[LBLK*LBLK]; __shared__ float subB[LBLK*LBLK]; // Loop over k to compute product of all submatrices A[i][k] and B[k][j] for (int k = 0; k < N; k+= LBLK) { // Grab the two submatrices if (i < N && k+bj < N) subA[RM(bi,bj,LBLK)] = dmatA[RM(i,k+bj,N)]; else subA[RM(bi,bj,LBLK)] = 0.0; if (j < N && k+bi < N) subB[RM(bi,bj,LBLK)] = dmatB[RM(k+bi,j,N)]; else subB[RM(bi,bj,LBLK)] = 0.0; // Wait until entire block gets filled __syncthreads(); // Generate contribution to C[i][j] of these submatrices for (int bk = 0; bk < LBLK; bk++) sum += subA[RM(bi,bk,LBLK)] * subB[RM(bk,bj,LBLK)]; // Wait until all products computed __syncthreads(); } if (i < N && j < N) dmatC[RM(i,j,N)] = sum; } __global__ void cudaBlockKernelCoarse(int N, float *dmatA, float *dmatB, float *dmatC) { int i = blockIdx.y * blockDim.y + threadIdx.y; i *= LBLK; /* Threads --> submatrices */ int j = blockIdx.x * blockDim.x + threadIdx.x; j *= LBLK; float subA[LBLK * LBLK]; float subB[LBLK * LBLK]; float subC[LBLK * LBLK]; for (int bi = 0; bi < LBLK; bi++) /* Zero out C */ for (int bj = 0; bj < LBLK; bj++) subC[RM(bi,bj,LBLK)] = 0; for (int k = 0; k <= N-LBLK; k+=LBLK) { /* Compute product for each submatrix */ for (int bi = 0; bi < LBLK; bi++) { for (int bj = 0; bj < LBLK; bj++) { subA[RM(bi,bj,LBLK)] = dmatA[RM(i+bi,k+bj,N)]; subB[RM(bi,bj,LBLK)] = dmatB[RM(k+bi,j+bj,N)]; } } for (int bi = 0; bi < LBLK; bi++) { for (int bj = 0; bj < LBLK; bj++) { float sum = 0.0; for (int bk = 0; bk < LBLK; bk++) { sum += subA[RM(bi,bk,LBLK)] * subB[RM(bk,bj,LBLK)]; } subC[RM(bi,bj,LBLK)] += sum; } } } for (int bi = 0; bi < LBLK; bi++) for (int bj = 
0; bj < LBLK; bj++) dmatC[RM(i+bi,j+bj,N)] = subC[RM(bi,bj,LBLK)]; } #if 1 __global__ void cudaBlockKernel(int N, float *dmatA, float *dmatB, float *dmatC) { int i = blockIdx.y * blockDim.y + threadIdx.y; int j = blockIdx.x * blockDim.x + threadIdx.x; int bi = threadIdx.y; int bj = threadIdx.x; __shared__ float subA[LBLK * LBLK]; __shared__ float subB[LBLK * LBLK]; float sum = 0; for (int k = 0; k < N; k += LBLK) { subA[RM(bi,bj,LBLK)] = dmatA[RM(i,k+bj,N)]; subB[RM(bi,bj,LBLK)] = dmatB[RM(k+bi,j,N)]; __syncthreads(); for (int bk = 0; bk < LBLK; bk++) { sum += subA[RM(bi,bk,LBLK)] * subB[RM(bk,bj,LBLK)]; } __syncthreads(); } dmatC[RM(i,j,N)] = sum; } #else __global__ void cudaBlockKernel(int N, float *dmatA, float *dmatB, float *dmatC) { // Assume that thread block contains submatrix of size LBLK x LBLK int i = blockIdx.y * blockDim.y + threadIdx.y; int j = blockIdx.x * blockDim.x + threadIdx.x; int bi = threadIdx.y; int bj = threadIdx.x; float sum = 0.0; // Accumulate result for C[i][j] // Shared space for two submatrices of A and B __shared__ float subA[LBLK*LBLK]; __shared__ float subB[LBLK*LBLK]; // Loop over k to compute product of all submatrices A[i][k] and B[k][j] for (int k = 0; k < N; k+= LBLK) { // Grab the two submatrices if (i < N && k+bj < N) subA[RM(bi,bj,LBLK)] = dmatA[RM(i,k+bj,N)]; else subA[RM(bi,bj,LBLK)] = 0.0; if (j < N && k+bi < N) subB[RM(bi,bj,LBLK)] = dmatB[RM(k+bi,j,N)]; else subB[RM(bi,bj,LBLK)] = 0.0; // Wait until entire block gets filled __syncthreads(); // Generate contribution to C[i][j] of these submatrices for (int bk = 0; bk < LBLK; bk++) sum += subA[RM(bi,bk,LBLK)] * subB[RM(bk,bj,LBLK)]; // Wait until all products computed __syncthreads(); } if (i < N && j < N) dmatC[RM(i,j,N)] = sum; } #endif // Transpose submatrix of B as read it in. Decreases performance. __global__ void cudaBlockTransposeKernel(int N, float *dmatA, float *dmatB, float *dmatC) { // Assume that thread block contains submatrix of size LBLK x LBLK int i = blockIdx.y * blockDim.y + threadIdx.y; int j = blockIdx.x * blockDim.x + threadIdx.x; int bi = threadIdx.y; int bj = threadIdx.x; float sum = 0.0; // Accumulate result for C[i][j] // Shared space for two submatrices of A and B __shared__ float subA[LBLK*LBLK]; __shared__ float subB[LBLK*LBLK]; // Loop over k to compute product of all submatrices A[i][k] and B[k][j] for (int k = 0; k < N; k+= LBLK) { // Grab the two submatrices if (i < N && k+bj < N) subA[RM(bi,bj,LBLK)] = dmatA[RM(i,k+bj,N)]; else subA[RM(bi,bj,LBLK)] = 0.0; if (j < N && k+bi < N) subB[CM(bi,bj,LBLK)] = dmatB[RM(k+bi,j,N)]; else subB[CM(bi,bj,LBLK)] = 0.0; // Wait until entire block gets filled __syncthreads(); // Generate contribution to C[i][j] of these submatrices for (int bk = 0; bk < LBLK; bk++) sum += subA[RM(bi,bk,LBLK)] * subB[CM(bk,bj,LBLK)]; // Wait until all products computed __syncthreads(); } if (i < N && j < N) dmatC[RM(i,j,N)] = sum; } // The following version only works when N is a multiple of 4 // Each Cuda block handles 4 elements per thread to increase work per thread // and uses wider accesses to memory. // Each Cuda block has 64 threads in y dimension (rows) // and 16 threads in x dimension (columns) // Each thread generates elements C[i][j] ... C[i][j+3] of the product #define NROW 64 #define NCOL 16 // Structure data as float4's, with NCOL of them in each column union mdata_t { float f[4]; float4 f4; }; __global__ void cudaBlockQuadKernel(int N, float* dmatA, float* dmatB, float * dmatC) { // Prefix Key: // s: scaled. Divided by 4. 
Used when indexing columns // b: block. Used to refer to elements within block // No prefix. Used to refer to elements in global array // // Indexes into row of array int i = blockIdx.y * blockDim.y + threadIdx.y; // Indexes into column, but in units of float4's int sj = blockIdx.x * blockDim.x + threadIdx.x; int bi = threadIdx.y; // Ranges between 0 and NROW-1 int sbj = threadIdx.x; // Ranges between 0 and NCOL-1 int sN = N/4; // Number of float4's in each row of matrices // Representing source & destination matrices as float4's: float4 *matAf4 = (float4 *) dmatA; float4 *matBf4 = (float4 *) dmatB; float4 *matCf4 = (float4 *) dmatC; /* Accumulate 4 elements in row of C */ mdata_t sums; sums.f[0] = sums.f[1] = sums.f[2] = sums.f[3] = 0.0; mdata_t zeros; zeros.f[0] = zeros.f[1] = zeros.f[2] = zeros.f[3] = 0.0; // Shared space for two submatrices of A and B __shared__ mdata_t subA[NROW*NCOL]; __shared__ mdata_t subB[NROW*NCOL]; // Loop over k to compute product of all submatrices A[i][k] and B[k][j] for (int sk = 0; sk < sN; sk += NCOL) { int k = sk * 4; // Read the two submatrices from global memory if (i < N && sk+sbj < sN) subA[RM(bi,sbj,NCOL)].f4 = matAf4[RM(i,sk+sbj,sN)]; else subA[RM(bi,sbj,NCOL)].f4 = zeros.f4; if (sj < sN && k+bi < N) subB[RM(bi,sbj,NCOL)].f4 = matBf4[RM(k+bi,sj,sN)]; else subB[RM(bi,sbj,NCOL)].f4 = zeros.f4; // Wait until entire block gets filled __syncthreads(); // Generate contribution to C[i][4*sj] .. C[i][4*sj+3] for (int sbk = 0; sbk < NCOL; sbk++) { int bk = 4*sbk; mdata_t a = subA[RM(bi,sbk,NCOL)]; mdata_t bfill[4]; bfill[0] = subB[RM(bk+0,sbj,NCOL)]; bfill[1] = subB[RM(bk+1,sbj,NCOL)]; bfill[2] = subB[RM(bk+2,sbj,NCOL)]; bfill[3] = subB[RM(bk+3,sbj,NCOL)]; float *b = (float *) &bfill; for (int tj = 0; tj < 4; tj++) { sums.f[tj] += a.f[0] * b[RM(0,tj,4)] + a.f[1] * b[RM(1,tj,4)] + a.f[2] * b[RM(2,tj,4)] + a.f[3] * b[RM(3,tj,4)]; } } // Wait until all products computed __syncthreads(); } /* Store 4 elements into C */ if (i < N && sj < sN) matCf4[RM(i,sj,sN)] = sums.f4; } // nVidia kernel. 
Only works when N multiple of block size #define BLOCK_SIZE LBLK __global__ void matrixMulCUDA(float *C, float *A, float *B, int wA, int wB) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; // Index of the first sub-matrix of A processed by the block int aBegin = wA * BLOCK_SIZE * by; // Index of the last sub-matrix of A processed by the block int aEnd = aBegin + wA - 1; // Step size used to iterate through the sub-matrices of A int aStep = BLOCK_SIZE; // Index of the first sub-matrix of B processed by the block int bBegin = BLOCK_SIZE * bx; // Step size used to iterate through the sub-matrices of B int bStep = BLOCK_SIZE * wB; // Csub is used to store the element of the block sub-matrix // that is computed by the thread float Csub = 0; // Loop over all the sub-matrices of A and B // required to compute the block sub-matrix for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { // Declaration of the shared memory array As used to // store the sub-matrix of A __shared__ float As[BLOCK_SIZE][BLOCK_SIZE]; // Declaration of the shared memory array Bs used to // store the sub-matrix of B __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE]; // Load the matrices from device memory // to shared memory; each thread loads // one element of each matrix As[ty][tx] = A[a + wA * ty + tx]; Bs[ty][tx] = B[b + wB * ty + tx]; // Synchronize to make sure the matrices are loaded __syncthreads(); // Multiply the two matrices together; // each thread computes one element // of the block sub-matrix for (int k = 0; k < BLOCK_SIZE; ++k) { Csub += As[ty][k] * Bs[k][tx]; } // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next iteration __syncthreads(); } // Write the block sub-matrix to device memory; // each thread writes one element int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx; C[c + wB * ty + tx] = Csub; } // Old version. Has synchronization problems __global__ void cudaSmallBlockKernel(int N, float *dmatA, float *dmatB, float *dmatC) { // Assume that thread block contains submatrix of size SBLK x SBLK // Have SBLK extra threads available to serve as third index. 
// These are all within a single warp int bk = threadIdx.x; // Range within single warp int i = blockIdx.z * blockDim.z + threadIdx.z; int j = blockIdx.y * blockDim.y + threadIdx.y; int bi = threadIdx.z; int bj = threadIdx.y; // Shared space for two submatrices of A and B __shared__ float subA[SBLK*SBLK]; __shared__ float subB[SBLK*SBLK]; // Shared space for partial products __shared__ float vals[SBLK*SBLK*SBLK]; float sum = 0.0; // Loop over k to compute product of all submatrices A[i][k] and B[k][j] for (int k = 0; k < N; k+= SBLK) { // Designate threads with bk == 0 to fill A if (bk == 0) { // Grab the two submatrices if (i < N && k+bj < N) subA[RM(bi,bj,SBLK)] = dmatA[RM(i,k+bj,N)]; else subA[RM(bi,bj,SBLK)] = 0.0; } // Designate threads with bk == 1 to fill B if (bk == 1) { if (j < N && k+bi < N) subB[RM(bi,bj,SBLK)] = dmatB[RM(k+bi,j,N)]; else subB[RM(bi,bj,SBLK)] = 0.0; } // Wait until entire block gets filled __syncthreads(); // Compute all partial products of the submatrices vals[RM3(bi,bj,bk,SBLK)] = subA[RM(bi,bk,SBLK)] * subB[RM(bk,bj,SBLK)]; // Wait until partial products computed __syncthreads(); // Sum the values across the value of bk using tree reduction // These are all in same warp, but cannot guarantee synchronization float v1, v2; v1 = vals[RM3(bi,bj,bk,SBLK)]; v2 = vals[RM3(bi,bj,bk+1,SBLK)]; if (bk % 2 == 0) { vals[RM3(bi,bj,bk,SBLK)] = v1 + v2; } __syncthreads(); // Shouldn't need this v1 = vals[RM3(bi,bj,bk,SBLK)]; v2 = vals[RM3(bi,bj,bk+2,SBLK)]; if (bk % 4 == 0) { vals[RM3(bi,bj,bk,SBLK)] = v1 + v2; } __syncthreads(); // Shouldn't need this v1 = vals[RM3(bi,bj,bk,SBLK)]; v2 = vals[RM3(bi,bj,bk+4,SBLK)]; if (bk % 8 == 0) { sum += v1 + v2; } __syncthreads(); } if (i < N && j < N && bk == 0) dmatC[RM(i,j,N)] = sum; } /* Preallocated blocks */ static int allocN = -1; static float *aDevData = NULL; static float *bDevData = NULL; static float *tDevData = NULL; static float *gDevData = NULL; static float *sDevData = NULL; static float *tHostData = NULL; static float *gHostData = NULL; void cudaSetup(int N, float *aData, float *bData, float *gData) { if (allocN == N) return; if (allocN > 0) { hipFree(sDevData); hipFree(aDevData); hipFree(bDevData); hipFree(tDevData); hipFree(gDevData); } if (N > 0) { hipMalloc((void **) &aDevData, N*N * sizeof(float)); hipMalloc((void **) &bDevData, N*N * sizeof(float)); hipMalloc((void **) &tDevData, N*N * sizeof(float)); hipMalloc((void **) &sDevData, N*N * sizeof(float)); tHostData = (float *) calloc(N*N, sizeof(float)); } gHostData = gData; hipMemcpy(aDevData, aData, N*N * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(bDevData, bData, N*N * sizeof(float), hipMemcpyHostToDevice); allocN = N; } // Get scratch for matrix static float *cudaScratchMatrix(int N) { if (allocN != N) { setup(N); } return sDevData; } void cudaMultMatrixSimpleOld(int N, float *dmatA, float *dmatB, float *dmatC) { dim3 threadsPerBlock(LBLK, LBLK); dim3 blocks(updiv(N, LBLK), updiv(N, LBLK)); hipLaunchKernelGGL(( cudaSimpleKernelOld), dim3(blocks), dim3(threadsPerBlock), 0, 0, N, dmatA, dmatB, dmatC); } void cudaMultMatrixSimple(int N, float *dmatA, float *dmatB, float *dmatC) { dim3 threadsPerBlock(LBLK, LBLK); dim3 blocks(updiv(N, LBLK), updiv(N, LBLK)); hipLaunchKernelGGL(( cudaSimpleKernel), dim3(blocks), dim3(threadsPerBlock), 0, 0, N, dmatA, dmatB, dmatC); } void cudaMultMatrixTransposed(int N, float *dmatA, float *dmatB, float *dmatC) { dim3 threadsPerBlock(LBLK, LBLK); dim3 blocks(updiv(N, LBLK), updiv(N, LBLK)); float *tranB = cudaScratchMatrix(N); 
hipLaunchKernelGGL(( cudaTransposeKernel), dim3(blocks), dim3(threadsPerBlock), 0, 0, N, dmatB, tranB); hipLaunchKernelGGL(( cudaTransposedKernel), dim3(blocks), dim3(threadsPerBlock), 0, 0, N, dmatA, tranB, dmatC); } void cudaMultMatrixBlocked(int N, float *dmatA, float *dmatB, float *dmatC) { dim3 threadsPerBlock(LBLK, LBLK); dim3 blocks(updiv(N, LBLK), updiv(N, LBLK)); hipLaunchKernelGGL(( cudaBlockKernel), dim3(blocks), dim3(threadsPerBlock), 0, 0, N, dmatA, dmatB, dmatC); } void cudaMultMatrixBlockedCoarse(int N, float *dmatA, float *dmatB, float *dmatC) { dim3 threadsPerBlock(LBLK, LBLK); dim3 blocks(updiv(updiv(N, LBLK), LBLK), updiv(updiv(N, LBLK), LBLK)); // printf("Spawning %d %d blocks of %d %d threads\n", blocks.x, blocks.y, threadsPerBlock.x, threadsPerBlock.y); hipLaunchKernelGGL(( cudaBlockKernelCoarse), dim3(blocks), dim3(threadsPerBlock), 0, 0, N, dmatA, dmatB, dmatC); } void cudaMultMatrixBlockedOld(int N, float *dmatA, float *dmatB, float *dmatC) { dim3 threadsPerBlock(LBLK, LBLK); dim3 blocks(updiv(N, LBLK), updiv(N, LBLK)); hipLaunchKernelGGL(( cudaBlockKernelOld), dim3(blocks), dim3(threadsPerBlock), 0, 0, N, dmatA, dmatB, dmatC); } void cudaMultMatrixBlockedQuad(int N, float *dmatA, float *dmatB, float *dmatC) { dim3 threadsPerBlock(NCOL,NROW); // Have same N/NROW blocks in both dimensions, // since each block computes NROW x NROW portion of product dim3 blocks(updiv(N, NROW), updiv(N, NROW)); hipLaunchKernelGGL(( cudaBlockQuadKernel), dim3(blocks), dim3(threadsPerBlock), 0, 0, N, dmatA, dmatB, dmatC); } void cudaMultMatrixNvidia(int N, float *dmatA, float *dmatB, float *dmatC) { dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 blocks(updiv(N, BLOCK_SIZE), updiv(N, BLOCK_SIZE)); hipLaunchKernelGGL(( matrixMulCUDA), dim3(blocks), dim3(threadsPerBlock), 0, 0, dmatC, dmatA, dmatB, N, N); } void cudaMultMatrixSmallBlocked(int N, float *dmatA, float *dmatB, float *dmatC) { dim3 threadsPerBlock(SBLK, SBLK, SBLK); dim3 blocks(1, updiv(N, SBLK), updiv(N, SBLK)); hipLaunchKernelGGL(( cudaSmallBlockKernel), dim3(blocks), dim3(threadsPerBlock), 0, 0, N, dmatA, dmatB, dmatC); } static int cudaRunMM(int N, mmul_t method) { switch (method) { case MMUL_CUDA_OLD_REFERENCE: cudaMultMatrixSimpleOld(N, aDevData, bDevData, tDevData); break; case MMUL_CUDA_REFERENCE: cudaMultMatrixSimple(N, aDevData, bDevData, tDevData); break; case MMUL_CUDA_TRANSPOSE: cudaMultMatrixTransposed(N, aDevData, bDevData, tDevData); break; case MMUL_CUDA_BLK: cudaMultMatrixBlocked(N, aDevData, bDevData, tDevData); break; case MMUL_CUDA_OLD_BLK: cudaMultMatrixBlockedOld(N, aDevData, bDevData, tDevData); break; case MMUL_CUDA_BLK_COARSE: cudaMultMatrixBlockedCoarse(N, aDevData, bDevData, tDevData); break; case MMUL_CUDA_NVIDIA: cudaMultMatrixNvidia(N, aDevData, bDevData, tDevData); break; case MMUL_CUDA_SMALL_BLK: cudaMultMatrixSmallBlocked(N, aDevData, bDevData, tDevData); break; case MMUL_CUDA_QUAD_BLK: cudaMultMatrixBlockedQuad(N, aDevData, bDevData, tDevData); break; default: fprintf(stderr, "Haven't implemented method yet\n"); return 0; } return 1; } double cudaBenchMM(int N, mmul_t method) { // Should already have done the setup if (allocN != N) { setup(N); } if (!cudaRunMM(N, method)) return 1000.0; hipMemcpy(tHostData, tDevData, N*N*sizeof(float), hipMemcpyDeviceToHost); if (checkMatrix(N, tHostData, gHostData) > 0) return 1000.0; /* Now do the real benchmarking */ long ops = (long) 2 * N * N * N; long runs = (targetOps+ops-1)/ops; double startTime = CycleTimer::currentSeconds(); for (long r = 0; r < 
runs; r++) cudaRunMM(N, method); hipDeviceSynchronize(); double endTime = CycleTimer::currentSeconds(); double ms = (endTime - startTime) * 1000.0; double gflops = (long) (runs*ops)/ms * 1e-6; fprintf(stderr, "%ld runs, %ld ops/run, %.2f ms, %.3f GFlops\n", runs, ops, ms, gflops); return gflops; } void printCudaInfo() { // for fun, just print out some stats on the machine int deviceCount = 0; hipError_t err = hipGetDeviceCount(&deviceCount); printf("---------------------------------------------------------\n"); printf("Found %d CUDA devices\n", deviceCount); for (int i=0; i<deviceCount; i++) { hipDeviceProp_t deviceProps; hipGetDeviceProperties(&deviceProps, i); printf("Device %d: %s\n", i, deviceProps.name); printf(" SMs: %d\n", deviceProps.multiProcessorCount); printf(" Global mem: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024)); printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor); } printf("---------------------------------------------------------\n"); }
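// The matrix kernels above index flat buffers through RM/CM/RM3 helpers that live in
// matrix.h, which is not part of this dump. The definitions below are an assumption
// about their likely shape (row-major, column-major and 3-D row-major indexing),
// written out only to make the kernel arithmetic easier to follow; LBLK and SBLK are
// likewise block-size constants expected to come from the same header.
#define RM(i, j, n)      ((i) * (n) + (j))                 /* row-major 2-D index    */
#define CM(i, j, n)      ((j) * (n) + (i))                 /* column-major 2-D index */
#define RM3(i, j, k, n)  (((i) * (n) + (j)) * (n) + (k))   /* row-major 3-D index    */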
af054cded735b45bc4b34f87efcb6c17e2e206a6.cu
#include <stdio.h> #include <cuda.h> #include <cuda_runtime.h> #include "CycleTimer.h" #include "matrix.h" // Integer division, rounding up static inline int updiv(int n, int d) { return (n+d-1)/d; } /* Transpose matrix */ __global__ void cudaTransposeKernel(int N, const float *dmatS, float *dmatD) { int i = blockIdx.y * blockDim.y + threadIdx.y; int j = blockIdx.x * blockDim.x + threadIdx.x; if (i >= N || j >= N) return; dmatD[CM(i,j,N)] = dmatS[RM(i,j,N)]; } __global__ void cudaSimpleKernelOld(int N, float *dmatA, float *dmatB, float *dmatC) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i >= N || j >= N) return; float sum = 0.0; for (int k = 0; k < N; k++) { sum += dmatA[RM(i,k,N)] * dmatB[RM(k,j,N)]; } dmatC[RM(i,j,N)] = sum; } __global__ void cudaSimpleKernel(int N, float* dmatA, float* dmatB, float * dmatC) { int i = blockIdx.y * blockDim.y + threadIdx.y; int j = blockIdx.x * blockDim.x + threadIdx.x; if (i >= N || j >= N) return; float sum = 0.0; for (int k = 0; k < N; k++) { sum += dmatA[RM(i,k,N)] * dmatB[RM(k,j,N)]; } dmatC[RM(i,j,N)] = sum; } __global__ void cudaTransposedKernel(int N, float *dmatA, float *dmatB, float *dmatC) { int i = blockIdx.y * blockDim.y + threadIdx.y; int j = blockIdx.x * blockDim.x + threadIdx.x; if (i >= N || j >= N) return; float sum = 0.0; for (int k = 0; k < N; k++) { sum += dmatA[RM(i,k,N)] * dmatB[CM(k,j,N)]; } dmatC[RM(i,j,N)] = sum; } __global__ void cudaBlockKernelOld(int N, float *dmatA, float *dmatB, float *dmatC) { // Assume that thread block contains submatrix of size LBLK x LBLK int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int bi = threadIdx.x; int bj = threadIdx.y; float sum = 0.0; // Accumulate result for C[i][j] // Shared space for two submatrices of A and B __shared__ float subA[LBLK*LBLK]; __shared__ float subB[LBLK*LBLK]; // Loop over k to compute product of all submatrices A[i][k] and B[k][j] for (int k = 0; k < N; k+= LBLK) { // Grab the two submatrices if (i < N && k+bj < N) subA[RM(bi,bj,LBLK)] = dmatA[RM(i,k+bj,N)]; else subA[RM(bi,bj,LBLK)] = 0.0; if (j < N && k+bi < N) subB[RM(bi,bj,LBLK)] = dmatB[RM(k+bi,j,N)]; else subB[RM(bi,bj,LBLK)] = 0.0; // Wait until entire block gets filled __syncthreads(); // Generate contribution to C[i][j] of these submatrices for (int bk = 0; bk < LBLK; bk++) sum += subA[RM(bi,bk,LBLK)] * subB[RM(bk,bj,LBLK)]; // Wait until all products computed __syncthreads(); } if (i < N && j < N) dmatC[RM(i,j,N)] = sum; } __global__ void cudaBlockKernelCoarse(int N, float *dmatA, float *dmatB, float *dmatC) { int i = blockIdx.y * blockDim.y + threadIdx.y; i *= LBLK; /* Threads --> submatrices */ int j = blockIdx.x * blockDim.x + threadIdx.x; j *= LBLK; float subA[LBLK * LBLK]; float subB[LBLK * LBLK]; float subC[LBLK * LBLK]; for (int bi = 0; bi < LBLK; bi++) /* Zero out C */ for (int bj = 0; bj < LBLK; bj++) subC[RM(bi,bj,LBLK)] = 0; for (int k = 0; k <= N-LBLK; k+=LBLK) { /* Compute product for each submatrix */ for (int bi = 0; bi < LBLK; bi++) { for (int bj = 0; bj < LBLK; bj++) { subA[RM(bi,bj,LBLK)] = dmatA[RM(i+bi,k+bj,N)]; subB[RM(bi,bj,LBLK)] = dmatB[RM(k+bi,j+bj,N)]; } } for (int bi = 0; bi < LBLK; bi++) { for (int bj = 0; bj < LBLK; bj++) { float sum = 0.0; for (int bk = 0; bk < LBLK; bk++) { sum += subA[RM(bi,bk,LBLK)] * subB[RM(bk,bj,LBLK)]; } subC[RM(bi,bj,LBLK)] += sum; } } } for (int bi = 0; bi < LBLK; bi++) for (int bj = 0; bj < LBLK; bj++) dmatC[RM(i+bi,j+bj,N)] = subC[RM(bi,bj,LBLK)]; } #if 1 
__global__ void cudaBlockKernel(int N, float *dmatA, float *dmatB, float *dmatC) { int i = blockIdx.y * blockDim.y + threadIdx.y; int j = blockIdx.x * blockDim.x + threadIdx.x; int bi = threadIdx.y; int bj = threadIdx.x; __shared__ float subA[LBLK * LBLK]; __shared__ float subB[LBLK * LBLK]; float sum = 0; for (int k = 0; k < N; k += LBLK) { subA[RM(bi,bj,LBLK)] = dmatA[RM(i,k+bj,N)]; subB[RM(bi,bj,LBLK)] = dmatB[RM(k+bi,j,N)]; __syncthreads(); for (int bk = 0; bk < LBLK; bk++) { sum += subA[RM(bi,bk,LBLK)] * subB[RM(bk,bj,LBLK)]; } __syncthreads(); } dmatC[RM(i,j,N)] = sum; } #else __global__ void cudaBlockKernel(int N, float *dmatA, float *dmatB, float *dmatC) { // Assume that thread block contains submatrix of size LBLK x LBLK int i = blockIdx.y * blockDim.y + threadIdx.y; int j = blockIdx.x * blockDim.x + threadIdx.x; int bi = threadIdx.y; int bj = threadIdx.x; float sum = 0.0; // Accumulate result for C[i][j] // Shared space for two submatrices of A and B __shared__ float subA[LBLK*LBLK]; __shared__ float subB[LBLK*LBLK]; // Loop over k to compute product of all submatrices A[i][k] and B[k][j] for (int k = 0; k < N; k+= LBLK) { // Grab the two submatrices if (i < N && k+bj < N) subA[RM(bi,bj,LBLK)] = dmatA[RM(i,k+bj,N)]; else subA[RM(bi,bj,LBLK)] = 0.0; if (j < N && k+bi < N) subB[RM(bi,bj,LBLK)] = dmatB[RM(k+bi,j,N)]; else subB[RM(bi,bj,LBLK)] = 0.0; // Wait until entire block gets filled __syncthreads(); // Generate contribution to C[i][j] of these submatrices for (int bk = 0; bk < LBLK; bk++) sum += subA[RM(bi,bk,LBLK)] * subB[RM(bk,bj,LBLK)]; // Wait until all products computed __syncthreads(); } if (i < N && j < N) dmatC[RM(i,j,N)] = sum; } #endif // Transpose submatrix of B as read it in. Decreases performance. __global__ void cudaBlockTransposeKernel(int N, float *dmatA, float *dmatB, float *dmatC) { // Assume that thread block contains submatrix of size LBLK x LBLK int i = blockIdx.y * blockDim.y + threadIdx.y; int j = blockIdx.x * blockDim.x + threadIdx.x; int bi = threadIdx.y; int bj = threadIdx.x; float sum = 0.0; // Accumulate result for C[i][j] // Shared space for two submatrices of A and B __shared__ float subA[LBLK*LBLK]; __shared__ float subB[LBLK*LBLK]; // Loop over k to compute product of all submatrices A[i][k] and B[k][j] for (int k = 0; k < N; k+= LBLK) { // Grab the two submatrices if (i < N && k+bj < N) subA[RM(bi,bj,LBLK)] = dmatA[RM(i,k+bj,N)]; else subA[RM(bi,bj,LBLK)] = 0.0; if (j < N && k+bi < N) subB[CM(bi,bj,LBLK)] = dmatB[RM(k+bi,j,N)]; else subB[CM(bi,bj,LBLK)] = 0.0; // Wait until entire block gets filled __syncthreads(); // Generate contribution to C[i][j] of these submatrices for (int bk = 0; bk < LBLK; bk++) sum += subA[RM(bi,bk,LBLK)] * subB[CM(bk,bj,LBLK)]; // Wait until all products computed __syncthreads(); } if (i < N && j < N) dmatC[RM(i,j,N)] = sum; } // The following version only works when N is a multiple of 4 // Each Cuda block handles 4 elements per thread to increase work per thread // and uses wider accesses to memory. // Each Cuda block has 64 threads in y dimension (rows) // and 16 threads in x dimension (columns) // Each thread generates elements C[i][j] ... C[i][j+3] of the product #define NROW 64 #define NCOL 16 // Structure data as float4's, with NCOL of them in each column union mdata_t { float f[4]; float4 f4; }; __global__ void cudaBlockQuadKernel(int N, float* dmatA, float* dmatB, float * dmatC) { // Prefix Key: // s: scaled. Divided by 4. Used when indexing columns // b: block. 
Used to refer to elements within block // No prefix. Used to refer to elements in global array // // Indexes into row of array int i = blockIdx.y * blockDim.y + threadIdx.y; // Indexes into column, but in units of float4's int sj = blockIdx.x * blockDim.x + threadIdx.x; int bi = threadIdx.y; // Ranges between 0 and NROW-1 int sbj = threadIdx.x; // Ranges between 0 and NCOL-1 int sN = N/4; // Number of float4's in each row of matrices // Representing source & destination matrices as float4's: float4 *matAf4 = (float4 *) dmatA; float4 *matBf4 = (float4 *) dmatB; float4 *matCf4 = (float4 *) dmatC; /* Accumulate 4 elements in row of C */ mdata_t sums; sums.f[0] = sums.f[1] = sums.f[2] = sums.f[3] = 0.0; mdata_t zeros; zeros.f[0] = zeros.f[1] = zeros.f[2] = zeros.f[3] = 0.0; // Shared space for two submatrices of A and B __shared__ mdata_t subA[NROW*NCOL]; __shared__ mdata_t subB[NROW*NCOL]; // Loop over k to compute product of all submatrices A[i][k] and B[k][j] for (int sk = 0; sk < sN; sk += NCOL) { int k = sk * 4; // Read the two submatrices from global memory if (i < N && sk+sbj < sN) subA[RM(bi,sbj,NCOL)].f4 = matAf4[RM(i,sk+sbj,sN)]; else subA[RM(bi,sbj,NCOL)].f4 = zeros.f4; if (sj < sN && k+bi < N) subB[RM(bi,sbj,NCOL)].f4 = matBf4[RM(k+bi,sj,sN)]; else subB[RM(bi,sbj,NCOL)].f4 = zeros.f4; // Wait until entire block gets filled __syncthreads(); // Generate contribution to C[i][4*sj] .. C[i][4*sj+3] for (int sbk = 0; sbk < NCOL; sbk++) { int bk = 4*sbk; mdata_t a = subA[RM(bi,sbk,NCOL)]; mdata_t bfill[4]; bfill[0] = subB[RM(bk+0,sbj,NCOL)]; bfill[1] = subB[RM(bk+1,sbj,NCOL)]; bfill[2] = subB[RM(bk+2,sbj,NCOL)]; bfill[3] = subB[RM(bk+3,sbj,NCOL)]; float *b = (float *) &bfill; for (int tj = 0; tj < 4; tj++) { sums.f[tj] += a.f[0] * b[RM(0,tj,4)] + a.f[1] * b[RM(1,tj,4)] + a.f[2] * b[RM(2,tj,4)] + a.f[3] * b[RM(3,tj,4)]; } } // Wait until all products computed __syncthreads(); } /* Store 4 elements into C */ if (i < N && sj < sN) matCf4[RM(i,sj,sN)] = sums.f4; } // nVidia kernel. 
Only works when N multiple of block size #define BLOCK_SIZE LBLK __global__ void matrixMulCUDA(float *C, float *A, float *B, int wA, int wB) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; // Index of the first sub-matrix of A processed by the block int aBegin = wA * BLOCK_SIZE * by; // Index of the last sub-matrix of A processed by the block int aEnd = aBegin + wA - 1; // Step size used to iterate through the sub-matrices of A int aStep = BLOCK_SIZE; // Index of the first sub-matrix of B processed by the block int bBegin = BLOCK_SIZE * bx; // Step size used to iterate through the sub-matrices of B int bStep = BLOCK_SIZE * wB; // Csub is used to store the element of the block sub-matrix // that is computed by the thread float Csub = 0; // Loop over all the sub-matrices of A and B // required to compute the block sub-matrix for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { // Declaration of the shared memory array As used to // store the sub-matrix of A __shared__ float As[BLOCK_SIZE][BLOCK_SIZE]; // Declaration of the shared memory array Bs used to // store the sub-matrix of B __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE]; // Load the matrices from device memory // to shared memory; each thread loads // one element of each matrix As[ty][tx] = A[a + wA * ty + tx]; Bs[ty][tx] = B[b + wB * ty + tx]; // Synchronize to make sure the matrices are loaded __syncthreads(); // Multiply the two matrices together; // each thread computes one element // of the block sub-matrix for (int k = 0; k < BLOCK_SIZE; ++k) { Csub += As[ty][k] * Bs[k][tx]; } // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next iteration __syncthreads(); } // Write the block sub-matrix to device memory; // each thread writes one element int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx; C[c + wB * ty + tx] = Csub; } // Old version. Has synchronization problems __global__ void cudaSmallBlockKernel(int N, float *dmatA, float *dmatB, float *dmatC) { // Assume that thread block contains submatrix of size SBLK x SBLK // Have SBLK extra threads available to serve as third index. 
// These are all within a single warp int bk = threadIdx.x; // Range within single warp int i = blockIdx.z * blockDim.z + threadIdx.z; int j = blockIdx.y * blockDim.y + threadIdx.y; int bi = threadIdx.z; int bj = threadIdx.y; // Shared space for two submatrices of A and B __shared__ float subA[SBLK*SBLK]; __shared__ float subB[SBLK*SBLK]; // Shared space for partial products __shared__ float vals[SBLK*SBLK*SBLK]; float sum = 0.0; // Loop over k to compute product of all submatrices A[i][k] and B[k][j] for (int k = 0; k < N; k+= SBLK) { // Designate threads with bk == 0 to fill A if (bk == 0) { // Grab the two submatrices if (i < N && k+bj < N) subA[RM(bi,bj,SBLK)] = dmatA[RM(i,k+bj,N)]; else subA[RM(bi,bj,SBLK)] = 0.0; } // Designate threads with bk == 1 to fill B if (bk == 1) { if (j < N && k+bi < N) subB[RM(bi,bj,SBLK)] = dmatB[RM(k+bi,j,N)]; else subB[RM(bi,bj,SBLK)] = 0.0; } // Wait until entire block gets filled __syncthreads(); // Compute all partial products of the submatrices vals[RM3(bi,bj,bk,SBLK)] = subA[RM(bi,bk,SBLK)] * subB[RM(bk,bj,SBLK)]; // Wait until partial products computed __syncthreads(); // Sum the values across the value of bk using tree reduction // These are all in same warp, but cannot guarantee synchronization float v1, v2; v1 = vals[RM3(bi,bj,bk,SBLK)]; v2 = vals[RM3(bi,bj,bk+1,SBLK)]; if (bk % 2 == 0) { vals[RM3(bi,bj,bk,SBLK)] = v1 + v2; } __syncthreads(); // Shouldn't need this v1 = vals[RM3(bi,bj,bk,SBLK)]; v2 = vals[RM3(bi,bj,bk+2,SBLK)]; if (bk % 4 == 0) { vals[RM3(bi,bj,bk,SBLK)] = v1 + v2; } __syncthreads(); // Shouldn't need this v1 = vals[RM3(bi,bj,bk,SBLK)]; v2 = vals[RM3(bi,bj,bk+4,SBLK)]; if (bk % 8 == 0) { sum += v1 + v2; } __syncthreads(); } if (i < N && j < N && bk == 0) dmatC[RM(i,j,N)] = sum; } /* Preallocated blocks */ static int allocN = -1; static float *aDevData = NULL; static float *bDevData = NULL; static float *tDevData = NULL; static float *gDevData = NULL; static float *sDevData = NULL; static float *tHostData = NULL; static float *gHostData = NULL; void cudaSetup(int N, float *aData, float *bData, float *gData) { if (allocN == N) return; if (allocN > 0) { cudaFree(sDevData); cudaFree(aDevData); cudaFree(bDevData); cudaFree(tDevData); cudaFree(gDevData); } if (N > 0) { cudaMalloc((void **) &aDevData, N*N * sizeof(float)); cudaMalloc((void **) &bDevData, N*N * sizeof(float)); cudaMalloc((void **) &tDevData, N*N * sizeof(float)); cudaMalloc((void **) &sDevData, N*N * sizeof(float)); tHostData = (float *) calloc(N*N, sizeof(float)); } gHostData = gData; cudaMemcpy(aDevData, aData, N*N * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(bDevData, bData, N*N * sizeof(float), cudaMemcpyHostToDevice); allocN = N; } // Get scratch for matrix static float *cudaScratchMatrix(int N) { if (allocN != N) { setup(N); } return sDevData; } void cudaMultMatrixSimpleOld(int N, float *dmatA, float *dmatB, float *dmatC) { dim3 threadsPerBlock(LBLK, LBLK); dim3 blocks(updiv(N, LBLK), updiv(N, LBLK)); cudaSimpleKernelOld<<<blocks, threadsPerBlock>>>(N, dmatA, dmatB, dmatC); } void cudaMultMatrixSimple(int N, float *dmatA, float *dmatB, float *dmatC) { dim3 threadsPerBlock(LBLK, LBLK); dim3 blocks(updiv(N, LBLK), updiv(N, LBLK)); cudaSimpleKernel<<<blocks, threadsPerBlock>>>(N, dmatA, dmatB, dmatC); } void cudaMultMatrixTransposed(int N, float *dmatA, float *dmatB, float *dmatC) { dim3 threadsPerBlock(LBLK, LBLK); dim3 blocks(updiv(N, LBLK), updiv(N, LBLK)); float *tranB = cudaScratchMatrix(N); cudaTransposeKernel<<<blocks, threadsPerBlock>>>(N, dmatB, 
tranB); cudaTransposedKernel<<<blocks, threadsPerBlock>>>(N, dmatA, tranB, dmatC); } void cudaMultMatrixBlocked(int N, float *dmatA, float *dmatB, float *dmatC) { dim3 threadsPerBlock(LBLK, LBLK); dim3 blocks(updiv(N, LBLK), updiv(N, LBLK)); cudaBlockKernel<<<blocks, threadsPerBlock>>>(N, dmatA, dmatB, dmatC); } void cudaMultMatrixBlockedCoarse(int N, float *dmatA, float *dmatB, float *dmatC) { dim3 threadsPerBlock(LBLK, LBLK); dim3 blocks(updiv(updiv(N, LBLK), LBLK), updiv(updiv(N, LBLK), LBLK)); // printf("Spawning %d %d blocks of %d %d threads\n", blocks.x, blocks.y, threadsPerBlock.x, threadsPerBlock.y); cudaBlockKernelCoarse<<<blocks, threadsPerBlock>>>(N, dmatA, dmatB, dmatC); } void cudaMultMatrixBlockedOld(int N, float *dmatA, float *dmatB, float *dmatC) { dim3 threadsPerBlock(LBLK, LBLK); dim3 blocks(updiv(N, LBLK), updiv(N, LBLK)); cudaBlockKernelOld<<<blocks, threadsPerBlock>>>(N, dmatA, dmatB, dmatC); } void cudaMultMatrixBlockedQuad(int N, float *dmatA, float *dmatB, float *dmatC) { dim3 threadsPerBlock(NCOL,NROW); // Have same N/NROW blocks in both dimensions, // since each block computes NROW x NROW portion of product dim3 blocks(updiv(N, NROW), updiv(N, NROW)); cudaBlockQuadKernel<<<blocks, threadsPerBlock>>>(N, dmatA, dmatB, dmatC); } void cudaMultMatrixNvidia(int N, float *dmatA, float *dmatB, float *dmatC) { dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 blocks(updiv(N, BLOCK_SIZE), updiv(N, BLOCK_SIZE)); matrixMulCUDA<<<blocks, threadsPerBlock>>>(dmatC, dmatA, dmatB, N, N); } void cudaMultMatrixSmallBlocked(int N, float *dmatA, float *dmatB, float *dmatC) { dim3 threadsPerBlock(SBLK, SBLK, SBLK); dim3 blocks(1, updiv(N, SBLK), updiv(N, SBLK)); cudaSmallBlockKernel<<<blocks, threadsPerBlock>>>(N, dmatA, dmatB, dmatC); } static int cudaRunMM(int N, mmul_t method) { switch (method) { case MMUL_CUDA_OLD_REFERENCE: cudaMultMatrixSimpleOld(N, aDevData, bDevData, tDevData); break; case MMUL_CUDA_REFERENCE: cudaMultMatrixSimple(N, aDevData, bDevData, tDevData); break; case MMUL_CUDA_TRANSPOSE: cudaMultMatrixTransposed(N, aDevData, bDevData, tDevData); break; case MMUL_CUDA_BLK: cudaMultMatrixBlocked(N, aDevData, bDevData, tDevData); break; case MMUL_CUDA_OLD_BLK: cudaMultMatrixBlockedOld(N, aDevData, bDevData, tDevData); break; case MMUL_CUDA_BLK_COARSE: cudaMultMatrixBlockedCoarse(N, aDevData, bDevData, tDevData); break; case MMUL_CUDA_NVIDIA: cudaMultMatrixNvidia(N, aDevData, bDevData, tDevData); break; case MMUL_CUDA_SMALL_BLK: cudaMultMatrixSmallBlocked(N, aDevData, bDevData, tDevData); break; case MMUL_CUDA_QUAD_BLK: cudaMultMatrixBlockedQuad(N, aDevData, bDevData, tDevData); break; default: fprintf(stderr, "Haven't implemented method yet\n"); return 0; } return 1; } double cudaBenchMM(int N, mmul_t method) { // Should already have done the setup if (allocN != N) { setup(N); } if (!cudaRunMM(N, method)) return 1000.0; cudaMemcpy(tHostData, tDevData, N*N*sizeof(float), cudaMemcpyDeviceToHost); if (checkMatrix(N, tHostData, gHostData) > 0) return 1000.0; /* Now do the real benchmarking */ long ops = (long) 2 * N * N * N; long runs = (targetOps+ops-1)/ops; double startTime = CycleTimer::currentSeconds(); for (long r = 0; r < runs; r++) cudaRunMM(N, method); cudaDeviceSynchronize(); double endTime = CycleTimer::currentSeconds(); double ms = (endTime - startTime) * 1000.0; double gflops = (long) (runs*ops)/ms * 1e-6; fprintf(stderr, "%ld runs, %ld ops/run, %.2f ms, %.3f GFlops\n", runs, ops, ms, gflops); return gflops; } void printCudaInfo() { // for fun, just print out 
some stats on the machine int deviceCount = 0; cudaError_t err = cudaGetDeviceCount(&deviceCount); printf("---------------------------------------------------------\n"); printf("Found %d CUDA devices\n", deviceCount); for (int i=0; i<deviceCount; i++) { cudaDeviceProp deviceProps; cudaGetDeviceProperties(&deviceProps, i); printf("Device %d: %s\n", i, deviceProps.name); printf(" SMs: %d\n", deviceProps.multiProcessorCount); printf(" Global mem: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024)); printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor); } printf("---------------------------------------------------------\n"); }
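// A rough sketch of how a host driver might exercise the benchmark entry points above.
// The flow (cudaSetup once, then cudaBenchMM per method) follows the functions defined
// in this file; generating aData/bData and the host reference product gData, as well as
// the mmul_t type and the targetOps/checkMatrix helpers, are assumed to come from
// matrix.h and the benchmark's main program rather than from this file.
#include <stdio.h>

void benchAllCuda(int N, float *aData, float *bData, float *gData) {
    // Copy the operands to the device and remember the host-side reference result.
    cudaSetup(N, aData, bData, gData);

    mmul_t methods[] = { MMUL_CUDA_REFERENCE, MMUL_CUDA_TRANSPOSE, MMUL_CUDA_BLK,
                         MMUL_CUDA_QUAD_BLK, MMUL_CUDA_NVIDIA };
    for (size_t m = 0; m < sizeof(methods) / sizeof(methods[0]); m++) {
        double gflops = cudaBenchMM(N, methods[m]);
        fprintf(stderr, "method %d: %.3f GFlops\n", (int)methods[m], gflops);
    }
}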
45569b6af47d17a8c52afbc08051f99547fa0c39.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void vecAddKernel(float* A, float* B, float* C, int n) { // Calculate global thread index based on the block and thread indices ---- //INSERT KERNEL CODE HERE int i = blockDim.x*blockIdx.x+threadIdx.x; // Use global index to determine which elements to read, add, and write --- //INSERT KERNEL CODE HERE if (i<n) C[i] = A[i] + B[i]; }
45569b6af47d17a8c52afbc08051f99547fa0c39.cu
#include "includes.h" __global__ void vecAddKernel(float* A, float* B, float* C, int n) { // Calculate global thread index based on the block and thread indices ---- //INSERT KERNEL CODE HERE int i = blockDim.x*blockIdx.x+threadIdx.x; // Use global index to determine which elements to read, add, and write --- //INSERT KERNEL CODE HERE if (i<n) C[i] = A[i] + B[i]; }
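// A minimal host-side sketch for launching vecAddKernel above: allocate device buffers,
// copy the operands over, launch one thread per element, and copy the sum back. The
// 256-thread block size is an illustrative choice, not something taken from the
// original assignment files, and error checking is omitted for brevity.
#include <cuda_runtime.h>

void vecAdd(float *A_h, float *B_h, float *C_h, int n) {
    size_t bytes = n * sizeof(float);
    float *A_d, *B_d, *C_d;
    cudaMalloc((void **) &A_d, bytes);
    cudaMalloc((void **) &B_d, bytes);
    cudaMalloc((void **) &C_d, bytes);

    cudaMemcpy(A_d, A_h, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(B_d, B_h, bytes, cudaMemcpyHostToDevice);

    const int BLOCK = 256;
    vecAddKernel<<<(n + BLOCK - 1) / BLOCK, BLOCK>>>(A_d, B_d, C_d, n);

    cudaMemcpy(C_h, C_d, bytes, cudaMemcpyDeviceToHost);
    cudaFree(A_d); cudaFree(B_d); cudaFree(C_d);
}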
34a2b156ecf789f39a279ef30e9908cfaa80e8ce.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" struct fPoint{ float x,y,z; __device__ fPoint(float x_, float y_, float z_){ x = x_; y = y_; z = z_; } }; struct fPoint4{ float x,y,z,a; __device__ fPoint4(float x_, float y_, float z_,float a_){ x = x_; y = y_; z = z_; a = a_; } }; __device__ fPoint Pmin(fPoint p1, fPoint p2){ float x = p1.x; if (p2.x < x) x = p2.x; float y = p1.y; if (p2.y < y) y = p2.y; return fPoint(x,y,0); } __device__ fPoint Pmax(fPoint p1, fPoint p2){ float x = p1.x; if (p2.x > x) x = p2.x; float y = p1.y; if (p2.y > y) y = p2.y; return fPoint(x,y,0); } __device__ float sign (fPoint p1, fPoint p2, fPoint p3) { return (p1.x - p3.x) * (p2.y - p3.y) - (p2.x - p3.x) * (p1.y - p3.y); } __device__ bool PointInTriangle (fPoint pt, fPoint v1, fPoint v2, fPoint v3) { float d1, d2, d3; bool has_neg, has_pos; d1 = sign(pt, v1, v2); d2 = sign(pt, v2, v3); d3 = sign(pt, v3, v1); has_neg = (d1 < 0) || (d2 < 0) || (d3 < 0); has_pos = (d1 > 0) || (d2 > 0) || (d3 > 0); return !(has_neg && has_pos); } __global__ void render(float vertices[3][53215], int tris[3][105840], float colors[4][53215],unsigned char image[WIDTH][HEIGHT][3], int depth[WIDTH][HEIGHT]){ const int i = threadIdx.x + blockIdx.x*blockDim.x; if (i >= 105840){ return; } fPoint v1(vertices[0][int(tris[0][i])],vertices[1][int(tris[0][i])],vertices[2][int(tris[0][i])]); fPoint v2(vertices[0][int(tris[1][i])],vertices[1][int(tris[1][i])],vertices[2][int(tris[1][i])]); fPoint v3(vertices[0][int(tris[2][i])],vertices[1][int(tris[2][i])],vertices[2][int(tris[2][i])]); fPoint4 c1(colors[0][int(tris[0][i])],colors[1][int(tris[0][i])],colors[2][int(tris[0][i])],colors[3][int(tris[0][i])]); fPoint4 c2(colors[0][int(tris[1][i])],colors[1][int(tris[1][i])],colors[2][int(tris[1][i])],colors[3][int(tris[1][i])]); fPoint4 c3(colors[0][int(tris[2][i])],colors[1][int(tris[2][i])],colors[2][int(tris[2][i])],colors[3][int(tris[2][i])]); fPoint leftup = Pmin(Pmin(v1,v2),v3); fPoint rightdown = Pmax(Pmax(v1,v2),v3); for (int x = int(leftup.x);x <= int(rightdown.x);x++) for (int y = int(leftup.y);y <= int(rightdown.y);y++){ if (PointInTriangle(fPoint(x,y,0),v1,v2,v3)){ int d = int((v1.z+v2.z+v3.z)/3); atomicMax(&depth[y][x],d); if (depth[y][x] == d){ double a = (c1.a+c2.a+c3.a)/3; image[y][x][0] = (c1.x+c2.x+c3.x)/3*a+ image[y][x][0]*(1-a); image[y][x][1] = (c1.y+c2.y+c3.y)/3*a+ image[y][x][1]*(1-a); image[y][x][2] = (c1.z+c2.z+c3.z)/3*a+ image[y][x][2]*(1-a); } } } }
34a2b156ecf789f39a279ef30e9908cfaa80e8ce.cu
struct fPoint{ float x,y,z; __device__ fPoint(float x_, float y_, float z_){ x = x_; y = y_; z = z_; } }; struct fPoint4{ float x,y,z,a; __device__ fPoint4(float x_, float y_, float z_,float a_){ x = x_; y = y_; z = z_; a = a_; } }; __device__ fPoint Pmin(fPoint p1, fPoint p2){ float x = p1.x; if (p2.x < x) x = p2.x; float y = p1.y; if (p2.y < y) y = p2.y; return fPoint(x,y,0); } __device__ fPoint Pmax(fPoint p1, fPoint p2){ float x = p1.x; if (p2.x > x) x = p2.x; float y = p1.y; if (p2.y > y) y = p2.y; return fPoint(x,y,0); } __device__ float sign (fPoint p1, fPoint p2, fPoint p3) { return (p1.x - p3.x) * (p2.y - p3.y) - (p2.x - p3.x) * (p1.y - p3.y); } __device__ bool PointInTriangle (fPoint pt, fPoint v1, fPoint v2, fPoint v3) { float d1, d2, d3; bool has_neg, has_pos; d1 = sign(pt, v1, v2); d2 = sign(pt, v2, v3); d3 = sign(pt, v3, v1); has_neg = (d1 < 0) || (d2 < 0) || (d3 < 0); has_pos = (d1 > 0) || (d2 > 0) || (d3 > 0); return !(has_neg && has_pos); } __global__ void render(float vertices[3][53215], int tris[3][105840], float colors[4][53215],unsigned char image[WIDTH][HEIGHT][3], int depth[WIDTH][HEIGHT]){ const int i = threadIdx.x + blockIdx.x*blockDim.x; if (i >= 105840){ return; } fPoint v1(vertices[0][int(tris[0][i])],vertices[1][int(tris[0][i])],vertices[2][int(tris[0][i])]); fPoint v2(vertices[0][int(tris[1][i])],vertices[1][int(tris[1][i])],vertices[2][int(tris[1][i])]); fPoint v3(vertices[0][int(tris[2][i])],vertices[1][int(tris[2][i])],vertices[2][int(tris[2][i])]); fPoint4 c1(colors[0][int(tris[0][i])],colors[1][int(tris[0][i])],colors[2][int(tris[0][i])],colors[3][int(tris[0][i])]); fPoint4 c2(colors[0][int(tris[1][i])],colors[1][int(tris[1][i])],colors[2][int(tris[1][i])],colors[3][int(tris[1][i])]); fPoint4 c3(colors[0][int(tris[2][i])],colors[1][int(tris[2][i])],colors[2][int(tris[2][i])],colors[3][int(tris[2][i])]); fPoint leftup = Pmin(Pmin(v1,v2),v3); fPoint rightdown = Pmax(Pmax(v1,v2),v3); for (int x = int(leftup.x);x <= int(rightdown.x);x++) for (int y = int(leftup.y);y <= int(rightdown.y);y++){ if (PointInTriangle(fPoint(x,y,0),v1,v2,v3)){ int d = int((v1.z+v2.z+v3.z)/3); atomicMax(&depth[y][x],d); if (depth[y][x] == d){ double a = (c1.a+c2.a+c3.a)/3; image[y][x][0] = (c1.x+c2.x+c3.x)/3*a+ image[y][x][0]*(1-a); image[y][x][1] = (c1.y+c2.y+c3.y)/3*a+ image[y][x][1]*(1-a); image[y][x][2] = (c1.z+c2.z+c3.z)/3*a+ image[y][x][2]*(1-a); } } } }
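// A sketch of one way the render kernel above could be launched. The triangle and
// vertex counts (105840 and 53215) are hard-coded in the kernel's parameter types;
// WIDTH and HEIGHT are referenced but not defined in this file, so they are assumed
// here to be compile-time constants supplied elsewhere. The 256-thread block size is
// an illustrative choice.
void launchRender(float (*d_vertices)[53215], int (*d_tris)[105840], float (*d_colors)[53215],
                  unsigned char (*d_image)[HEIGHT][3], int (*d_depth)[HEIGHT]) {
    const int N_TRIS = 105840;                    // matches the kernel's bounds check
    const int BLOCK = 256;
    const int GRID = (N_TRIS + BLOCK - 1) / BLOCK;
    render<<<GRID, BLOCK>>>(d_vertices, d_tris, d_colors, d_image, d_depth);
}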
2adc664ba7ce9c5f61d89e3fc87080cec5cad88c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "gaincorr.h" #include "math.h" #include <hip/hip_runtime.h> #include <iostream> #include <list> #include"book.cuh" //! Kernel to gain-correct the given image with the given slope and intercept data on the GPU. //! Intercept and slope arrays are images with the same size, storing intercept and slope correction //! factor to every pixel. __global__ void kernel_do_gaincorr (float* d_slope, float* d_intercept, int* d_saturation, float* d_image) { unsigned int tid = threadIdx.x; unsigned int pixel = blockIdx.x*blockDim.x + tid; //thread is computing pixel-th pixel //printf(" pixel: %d \t tid: %d \t blockIdx : %d \t blockDim : %d \n", pixel,tid, blockIdx.x, blockDim.x); d_image[pixel] = (d_image[pixel] - d_intercept[pixel] ) / d_slope[pixel] * 16383.0f / *d_saturation; return; } //! Executes gain correction of the given image. void Gaincorr::gaincorrigateimage(Image_cuda_compatible& image) { //Rounding voltage to a multiple of 5 int voltage = (int) (round(image.getvoltage())); int remainder = voltage %5; if(remainder != 0) { voltage = voltage + 5 - remainder; } //DEBUG if(saturation.find(voltage) == saturation.end()) { std::cout <<"Error: no calibration data found for image" <<image.getid() << "With voltage " << image.getvoltage() <<std::endl; return; } if(slopes.find(voltage) == slopes.end()) { std::cout <<"Error: no slope data found for image" <<image.getid() << "With voltage " << image.getvoltage() <<std::endl; return; } if(intercepts.find(voltage) == intercepts.end()) { std::cout <<"Error: no intercept data found for image" <<image.getid() << "With voltage " << image.getvoltage() <<std::endl; return; } int* d_saturation; //DEBUG int sat = saturation[voltage]; float* d_slope; float* d_intercept; d_slope = slopes.find(voltage)->second.gpu_im; d_intercept = intercepts.find(voltage)->second.gpu_im; float* d_image; d_image= image.gpu_im; HANDLE_ERROR (hipMalloc( (void**)&d_saturation, sizeof(int) )); HANDLE_ERROR (hipMemcpy(d_saturation, &sat, sizeof(int), hipMemcpyHostToDevice )); hipLaunchKernelGGL(( kernel_do_gaincorr), dim3(41472),dim3(32), 0, 0, d_slope, d_intercept, d_saturation, d_image ); HANDLE_ERROR (hipFree(d_saturation)); }
2adc664ba7ce9c5f61d89e3fc87080cec5cad88c.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "gaincorr.h" #include "math.h" #include <cuda.h> #include <iostream> #include <list> #include"book.cuh" //! Kernel to gain-correct the given image with the given slope and intercept data on the GPU. //! Intercept and slope arrays are images with the same size, storing intercept and slope correction //! factor to every pixel. __global__ void kernel_do_gaincorr (float* d_slope, float* d_intercept, int* d_saturation, float* d_image) { unsigned int tid = threadIdx.x; unsigned int pixel = blockIdx.x*blockDim.x + tid; //thread is computing pixel-th pixel //printf(" pixel: %d \t tid: %d \t blockIdx : %d \t blockDim : %d \n", pixel,tid, blockIdx.x, blockDim.x); d_image[pixel] = (d_image[pixel] - d_intercept[pixel] ) / d_slope[pixel] * 16383.0f / *d_saturation; return; } //! Executes gain correction of the given image. void Gaincorr::gaincorrigateimage(Image_cuda_compatible& image) { //Rounding voltage to a multiple of 5 int voltage = (int) (round(image.getvoltage())); int remainder = voltage %5; if(remainder != 0) { voltage = voltage + 5 - remainder; } //DEBUG if(saturation.find(voltage) == saturation.end()) { std::cout <<"Error: no calibration data found for image" <<image.getid() << "With voltage " << image.getvoltage() <<std::endl; return; } if(slopes.find(voltage) == slopes.end()) { std::cout <<"Error: no slope data found for image" <<image.getid() << "With voltage " << image.getvoltage() <<std::endl; return; } if(intercepts.find(voltage) == intercepts.end()) { std::cout <<"Error: no intercept data found for image" <<image.getid() << "With voltage " << image.getvoltage() <<std::endl; return; } int* d_saturation; //DEBUG int sat = saturation[voltage]; float* d_slope; float* d_intercept; d_slope = slopes.find(voltage)->second.gpu_im; d_intercept = intercepts.find(voltage)->second.gpu_im; float* d_image; d_image= image.gpu_im; HANDLE_ERROR (cudaMalloc( (void**)&d_saturation, sizeof(int) )); HANDLE_ERROR (cudaMemcpy(d_saturation, &sat, sizeof(int), cudaMemcpyHostToDevice )); kernel_do_gaincorr<<<41472,32>>>( d_slope, d_intercept, d_saturation, d_image ); HANDLE_ERROR (cudaFree(d_saturation)); }
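// A plain CPU reference for the per-pixel correction applied by kernel_do_gaincorr,
// useful for spot-checking the GPU result. It mirrors the kernel's flat indexing; the
// pixel count is assumed to equal the 41472*32 threads used in the launch above, which
// is an assumption about the detector image size rather than a fact from this file.
void gaincorrReference(const float *slope, const float *intercept, int saturation,
                       float *image, size_t n_pixels) {
    for (size_t p = 0; p < n_pixels; ++p) {
        image[p] = (image[p] - intercept[p]) / slope[p] * 16383.0f / saturation;
    }
}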
0a6922b80f9ff1a40e5f860880025418515675e4.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/ATen.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <vector> template <typename scalar_t> __global__ void dist_map_forward_kernel(const size_t batch_size, const size_t size_a, const size_t size_b, const scalar_t* __restrict__ a, const scalar_t* __restrict__ b, scalar_t* __restrict__ dist_map) { const auto n = batch_size * size_a * size_b; for (auto index = blockIdx.x * blockDim.x + threadIdx.x; index < n; index += blockDim.x * gridDim.x) { const auto batch = index / (size_a * size_b); const auto h_index = index % (size_a * size_b); const auto h = h_index / size_b; const auto w = h_index % size_b; const auto x1 = a[(batch*3+0)*size_a+h]; const auto x2 = a[(batch*3+1)*size_a+h]; const auto x3 = a[(batch*3+2)*size_a+h]; const auto y1 = b[(batch*3+0)*size_b+w]; const auto y2 = b[(batch*3+1)*size_b+w]; const auto y3 = b[(batch*3+2)*size_b+w]; dist_map[index] = (x1-y1)*(x1-y1) + (x2-y2)*(x2-y2) + (x3-y3)*(x3-y3); } } std::vector<at::Tensor> dist_map_cuda_forward(at::Tensor a, at::Tensor b) { const auto batch_size = a.size(0); const auto size_a = a.size(2); const auto size_b = b.size(2); auto dist_map = at::zeros({batch_size, size_a, size_b}, a.options()); const int threads = 1024; const int blocks = (batch_size * size_a * size_b + threads - 1) / threads; AT_DISPATCH_FLOATING_TYPES(a.type(), "dist_map_cuda_forward", ([&] { hipLaunchKernelGGL(( dist_map_forward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, batch_size, size_a, size_b, a.data<scalar_t>(), b.data<scalar_t>(), dist_map.data<scalar_t>() ); })); return {dist_map}; } template <typename scalar_t> __global__ void dist_map_grad_a_kernel(const size_t batch_size, const size_t size_a, const size_t size_b, const scalar_t* __restrict__ a, const scalar_t* __restrict__ b, const scalar_t* __restrict__ grad_d, scalar_t* __restrict__ grad_a) { const auto n = batch_size * size_a * 3; for (auto index = blockIdx.x * blockDim.x + threadIdx.x; index < n; index += blockDim.x * gridDim.x) { const auto batch = index / (size_a * 3); const auto h_index = index % (size_a * 3); const auto h = h_index / size_a; const auto w = h_index % size_a; for (auto j = 0; j < size_b; ++j) { const auto b_val = b[(batch*3+h)*size_b+j]; const auto grad_d_val = grad_d[(batch*size_a+w)*size_b+j]; grad_a[index] += 2. * (a[index] - b_val) * grad_d_val; } } } template <typename scalar_t> __global__ void dist_map_grad_b_kernel(const size_t batch_size, const size_t size_a, const size_t size_b, const scalar_t* __restrict__ a, const scalar_t* __restrict__ b, const scalar_t* __restrict__ grad_d, scalar_t* __restrict__ grad_b) { const auto n = batch_size * size_b * 3; for (auto index = blockIdx.x * blockDim.x + threadIdx.x; index < n; index += blockDim.x * gridDim.x) { const auto batch = index / (size_b * 3); const auto h_index = index % (size_b * 3); const auto h = h_index / size_b; const auto w = h_index % size_b; for (auto j = 0; j < size_a; ++j) { const auto a_val = a[(batch*3+h)*size_a+j]; const auto grad_d_val = grad_d[(batch*size_a+j)*size_b+w]; grad_b[index] += 2. 
* (b[index] - a_val) * grad_d_val; } } } std::vector<at::Tensor> dist_map_cuda_backward(at::Tensor grad_d, at::Tensor a, at::Tensor b) { const auto batch_size = a.size(0); const auto size_a = a.size(2); const auto size_b = b.size(2); auto grad_a = at::zeros_like(a); auto grad_b = at::zeros_like(b); const int threads = 1024; const int blocks_a = (batch_size * size_a * 3 + threads - 1) / threads; const int blocks_b = (batch_size * size_b * 3 + threads - 1) / threads; AT_DISPATCH_FLOATING_TYPES(a.type(), "dist_map_cuda_backward", ([&] { hipLaunchKernelGGL(( dist_map_grad_a_kernel<scalar_t>), dim3(blocks_a), dim3(threads), 0, 0, batch_size, size_a, size_b, a.data<scalar_t>(), b.data<scalar_t>(), grad_d.data<scalar_t>(), grad_a.data<scalar_t>() ); })); AT_DISPATCH_FLOATING_TYPES(b.type(), "dist_map_cuda_backward", ([&] { hipLaunchKernelGGL(( dist_map_grad_b_kernel<scalar_t>), dim3(blocks_b), dim3(threads), 0, 0, batch_size, size_a, size_b, a.data<scalar_t>(), b.data<scalar_t>(), grad_d.data<scalar_t>(), grad_b.data<scalar_t>() ); })); return {grad_a, grad_b}; }
0a6922b80f9ff1a40e5f860880025418515675e4.cu
#include <ATen/ATen.h> #include <cuda.h> #include <cuda_runtime.h> #include <vector> template <typename scalar_t> __global__ void dist_map_forward_kernel(const size_t batch_size, const size_t size_a, const size_t size_b, const scalar_t* __restrict__ a, const scalar_t* __restrict__ b, scalar_t* __restrict__ dist_map) { const auto n = batch_size * size_a * size_b; for (auto index = blockIdx.x * blockDim.x + threadIdx.x; index < n; index += blockDim.x * gridDim.x) { const auto batch = index / (size_a * size_b); const auto h_index = index % (size_a * size_b); const auto h = h_index / size_b; const auto w = h_index % size_b; const auto x1 = a[(batch*3+0)*size_a+h]; const auto x2 = a[(batch*3+1)*size_a+h]; const auto x3 = a[(batch*3+2)*size_a+h]; const auto y1 = b[(batch*3+0)*size_b+w]; const auto y2 = b[(batch*3+1)*size_b+w]; const auto y3 = b[(batch*3+2)*size_b+w]; dist_map[index] = (x1-y1)*(x1-y1) + (x2-y2)*(x2-y2) + (x3-y3)*(x3-y3); } } std::vector<at::Tensor> dist_map_cuda_forward(at::Tensor a, at::Tensor b) { const auto batch_size = a.size(0); const auto size_a = a.size(2); const auto size_b = b.size(2); auto dist_map = at::zeros({batch_size, size_a, size_b}, a.options()); const int threads = 1024; const int blocks = (batch_size * size_a * size_b + threads - 1) / threads; AT_DISPATCH_FLOATING_TYPES(a.type(), "dist_map_cuda_forward", ([&] { dist_map_forward_kernel<scalar_t><<<blocks, threads>>>( batch_size, size_a, size_b, a.data<scalar_t>(), b.data<scalar_t>(), dist_map.data<scalar_t>() ); })); return {dist_map}; } template <typename scalar_t> __global__ void dist_map_grad_a_kernel(const size_t batch_size, const size_t size_a, const size_t size_b, const scalar_t* __restrict__ a, const scalar_t* __restrict__ b, const scalar_t* __restrict__ grad_d, scalar_t* __restrict__ grad_a) { const auto n = batch_size * size_a * 3; for (auto index = blockIdx.x * blockDim.x + threadIdx.x; index < n; index += blockDim.x * gridDim.x) { const auto batch = index / (size_a * 3); const auto h_index = index % (size_a * 3); const auto h = h_index / size_a; const auto w = h_index % size_a; for (auto j = 0; j < size_b; ++j) { const auto b_val = b[(batch*3+h)*size_b+j]; const auto grad_d_val = grad_d[(batch*size_a+w)*size_b+j]; grad_a[index] += 2. * (a[index] - b_val) * grad_d_val; } } } template <typename scalar_t> __global__ void dist_map_grad_b_kernel(const size_t batch_size, const size_t size_a, const size_t size_b, const scalar_t* __restrict__ a, const scalar_t* __restrict__ b, const scalar_t* __restrict__ grad_d, scalar_t* __restrict__ grad_b) { const auto n = batch_size * size_b * 3; for (auto index = blockIdx.x * blockDim.x + threadIdx.x; index < n; index += blockDim.x * gridDim.x) { const auto batch = index / (size_b * 3); const auto h_index = index % (size_b * 3); const auto h = h_index / size_b; const auto w = h_index % size_b; for (auto j = 0; j < size_a; ++j) { const auto a_val = a[(batch*3+h)*size_a+j]; const auto grad_d_val = grad_d[(batch*size_a+j)*size_b+w]; grad_b[index] += 2. 
* (b[index] - a_val) * grad_d_val; } } } std::vector<at::Tensor> dist_map_cuda_backward(at::Tensor grad_d, at::Tensor a, at::Tensor b) { const auto batch_size = a.size(0); const auto size_a = a.size(2); const auto size_b = b.size(2); auto grad_a = at::zeros_like(a); auto grad_b = at::zeros_like(b); const int threads = 1024; const int blocks_a = (batch_size * size_a * 3 + threads - 1) / threads; const int blocks_b = (batch_size * size_b * 3 + threads - 1) / threads; AT_DISPATCH_FLOATING_TYPES(a.type(), "dist_map_cuda_backward", ([&] { dist_map_grad_a_kernel<scalar_t><<<blocks_a, threads>>>( batch_size, size_a, size_b, a.data<scalar_t>(), b.data<scalar_t>(), grad_d.data<scalar_t>(), grad_a.data<scalar_t>() ); })); AT_DISPATCH_FLOATING_TYPES(b.type(), "dist_map_cuda_backward", ([&] { dist_map_grad_b_kernel<scalar_t><<<blocks_b, threads>>>( batch_size, size_a, size_b, a.data<scalar_t>(), b.data<scalar_t>(), grad_d.data<scalar_t>(), grad_b.data<scalar_t>() ); })); return {grad_a, grad_b}; }
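Both builds of this extension compute the same quantity. As a short worked restatement in LaTeX of what the forward kernel and the two backward kernels above do, with batch index n, coordinate channel c in {1,2,3}, and scalar loss L:

\[
d_{n,i,j} = \sum_{c=1}^{3}\bigl(A_{n,c,i} - B_{n,c,j}\bigr)^2,
\qquad
\frac{\partial L}{\partial A_{n,c,i}} = \sum_{j} 2\,\bigl(A_{n,c,i} - B_{n,c,j}\bigr)\,\frac{\partial L}{\partial d_{n,i,j}},
\qquad
\frac{\partial L}{\partial B_{n,c,j}} = \sum_{i} 2\,\bigl(B_{n,c,j} - A_{n,c,i}\bigr)\,\frac{\partial L}{\partial d_{n,i,j}}.
\]

Each backward thread owns a single (n, c, i) or (n, c, j) entry and loops serially over the opposite point set, which is why the gradient accumulation needs no atomics.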
361be77f234b15db23d5364cfe2d294013533fdb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2013-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <solvers/polynomial_solver.h> #include <solvers/block_common_solver.h> #include <blas.h> #include <string.h> #include <cutil.h> #include <multiply.h> #include <miscmath.h> #ifdef _WIN32 #pragma warning (push) #pragma warning (disable : 4244 4267 4521) #endif #include <cusp/relaxation/polynomial.h> #include <cusp/detail/spectral_radius.h> #ifdef _WIN32 #pragma warning (pop) #endif #include <matrix_cusp.h> namespace amgx { namespace polynomial_solver { template <typename IndexType, typename ValueTypeA, typename ValueTypeB> __global__ void aux_norm1_csr(const IndexType *row_offsets, const IndexType *column_indices, const IndexType *dia_values, const ValueTypeA *nonzero_values, const int num_rows, int bsize, ValueTypeB *row_sum) { int tid = threadIdx.x + blockDim.x * blockIdx.x; int bsize_sq = bsize * bsize; while (tid < num_rows) { ValueTypeB tmp_sum = 0; int idx_i = tid / bsize; int offset_i = tid % bsize; for (int i = 0; i < bsize; i++) { tmp_sum += fabs(nonzero_values[dia_values[idx_i] * bsize_sq + offset_i * bsize + i]); } for (int j = row_offsets[idx_i]; j < row_offsets[idx_i + 1]; j++) { // Compute edge weight for (int i = 0; i < bsize; i++) { tmp_sum += fabs(nonzero_values[j * bsize_sq + offset_i * bsize + i]); } } row_sum[tid] = tmp_sum / fabs(nonzero_values[dia_values[idx_i] * bsize_sq + offset_i * bsize + offset_i]); tid += gridDim.x * blockDim.x; } } template <typename IndexType, typename ValueTypeA, typename ValueTypeB> __global__ void get_diaginv(const IndexType *dia_idx, const ValueTypeA *nonzero_values, const int num_rows, int bsize, ValueTypeB *Dinv) { int tid = threadIdx.x + blockDim.x * blockIdx.x; int bsize_sq = bsize * bsize; while (tid < num_rows) { int idx_i = tid / bsize; int offset_i = tid % bsize; ValueTypeB diag = nonzero_values[dia_idx[idx_i] * bsize_sq + offset_i * bsize + offset_i]; Dinv[tid] = ValueTypeB(1) / (isNotCloseToZero(diag) ? 
diag : epsilon(diag) ); tid += gridDim.x * blockDim.x; } } // Constructor template<class T_Config> PolynomialSolverBase<T_Config>::PolynomialSolverBase( AMG_Config &cfg, const std::string &cfg_scope) : Solver<T_Config>( cfg, cfg_scope), R(0) { ndeg0 = cfg.AMG_Config::getParameter<int>("kpz_order", cfg_scope); } // Destructor template<class T_Config> PolynomialSolverBase<T_Config>::~PolynomialSolverBase() { } template<class T_Config> void PolynomialSolverBase<T_Config>::printSolverParameters() const { std::cout << "kpz_order = " << this->ndeg0 << std::endl; } // Solver setup template<class T_Config> void PolynomialSolverBase<T_Config>::solver_setup(bool reuse_matrix_structure) { this->m_explicit_A = dynamic_cast<Matrix<T_Config>*>(this->m_A); if (!this->m_explicit_A) { FatalError("PolynomialSolver only works with explicit matrices", AMGX_ERR_INTERNAL); } if (ndeg0 == 0) { ndeg0 = 6; } int N = this->m_explicit_A->get_num_rows() * this->m_explicit_A->get_block_dimy(); ValueTypeA mu0, mu1, smu0, smu1; const IndexType *A_row_offsets_ptr = this->m_explicit_A->row_offsets.raw(); const IndexType *A_column_indices_ptr = this->m_explicit_A->col_indices.raw(); const IndexType *A_dia_ptr = this->m_explicit_A->diag.raw(); const ValueTypeA *A_nonzero_values_ptr = this->m_explicit_A->values.raw(); VVector row_sum(N); ValueTypeB *row_sum_ptr = row_sum.raw(); const int threads_per_block = 512; const int num_blocks = min((N - 1) / threads_per_block + 1, AMGX_GRID_MAX_SIZE); hipLaunchKernelGGL(( aux_norm1_csr) , dim3(num_blocks), dim3(threads_per_block), 0, 0, A_row_offsets_ptr, A_column_indices_ptr, A_dia_ptr, A_nonzero_values_ptr, N, this->m_explicit_A->get_block_dimy(), row_sum_ptr); cudaCheckError(); mu0 = cusp::blas::nrmmax(row_sum); cudaCheckError(); if (mu0 == 0) { mu0 = cusp::blas::nrmmax(this->m_explicit_A->values); } cudaCheckError(); mu0 = 1.0 / mu0; mu1 = 4.0 * mu0; // default set 8; smu0 = sqrt(mu0); smu1 = sqrt(mu1); k[1] = (mu0 + mu1) / 2.0; k[2] = (smu0 + smu1) * (smu0 + smu1) / 2.0; k[3] = mu0 * mu1; k[4] = 2.0 * k[3] / k[2]; // 4.0*mu0*mu1/(sqrt(mu0)+sqrt(mu1))/(sqrt(mu0)+sqrt(mu1)); k[5] = (mu1 - 2.0 * smu0 * smu1 + mu0) / (mu1 + 2.0 * smu0 * smu1 + mu0); // square of (sqrt(kappa)-1)/(sqrt(kappa)+1); if (this->m_explicit_A->get_block_size() == 1) { MatrixCusp<T_Config, cusp::csr_format> wA((Matrix<T_Config> *) &*this->m_explicit_A); ValueTypeA rho = cusp::detail::ritz_spectral_radius_symmetric(wA, 8); cudaCheckError(); cusp::array1d<ValueTypeA, cusp::host_memory> coeffs; cusp::relaxation::detail::chebyshev_polynomial_coefficients(rho, coeffs); cudaCheckError(); poly = cusp::relaxation::polynomial<ValueTypeA, typename Matrix<T_Config>::memory_space > (wA, coeffs); cudaCheckError(); } R.resize(N); V0.resize(N); V.resize(N); Rbar.resize(N); Sn.resize(N); Dinv.resize(N); R.set_block_dimy(this->m_explicit_A->get_block_dimy()); R.set_block_dimx(1); V0.set_block_dimy(this->m_explicit_A->get_block_dimy()); V0.set_block_dimx(1); V.set_block_dimy(this->m_explicit_A->get_block_dimy()); V.set_block_dimx(1); Rbar.set_block_dimy(this->m_explicit_A->get_block_dimy()); Rbar.set_block_dimx(1); Sn.set_block_dimy(this->m_explicit_A->get_block_dimy()); Sn.set_block_dimx(1); Dinv.set_block_dimy(this->m_explicit_A->get_block_dimy()); Dinv.set_block_dimx(1); ValueTypeB *Dinv_ptr = Dinv.raw(); hipLaunchKernelGGL(( get_diaginv) , dim3(num_blocks), dim3(threads_per_block), 0, 0, A_dia_ptr, A_nonzero_values_ptr, N, this->m_explicit_A->get_block_dimy(), Dinv_ptr); cudaCheckError(); } // template<class T_Config> void 
PolynomialSolverBase<T_Config>::solve_init( VVector &b, VVector &x, bool xIsZero ) { } // Solve one iteration template<class T_Config> bool PolynomialSolverBase<T_Config>::solve_iteration( VVector &b, VVector &x, bool xIsZero ) { smooth_common_sqblocks( *this->m_explicit_A, b, x ); return this->converged( b, x ); } template<class T_Config> void PolynomialSolverBase<T_Config>::solve_finalize( VVector &b, VVector &x ) { } template<class T_Config> struct poly_smooth { static void poly_postsmooth(const Matrix<T_Config> &A, const Vector<T_Config> &B, Vector<T_Config> &C, cusp::relaxation::polynomial<typename T_Config::MatPrec, typename T_Config::MemSpace> &poly) { FatalError("Mixed precision is not supported for scalar matrix type", AMGX_ERR_NOT_IMPLEMENTED); } static void poly_presmooth(const Matrix<T_Config> &A, const Vector<T_Config> &B, Vector<T_Config> &C, cusp::relaxation::polynomial<typename T_Config::MatPrec, typename T_Config::MemSpace> &poly) { FatalError("Mixed precision is not supported for scalar matrix type", AMGX_ERR_NOT_IMPLEMENTED); } }; template<AMGX_MemorySpace t_memSpace, AMGX_IndPrecision t_indInt> struct poly_smooth<TemplateConfig<t_memSpace, AMGX_vecDouble, AMGX_matDouble, t_indInt>> { static void poly_postsmooth(const Matrix<TemplateConfig<t_memSpace, AMGX_vecDouble, AMGX_matDouble, t_indInt> > &A, const Vector<TemplateConfig<t_memSpace, AMGX_vecDouble, AMGX_matDouble, t_indInt> > &B, Vector<TemplateConfig<t_memSpace, AMGX_vecDouble, AMGX_matDouble, t_indInt> > &C, cusp::relaxation::polynomial<double, typename MemorySpaceMap<t_memSpace>::Type> &poly) { MatrixCusp<TemplateConfig<t_memSpace, AMGX_vecDouble, AMGX_matDouble, t_indInt>, cusp::csr_format> wA((Matrix<TemplateConfig<t_memSpace, AMGX_vecDouble, AMGX_matDouble, t_indInt> > *) &A); poly.postsmooth(wA, B, C); cudaCheckError(); } static void poly_presmooth(const Matrix<TemplateConfig<t_memSpace, AMGX_vecDouble, AMGX_matDouble, t_indInt> > &A, const Vector<TemplateConfig<t_memSpace, AMGX_vecDouble, AMGX_matDouble, t_indInt> > &B, Vector<TemplateConfig<t_memSpace, AMGX_vecDouble, AMGX_matDouble, t_indInt> > &C, cusp::relaxation::polynomial<double, typename MemorySpaceMap<t_memSpace>::Type> &poly) { MatrixCusp<TemplateConfig<t_memSpace, AMGX_vecDouble, AMGX_matDouble, t_indInt>, cusp::csr_format> wA((Matrix<TemplateConfig<t_memSpace, AMGX_vecDouble, AMGX_matDouble, t_indInt> > *) &A); poly.presmooth(wA, B, C); cudaCheckError(); } }; template<AMGX_MemorySpace t_memSpace, AMGX_IndPrecision t_indInt> struct poly_smooth<TemplateConfig<t_memSpace, AMGX_vecFloat, AMGX_matFloat, t_indInt>> { static void poly_postsmooth(const Matrix<TemplateConfig<t_memSpace, AMGX_vecFloat, AMGX_matFloat, t_indInt> > &A, const Vector<TemplateConfig<t_memSpace, AMGX_vecFloat, AMGX_matFloat, t_indInt> > &B, Vector<TemplateConfig<t_memSpace, AMGX_vecFloat, AMGX_matFloat, t_indInt> > &C, cusp::relaxation::polynomial<float, typename MemorySpaceMap<t_memSpace>::Type> &poly) { MatrixCusp<TemplateConfig<t_memSpace, AMGX_vecFloat, AMGX_matFloat, t_indInt>, cusp::csr_format> wA((Matrix<TemplateConfig<t_memSpace, AMGX_vecFloat, AMGX_matFloat, t_indInt> > *) &A); poly.postsmooth(wA, B, C); cudaCheckError(); } static void poly_presmooth(const Matrix<TemplateConfig<t_memSpace, AMGX_vecFloat, AMGX_matFloat, t_indInt> > &A, const Vector<TemplateConfig<t_memSpace, AMGX_vecFloat, AMGX_matFloat, t_indInt> > &B, Vector<TemplateConfig<t_memSpace, AMGX_vecFloat, AMGX_matFloat, t_indInt> > &C, cusp::relaxation::polynomial<float, typename 
MemorySpaceMap<t_memSpace>::Type> &poly) { MatrixCusp<TemplateConfig<t_memSpace, AMGX_vecFloat, AMGX_matFloat, t_indInt>, cusp::csr_format> wA((Matrix<TemplateConfig<t_memSpace, AMGX_vecFloat, AMGX_matFloat, t_indInt> > *) &A); poly.presmooth(wA, B, C); cudaCheckError(); } }; template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void PolynomialSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::smooth_1x1(const Matrix_h &A, const VVector &B, VVector &C) { FatalError("Unsupported on host for Polynomial smoother", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void PolynomialSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::smooth_1x1(const Matrix_d &A, const VVector &B, VVector &C) { if (A.hasProps(DIAG)) { FatalError("Unsupported separate diag", AMGX_ERR_NOT_IMPLEMENTED); } if (A.get_block_size() != 1) { FatalError("Unsupported block size for PolynomialSolver", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE); } poly_smooth<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec>>::poly_postsmooth(A, B, C, this->poly); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void PolynomialSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::smooth_common_sqblocks(Matrix_h &A, const VVector &b, VVector &x) { FatalError("Unsupported on host for Polynomial smoother", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void PolynomialSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::smooth_common_sqblocks(Matrix_d &A, const VVector &b, VVector &x) { //0, get residule : R = b - Ax multiply( A, x, this->R ); cusp::blas::axpby( b, this->R, this->R, ValueTypeB(1), ValueTypeB(-1) ); //1, set up rbar cusp::blas::xmy(this->Dinv, this->R, this->Rbar); //2, set up V and V0 multiply( A, this->Rbar, this->V ); cusp::blas::xmy(this->Dinv, this->V, this->V); this->V0 = this->Rbar; cusp::blas::scal(this->V0, this->k[1]); cusp::blas::axpby(this->Rbar, this->V, this->V, this->k[2], this->k[3]); //3, iterate to get v_{j+1} for (int i = 0; i < this->ndeg0; i++) { multiply( A, this->V, this->Rbar ); cusp::blas::axpby( this->R, this->Rbar, this->Rbar, ValueTypeB(1), ValueTypeB(-1) ); cusp::blas::xmy(this->Dinv, this->Rbar, this->Rbar); cusp::blas::axpbypcz(this->Rbar, this->V, this->V0, this->Sn, this->k[4], this->k[5] + 1.0, -1.0 * this->k[5]); // V0 = V this->V0 = this->V; // V = V+Sn cusp::blas::copy(this->Sn, this->V); } //4, update solution cusp::blas::axpy(this->V, x, ValueTypeB(1)); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void PolynomialSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::smooth_with_0_initial_guess_1x1(const Matrix_h &A, const VVector &b, VVector &x) { FatalError("Unsupported on host for Polynomial smoother", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void PolynomialSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::smooth_with_0_initial_guess_1x1(const Matrix_d &A, const VVector &b, VVector &x) { if (A.hasProps(DIAG)) { FatalError("Unsupported separate diag", AMGX_ERR_NOT_IMPLEMENTED); } if (A.get_block_size() != 1) { FatalError("Unsupported block size for PolynomialSolver", 
AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE); } poly_smooth<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec>>::poly_presmooth(A, b, x, this->poly); } /**************************************** * Explict instantiations ***************************************/ #define AMGX_CASE_LINE(CASE) template class PolynomialSolverBase<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE #define AMGX_CASE_LINE(CASE) template class PolynomialSolver<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE } // namespace polynomial_solver } // namespace amgx
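The two setup kernels in this solver (aux_norm1_csr, get_diaginv) both use a grid-stride while-loop so that a grid capped at AMGX_GRID_MAX_SIZE blocks can still cover an arbitrary number of rows. A stripped-down, self-contained sketch of that pattern with invented names, using none of the AMGX types:

// Grid-stride loop: each thread starts at its global id and hops by the total
// number of launched threads until the whole index range has been visited.
__global__ void reciprocal_kernel(const double* in, double* out, int n)
{
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    while (tid < n)
    {
        out[tid] = 1.0 / in[tid];       // stand-in for the per-row work (e.g. the diagonal inverse)
        tid += gridDim.x * blockDim.x;  // stride by the full grid
    }
}
// Launched with a capped grid, mirroring solver_setup() (GRID_CAP is a placeholder constant):
//   const int threads_per_block = 512;
//   const int num_blocks = min((n - 1) / threads_per_block + 1, GRID_CAP);
//   reciprocal_kernel<<<num_blocks, threads_per_block>>>(d_in, d_out, n);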
361be77f234b15db23d5364cfe2d294013533fdb.cu
/* Copyright (c) 2013-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <solvers/polynomial_solver.h> #include <solvers/block_common_solver.h> #include <blas.h> #include <string.h> #include <cutil.h> #include <multiply.h> #include <miscmath.h> #ifdef _WIN32 #pragma warning (push) #pragma warning (disable : 4244 4267 4521) #endif #include <cusp/relaxation/polynomial.h> #include <cusp/detail/spectral_radius.h> #ifdef _WIN32 #pragma warning (pop) #endif #include <matrix_cusp.h> namespace amgx { namespace polynomial_solver { template <typename IndexType, typename ValueTypeA, typename ValueTypeB> __global__ void aux_norm1_csr(const IndexType *row_offsets, const IndexType *column_indices, const IndexType *dia_values, const ValueTypeA *nonzero_values, const int num_rows, int bsize, ValueTypeB *row_sum) { int tid = threadIdx.x + blockDim.x * blockIdx.x; int bsize_sq = bsize * bsize; while (tid < num_rows) { ValueTypeB tmp_sum = 0; int idx_i = tid / bsize; int offset_i = tid % bsize; for (int i = 0; i < bsize; i++) { tmp_sum += fabs(nonzero_values[dia_values[idx_i] * bsize_sq + offset_i * bsize + i]); } for (int j = row_offsets[idx_i]; j < row_offsets[idx_i + 1]; j++) { // Compute edge weight for (int i = 0; i < bsize; i++) { tmp_sum += fabs(nonzero_values[j * bsize_sq + offset_i * bsize + i]); } } row_sum[tid] = tmp_sum / fabs(nonzero_values[dia_values[idx_i] * bsize_sq + offset_i * bsize + offset_i]); tid += gridDim.x * blockDim.x; } } template <typename IndexType, typename ValueTypeA, typename ValueTypeB> __global__ void get_diaginv(const IndexType *dia_idx, const ValueTypeA *nonzero_values, const int num_rows, int bsize, ValueTypeB *Dinv) { int tid = threadIdx.x + blockDim.x * blockIdx.x; int bsize_sq = bsize * bsize; while (tid < num_rows) { int idx_i = tid / bsize; int offset_i = tid % bsize; ValueTypeB diag = nonzero_values[dia_idx[idx_i] * bsize_sq + offset_i * bsize + offset_i]; Dinv[tid] = ValueTypeB(1) / (isNotCloseToZero(diag) ? 
diag : epsilon(diag) ); tid += gridDim.x * blockDim.x; } } // Constructor template<class T_Config> PolynomialSolverBase<T_Config>::PolynomialSolverBase( AMG_Config &cfg, const std::string &cfg_scope) : Solver<T_Config>( cfg, cfg_scope), R(0) { ndeg0 = cfg.AMG_Config::getParameter<int>("kpz_order", cfg_scope); } // Destructor template<class T_Config> PolynomialSolverBase<T_Config>::~PolynomialSolverBase() { } template<class T_Config> void PolynomialSolverBase<T_Config>::printSolverParameters() const { std::cout << "kpz_order = " << this->ndeg0 << std::endl; } // Solver setup template<class T_Config> void PolynomialSolverBase<T_Config>::solver_setup(bool reuse_matrix_structure) { this->m_explicit_A = dynamic_cast<Matrix<T_Config>*>(this->m_A); if (!this->m_explicit_A) { FatalError("PolynomialSolver only works with explicit matrices", AMGX_ERR_INTERNAL); } if (ndeg0 == 0) { ndeg0 = 6; } int N = this->m_explicit_A->get_num_rows() * this->m_explicit_A->get_block_dimy(); ValueTypeA mu0, mu1, smu0, smu1; const IndexType *A_row_offsets_ptr = this->m_explicit_A->row_offsets.raw(); const IndexType *A_column_indices_ptr = this->m_explicit_A->col_indices.raw(); const IndexType *A_dia_ptr = this->m_explicit_A->diag.raw(); const ValueTypeA *A_nonzero_values_ptr = this->m_explicit_A->values.raw(); VVector row_sum(N); ValueTypeB *row_sum_ptr = row_sum.raw(); const int threads_per_block = 512; const int num_blocks = min((N - 1) / threads_per_block + 1, AMGX_GRID_MAX_SIZE); aux_norm1_csr <<< num_blocks, threads_per_block>>>(A_row_offsets_ptr, A_column_indices_ptr, A_dia_ptr, A_nonzero_values_ptr, N, this->m_explicit_A->get_block_dimy(), row_sum_ptr); cudaCheckError(); mu0 = cusp::blas::nrmmax(row_sum); cudaCheckError(); if (mu0 == 0) { mu0 = cusp::blas::nrmmax(this->m_explicit_A->values); } cudaCheckError(); mu0 = 1.0 / mu0; mu1 = 4.0 * mu0; // default set 8; smu0 = sqrt(mu0); smu1 = sqrt(mu1); k[1] = (mu0 + mu1) / 2.0; k[2] = (smu0 + smu1) * (smu0 + smu1) / 2.0; k[3] = mu0 * mu1; k[4] = 2.0 * k[3] / k[2]; // 4.0*mu0*mu1/(sqrt(mu0)+sqrt(mu1))/(sqrt(mu0)+sqrt(mu1)); k[5] = (mu1 - 2.0 * smu0 * smu1 + mu0) / (mu1 + 2.0 * smu0 * smu1 + mu0); // square of (sqrt(kappa)-1)/(sqrt(kappa)+1); if (this->m_explicit_A->get_block_size() == 1) { MatrixCusp<T_Config, cusp::csr_format> wA((Matrix<T_Config> *) &*this->m_explicit_A); ValueTypeA rho = cusp::detail::ritz_spectral_radius_symmetric(wA, 8); cudaCheckError(); cusp::array1d<ValueTypeA, cusp::host_memory> coeffs; cusp::relaxation::detail::chebyshev_polynomial_coefficients(rho, coeffs); cudaCheckError(); poly = cusp::relaxation::polynomial<ValueTypeA, typename Matrix<T_Config>::memory_space > (wA, coeffs); cudaCheckError(); } R.resize(N); V0.resize(N); V.resize(N); Rbar.resize(N); Sn.resize(N); Dinv.resize(N); R.set_block_dimy(this->m_explicit_A->get_block_dimy()); R.set_block_dimx(1); V0.set_block_dimy(this->m_explicit_A->get_block_dimy()); V0.set_block_dimx(1); V.set_block_dimy(this->m_explicit_A->get_block_dimy()); V.set_block_dimx(1); Rbar.set_block_dimy(this->m_explicit_A->get_block_dimy()); Rbar.set_block_dimx(1); Sn.set_block_dimy(this->m_explicit_A->get_block_dimy()); Sn.set_block_dimx(1); Dinv.set_block_dimy(this->m_explicit_A->get_block_dimy()); Dinv.set_block_dimx(1); ValueTypeB *Dinv_ptr = Dinv.raw(); get_diaginv <<< num_blocks, threads_per_block>>>(A_dia_ptr, A_nonzero_values_ptr, N, this->m_explicit_A->get_block_dimy(), Dinv_ptr); cudaCheckError(); } // template<class T_Config> void PolynomialSolverBase<T_Config>::solve_init( VVector &b, VVector &x, bool 
xIsZero ) { } // Solve one iteration template<class T_Config> bool PolynomialSolverBase<T_Config>::solve_iteration( VVector &b, VVector &x, bool xIsZero ) { smooth_common_sqblocks( *this->m_explicit_A, b, x ); return this->converged( b, x ); } template<class T_Config> void PolynomialSolverBase<T_Config>::solve_finalize( VVector &b, VVector &x ) { } template<class T_Config> struct poly_smooth { static void poly_postsmooth(const Matrix<T_Config> &A, const Vector<T_Config> &B, Vector<T_Config> &C, cusp::relaxation::polynomial<typename T_Config::MatPrec, typename T_Config::MemSpace> &poly) { FatalError("Mixed precision is not supported for scalar matrix type", AMGX_ERR_NOT_IMPLEMENTED); } static void poly_presmooth(const Matrix<T_Config> &A, const Vector<T_Config> &B, Vector<T_Config> &C, cusp::relaxation::polynomial<typename T_Config::MatPrec, typename T_Config::MemSpace> &poly) { FatalError("Mixed precision is not supported for scalar matrix type", AMGX_ERR_NOT_IMPLEMENTED); } }; template<AMGX_MemorySpace t_memSpace, AMGX_IndPrecision t_indInt> struct poly_smooth<TemplateConfig<t_memSpace, AMGX_vecDouble, AMGX_matDouble, t_indInt>> { static void poly_postsmooth(const Matrix<TemplateConfig<t_memSpace, AMGX_vecDouble, AMGX_matDouble, t_indInt> > &A, const Vector<TemplateConfig<t_memSpace, AMGX_vecDouble, AMGX_matDouble, t_indInt> > &B, Vector<TemplateConfig<t_memSpace, AMGX_vecDouble, AMGX_matDouble, t_indInt> > &C, cusp::relaxation::polynomial<double, typename MemorySpaceMap<t_memSpace>::Type> &poly) { MatrixCusp<TemplateConfig<t_memSpace, AMGX_vecDouble, AMGX_matDouble, t_indInt>, cusp::csr_format> wA((Matrix<TemplateConfig<t_memSpace, AMGX_vecDouble, AMGX_matDouble, t_indInt> > *) &A); poly.postsmooth(wA, B, C); cudaCheckError(); } static void poly_presmooth(const Matrix<TemplateConfig<t_memSpace, AMGX_vecDouble, AMGX_matDouble, t_indInt> > &A, const Vector<TemplateConfig<t_memSpace, AMGX_vecDouble, AMGX_matDouble, t_indInt> > &B, Vector<TemplateConfig<t_memSpace, AMGX_vecDouble, AMGX_matDouble, t_indInt> > &C, cusp::relaxation::polynomial<double, typename MemorySpaceMap<t_memSpace>::Type> &poly) { MatrixCusp<TemplateConfig<t_memSpace, AMGX_vecDouble, AMGX_matDouble, t_indInt>, cusp::csr_format> wA((Matrix<TemplateConfig<t_memSpace, AMGX_vecDouble, AMGX_matDouble, t_indInt> > *) &A); poly.presmooth(wA, B, C); cudaCheckError(); } }; template<AMGX_MemorySpace t_memSpace, AMGX_IndPrecision t_indInt> struct poly_smooth<TemplateConfig<t_memSpace, AMGX_vecFloat, AMGX_matFloat, t_indInt>> { static void poly_postsmooth(const Matrix<TemplateConfig<t_memSpace, AMGX_vecFloat, AMGX_matFloat, t_indInt> > &A, const Vector<TemplateConfig<t_memSpace, AMGX_vecFloat, AMGX_matFloat, t_indInt> > &B, Vector<TemplateConfig<t_memSpace, AMGX_vecFloat, AMGX_matFloat, t_indInt> > &C, cusp::relaxation::polynomial<float, typename MemorySpaceMap<t_memSpace>::Type> &poly) { MatrixCusp<TemplateConfig<t_memSpace, AMGX_vecFloat, AMGX_matFloat, t_indInt>, cusp::csr_format> wA((Matrix<TemplateConfig<t_memSpace, AMGX_vecFloat, AMGX_matFloat, t_indInt> > *) &A); poly.postsmooth(wA, B, C); cudaCheckError(); } static void poly_presmooth(const Matrix<TemplateConfig<t_memSpace, AMGX_vecFloat, AMGX_matFloat, t_indInt> > &A, const Vector<TemplateConfig<t_memSpace, AMGX_vecFloat, AMGX_matFloat, t_indInt> > &B, Vector<TemplateConfig<t_memSpace, AMGX_vecFloat, AMGX_matFloat, t_indInt> > &C, cusp::relaxation::polynomial<float, typename MemorySpaceMap<t_memSpace>::Type> &poly) { MatrixCusp<TemplateConfig<t_memSpace, AMGX_vecFloat, 
AMGX_matFloat, t_indInt>, cusp::csr_format> wA((Matrix<TemplateConfig<t_memSpace, AMGX_vecFloat, AMGX_matFloat, t_indInt> > *) &A); poly.presmooth(wA, B, C); cudaCheckError(); } }; template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void PolynomialSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::smooth_1x1(const Matrix_h &A, const VVector &B, VVector &C) { FatalError("Unsupported on host for Polynomial smoother", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void PolynomialSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::smooth_1x1(const Matrix_d &A, const VVector &B, VVector &C) { if (A.hasProps(DIAG)) { FatalError("Unsupported separate diag", AMGX_ERR_NOT_IMPLEMENTED); } if (A.get_block_size() != 1) { FatalError("Unsupported block size for PolynomialSolver", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE); } poly_smooth<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec>>::poly_postsmooth(A, B, C, this->poly); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void PolynomialSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::smooth_common_sqblocks(Matrix_h &A, const VVector &b, VVector &x) { FatalError("Unsupported on host for Polynomial smoother", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void PolynomialSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::smooth_common_sqblocks(Matrix_d &A, const VVector &b, VVector &x) { //0, get residule : R = b - Ax multiply( A, x, this->R ); cusp::blas::axpby( b, this->R, this->R, ValueTypeB(1), ValueTypeB(-1) ); //1, set up rbar cusp::blas::xmy(this->Dinv, this->R, this->Rbar); //2, set up V and V0 multiply( A, this->Rbar, this->V ); cusp::blas::xmy(this->Dinv, this->V, this->V); this->V0 = this->Rbar; cusp::blas::scal(this->V0, this->k[1]); cusp::blas::axpby(this->Rbar, this->V, this->V, this->k[2], this->k[3]); //3, iterate to get v_{j+1} for (int i = 0; i < this->ndeg0; i++) { multiply( A, this->V, this->Rbar ); cusp::blas::axpby( this->R, this->Rbar, this->Rbar, ValueTypeB(1), ValueTypeB(-1) ); cusp::blas::xmy(this->Dinv, this->Rbar, this->Rbar); cusp::blas::axpbypcz(this->Rbar, this->V, this->V0, this->Sn, this->k[4], this->k[5] + 1.0, -1.0 * this->k[5]); // V0 = V this->V0 = this->V; // V = V+Sn cusp::blas::copy(this->Sn, this->V); } //4, update solution cusp::blas::axpy(this->V, x, ValueTypeB(1)); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void PolynomialSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::smooth_with_0_initial_guess_1x1(const Matrix_h &A, const VVector &b, VVector &x) { FatalError("Unsupported on host for Polynomial smoother", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void PolynomialSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::smooth_with_0_initial_guess_1x1(const Matrix_d &A, const VVector &b, VVector &x) { if (A.hasProps(DIAG)) { FatalError("Unsupported separate diag", AMGX_ERR_NOT_IMPLEMENTED); } if (A.get_block_size() != 1) { FatalError("Unsupported block size for PolynomialSolver", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE); } poly_smooth<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, 
t_indPrec>>::poly_presmooth(A, b, x, this->poly); } /**************************************** * Explict instantiations ***************************************/ #define AMGX_CASE_LINE(CASE) template class PolynomialSolverBase<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE #define AMGX_CASE_LINE(CASE) template class PolynomialSolver<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE } // namespace polynomial_solver } // namespace amgx
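Read back from solver_setup() and smooth_common_sqblocks() above, and assuming the usual cusp argument conventions (axpby(x, y, z, a, b) meaning z = a x + b y, and axpbypcz(x, y, z, w, a, b, c) meaning w = a x + b y + c z), the smoother is a three-term polynomial recurrence. In LaTeX, with r = b - Ax, \bar r = D^{-1} r, and spectral bounds \mu_0 \le \mu_1:

\[
k_1 = \frac{\mu_0+\mu_1}{2},\quad
k_2 = \frac{(\sqrt{\mu_0}+\sqrt{\mu_1})^2}{2},\quad
k_3 = \mu_0\mu_1,\quad
k_4 = \frac{2k_3}{k_2},\quad
k_5 = \frac{(\sqrt{\mu_1}-\sqrt{\mu_0})^2}{(\sqrt{\mu_1}+\sqrt{\mu_0})^2},
\]
\[
v_{-1} = k_1\,\bar r,\qquad
v_0 = k_2\,\bar r + k_3\,D^{-1}A\,\bar r,\qquad
v_{j+1} = (1+k_5)\,v_j - k_5\,v_{j-1} + k_4\,D^{-1}\bigl(r - A v_j\bigr),\qquad
x \leftarrow x + v_{\mathrm{ndeg0}}.
\]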
a6956b26f7bb09ad9349493ea9d4562ce2ece29d.hip
// !!! This is a file automatically generated by hipify!!! #define DEFINE_GLOBAL #include <math.h> #include <stdio.h> #include <stdlib.h> #include <assert.h> #include "global.h" #include "geom_pbc.h" #include "randgpu.h" #include "tests.h" void print_nn(int k) { int i,j,idx; if (k>0 && k<3) printf("Naechste Nachbarn, %d-Richtung:\n",k); else if (k>2 && k<5) printf("Naechste Nachbarn, -%d-Richtung:\n",k-2); else printf("Fortlaufender Index:\n"); idx=0; for (j=0; j<lsize[2]; j++) { printf(" "); for (i=0; i<lsize[1]; i++) { if (k>0) printf("%d ",nn[k][idx]); else printf("%d ",idx); idx+=1; } printf("\n"); } } double action(double lambda, double kappa, hipDoubleComplex h) { int idx,k; double act,tmp; act=0.0; kappa*=2.0; for (idx=0; idx<nvol; idx++) { tmp=cuCreal(phi[idx])*cuCreal(phi[idx]) + cuCimag(phi[idx])*cuCimag(phi[idx]); act+=tmp; tmp-=1.0; act+=lambda*tmp*tmp; tmp=0.0; for (k=1; k<=ndim; k++) { tmp+=(cuCreal(phi[idx])*cuCreal(phi[nn[k][idx]]) + cuCimag(phi[idx])*cuCimag(phi[nn[k][idx]])); } act-=kappa*tmp; act-=2.0*(cuCreal(phi[idx])*cuCreal(h) + cuCimag(phi[idx])*cuCimag(h)); } return act; } double alocal(int idx, double lambda, double kappa, hipDoubleComplex h) { int k; double a,tmp; hipDoubleComplex b,tmpc; b=h; for (k=1; k<=ndim; k++) { tmpc=cuCadd(phi[nn[k][idx]],phi[nn[ndim+k][idx]]); b=make_cuDoubleComplex(cuCreal(b)+kappa*cuCreal(tmpc),cuCimag(b)+kappa*cuCimag(tmpc)); } tmp=cuCreal(phi[idx])*cuCreal(phi[idx]) + cuCimag(phi[idx])*cuCimag(phi[idx]); a=2.0*(cuCreal(b)*cuCreal(phi[idx])+cuCimag(b)*cuCimag(phi[idx]))-tmp; tmp-=1.0; a-=lambda*tmp*tmp; #ifdef DEBUG printf("b: %f, a: %f\n",b,a); #endif return -a; } int check1(double lambda, double kappa, hipDoubleComplex h, double a, double b) { int idx; double act1,act2,tmp; for (idx=0; idx<nvol; idx++) { phi[idx]=make_cuDoubleComplex(a,b); //printf("%f + %f * i\n", cuCreal(phi[idx]), cuCimag(phi[idx])); } act1=action(lambda,kappa,h); tmp=a*a+b*b; act2=((double)nvol)*( (1.0-2*lambda-2.0*kappa*ndim)*tmp + lambda*(1+tmp*tmp) - 2.0*(a*cuCreal(h)+b*cuCimag(h)) ); printf("Check1: %e (%e)\n",fabs((act1-act2)/act2),(sqrt(nvol*(100+50*ndim))*DBL_EPSILON)); return (fabs((act1-act2)/act2)<(nvol*DBL_EPSILON)); } void random_cnfg(void) { int idx; for (idx=0; idx<nvol; idx++) { phi[idx]=make_cuDoubleComplex((double)(rand() & 0xFF ) / 99.0,(double)(rand() & 0xFF ) / 99.0); } } int check2(double lambda, double kappa, double alpha) { int idx; double act1,act2; hipDoubleComplex f,h; random_cnfg(); f=make_cuDoubleComplex(cos(alpha),sin(alpha)); h=make_cuDoubleComplex(0.0,0.0); act1=action(lambda,kappa,h); for (idx=0; idx<nvol; idx++) { phi[idx]=cuCmul(phi[idx],f); } act2=action(lambda,kappa,h); printf("Check2: %e\n",fabs((act1-act2)/act2)); return (fabs((act1-act2)/act2)<sqrt(nvol)*DBL_EPSILON); } int check_alocal(double lambda, double kappa, hipDoubleComplex h) { int idx,ifail; double act1,act2,a1,a2,diff,mdiff; hipDoubleComplex tmp; random_cnfg(); ifail=0; mdiff=0.0; act1=action(lambda,kappa,h); for (idx=0; idx<nvol; idx++) { tmp=phi[idx]; a1=alocal(idx,lambda,kappa,h); phi[idx]=make_cuDoubleComplex(cuCreal(phi[idx])+10.0,cuCimag(phi[idx])); act2=action(lambda,kappa,h); a2=alocal(idx,lambda,kappa,h); phi[idx]=tmp; diff=fabs(((-act2+act1)-(-a2+a1))/(-act2+act1)); if (diff>1e-7) { printf("idx: %d, diff: %e %e\n",idx,diff,(-act2+act1)-(-a2+a1)); ifail=1; } if (diff>mdiff) mdiff=diff; } printf("Check alocal: Max. 
diff: %e\n",mdiff); return (ifail==0); } int main(int argc, char **argv) { printf("%s Starting...\n", argv[0]); int i; double lambda, kappa, alpha; hipDoubleComplex h; if (argc>1) { ndim=argc-1; lsize=(int*)malloc((ndim+1)*sizeof(int)); for (i=1; i<argc; i++) { lsize[i]=atoi(argv[i]); } } else { ndim=1; lsize=(int*)malloc((ndim+1)*sizeof(int)); lsize[1]=8; } printf("Gittergroesse: %d",lsize[1]); for (i=2; i<=ndim; i++) { printf(" x %d",lsize[i]); } printf("\n\n"); geom_pbc(); if (ndim==2) { print_nn(0); print_nn(1); print_nn(2); print_nn(3); print_nn(4); } phi=(hipDoubleComplex*)malloc(nvol*sizeof(hipDoubleComplex)); h=make_cuDoubleComplex(0.3,0.5); lambda=0.7; kappa=0.06; if (check1(lambda,kappa,h,0.5,0.0)) printf("Check1 erfolgreich.\n"); else printf("Check1 fehlgeschalgen!.\n"); alpha=0.45; if (check2(lambda,kappa,alpha)) printf("Check2 erfolgreich.\n"); else printf("Check2 fehlgeschalgen!.\n"); if (check_alocal(lambda,kappa,h)) printf("Check alocal erfolgreich.\n"); else printf("Check alocal fehlgeschalgen!.\n"); printf("\n"); double *rnd; rnd=randgpu(20); for (int i=1; i<20; i++) { printf(" %.6f\n",rnd[i]); } mag_test(); /* other_test(); */ /* int fitting_res = delta_fitting_test(); */ int set_res = spin_set_test(); spin_update_test(); boltzmag(); free(lsize); free(nn[0]); free(nn); free(phi); }
a6956b26f7bb09ad9349493ea9d4562ce2ece29d.cu
#define DEFINE_GLOBAL #include <math.h> #include <stdio.h> #include <stdlib.h> #include <assert.h> #include "global.h" #include "geom_pbc.h" #include "randgpu.h" #include "tests.h" void print_nn(int k) { int i,j,idx; if (k>0 && k<3) printf("Naechste Nachbarn, %d-Richtung:\n",k); else if (k>2 && k<5) printf("Naechste Nachbarn, -%d-Richtung:\n",k-2); else printf("Fortlaufender Index:\n"); idx=0; for (j=0; j<lsize[2]; j++) { printf(" "); for (i=0; i<lsize[1]; i++) { if (k>0) printf("%d ",nn[k][idx]); else printf("%d ",idx); idx+=1; } printf("\n"); } } double action(double lambda, double kappa, cuDoubleComplex h) { int idx,k; double act,tmp; act=0.0; kappa*=2.0; for (idx=0; idx<nvol; idx++) { tmp=cuCreal(phi[idx])*cuCreal(phi[idx]) + cuCimag(phi[idx])*cuCimag(phi[idx]); act+=tmp; tmp-=1.0; act+=lambda*tmp*tmp; tmp=0.0; for (k=1; k<=ndim; k++) { tmp+=(cuCreal(phi[idx])*cuCreal(phi[nn[k][idx]]) + cuCimag(phi[idx])*cuCimag(phi[nn[k][idx]])); } act-=kappa*tmp; act-=2.0*(cuCreal(phi[idx])*cuCreal(h) + cuCimag(phi[idx])*cuCimag(h)); } return act; } double alocal(int idx, double lambda, double kappa, cuDoubleComplex h) { int k; double a,tmp; cuDoubleComplex b,tmpc; b=h; for (k=1; k<=ndim; k++) { tmpc=cuCadd(phi[nn[k][idx]],phi[nn[ndim+k][idx]]); b=make_cuDoubleComplex(cuCreal(b)+kappa*cuCreal(tmpc),cuCimag(b)+kappa*cuCimag(tmpc)); } tmp=cuCreal(phi[idx])*cuCreal(phi[idx]) + cuCimag(phi[idx])*cuCimag(phi[idx]); a=2.0*(cuCreal(b)*cuCreal(phi[idx])+cuCimag(b)*cuCimag(phi[idx]))-tmp; tmp-=1.0; a-=lambda*tmp*tmp; #ifdef DEBUG printf("b: %f, a: %f\n",b,a); #endif return -a; } int check1(double lambda, double kappa, cuDoubleComplex h, double a, double b) { int idx; double act1,act2,tmp; for (idx=0; idx<nvol; idx++) { phi[idx]=make_cuDoubleComplex(a,b); //printf("%f + %f * i\n", cuCreal(phi[idx]), cuCimag(phi[idx])); } act1=action(lambda,kappa,h); tmp=a*a+b*b; act2=((double)nvol)*( (1.0-2*lambda-2.0*kappa*ndim)*tmp + lambda*(1+tmp*tmp) - 2.0*(a*cuCreal(h)+b*cuCimag(h)) ); printf("Check1: %e (%e)\n",fabs((act1-act2)/act2),(sqrt(nvol*(100+50*ndim))*DBL_EPSILON)); return (fabs((act1-act2)/act2)<(nvol*DBL_EPSILON)); } void random_cnfg(void) { int idx; for (idx=0; idx<nvol; idx++) { phi[idx]=make_cuDoubleComplex((double)(rand() & 0xFF ) / 99.0,(double)(rand() & 0xFF ) / 99.0); } } int check2(double lambda, double kappa, double alpha) { int idx; double act1,act2; cuDoubleComplex f,h; random_cnfg(); f=make_cuDoubleComplex(cos(alpha),sin(alpha)); h=make_cuDoubleComplex(0.0,0.0); act1=action(lambda,kappa,h); for (idx=0; idx<nvol; idx++) { phi[idx]=cuCmul(phi[idx],f); } act2=action(lambda,kappa,h); printf("Check2: %e\n",fabs((act1-act2)/act2)); return (fabs((act1-act2)/act2)<sqrt(nvol)*DBL_EPSILON); } int check_alocal(double lambda, double kappa, cuDoubleComplex h) { int idx,ifail; double act1,act2,a1,a2,diff,mdiff; cuDoubleComplex tmp; random_cnfg(); ifail=0; mdiff=0.0; act1=action(lambda,kappa,h); for (idx=0; idx<nvol; idx++) { tmp=phi[idx]; a1=alocal(idx,lambda,kappa,h); phi[idx]=make_cuDoubleComplex(cuCreal(phi[idx])+10.0,cuCimag(phi[idx])); act2=action(lambda,kappa,h); a2=alocal(idx,lambda,kappa,h); phi[idx]=tmp; diff=fabs(((-act2+act1)-(-a2+a1))/(-act2+act1)); if (diff>1e-7) { printf("idx: %d, diff: %e %e\n",idx,diff,(-act2+act1)-(-a2+a1)); ifail=1; } if (diff>mdiff) mdiff=diff; } printf("Check alocal: Max. 
diff: %e\n",mdiff); return (ifail==0); } int main(int argc, char **argv) { printf("%s Starting...\n", argv[0]); int i; double lambda, kappa, alpha; cuDoubleComplex h; if (argc>1) { ndim=argc-1; lsize=(int*)malloc((ndim+1)*sizeof(int)); for (i=1; i<argc; i++) { lsize[i]=atoi(argv[i]); } } else { ndim=1; lsize=(int*)malloc((ndim+1)*sizeof(int)); lsize[1]=8; } printf("Gittergroesse: %d",lsize[1]); for (i=2; i<=ndim; i++) { printf(" x %d",lsize[i]); } printf("\n\n"); geom_pbc(); if (ndim==2) { print_nn(0); print_nn(1); print_nn(2); print_nn(3); print_nn(4); } phi=(cuDoubleComplex*)malloc(nvol*sizeof(cuDoubleComplex)); h=make_cuDoubleComplex(0.3,0.5); lambda=0.7; kappa=0.06; if (check1(lambda,kappa,h,0.5,0.0)) printf("Check1 erfolgreich.\n"); else printf("Check1 fehlgeschalgen!.\n"); alpha=0.45; if (check2(lambda,kappa,alpha)) printf("Check2 erfolgreich.\n"); else printf("Check2 fehlgeschalgen!.\n"); if (check_alocal(lambda,kappa,h)) printf("Check alocal erfolgreich.\n"); else printf("Check alocal fehlgeschalgen!.\n"); printf("\n"); double *rnd; rnd=randgpu(20); for (int i=1; i<20; i++) { printf(" %.6f\n",rnd[i]); } mag_test(); /* other_test(); */ /* int fitting_res = delta_fitting_test(); */ int set_res = spin_set_test(); spin_update_test(); boltzmag(); free(lsize); free(nn[0]); free(nn); free(phi); }
e1d9806599f96efd7034b478293a1d64be14684a.hip
// !!! This is a file automatically generated by hipify!!! // ######################################################################## // Practical Course: GPU Programming in Computer Vision // Technical University of Munich, Computer Vision Group // ######################################################################## #include "structure_tensor.cuh" #include <iostream> #include <hip/hip_runtime.h> #include "helper.cuh" __global__ void computeTensorOutputKernel(float *imgOut, const float *lmb1, const float *lmb2, const float *imgIn, int w, int h, int nc, float alpha, float beta) { int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; if (x >= w || y >= h) return; int pos = x + y * w; float lambda1 = lmb1[pos]; float lambda2 = lmb2[pos]; if (lambda2 >= lambda1 && lambda1 >= alpha) { // corner pixel => make it red imgOut[pos] = 255; } else if (lambda1 <= beta && beta < alpha && alpha <= lambda2) { // edge pixel => make it yellow imgOut[pos] = 255; imgOut[pos + w * h] = 255; } else { // otherwise make the original pixel darker for (int ch = 0; ch < nc; ch++) imgOut[pos + w * h * ch] = 0.5 * imgIn[pos + w * h * ch]; } } __device__ void computeEigenValues(float *lmb1, float *lmb2, const float m11, const float m12, const float m22, const int pos) { float trace = m11 + m22; float det = m11 * m22 - m12 * m12; float a = trace / 2.0; float b = sqrtf((trace * trace) / 4.0 - det); float lambda1 = a + b; float lambda2 = a - b; // follow convention that l1 <= l2 if (lambda1 < lambda2) { lmb1[pos] = lambda1; lmb2[pos] = lambda2; } else { lmb1[pos] = lambda2; lmb2[pos] = lambda1; } } __global__ void computeDetectorKernel(float *lmb1, float *lmb2, const float *tensor11, const float *tensor12, const float *tensor22, int w, int h) { int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; if (x >= w || y >= h) return; // compute eigenvalues int pos = x + y * w; computeEigenValues(lmb1, lmb2, tensor11[pos], tensor12[pos], tensor22[pos], pos); } __global__ void computeStructureTensorKernel(float *tensor11, float *tensor12, float *tensor22, const float *dx, const float *dy, int w, int h, int nc) { int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; if (x >= w || y >= h) return; int i = x + y * w; tensor11[i] = 0; tensor12[i] = 0; tensor22[i] = 0; for (int ch = 0; ch < nc; ch++) { int ch_skip = w * h * ch; tensor11[i] += dx[i + ch_skip] * dx[i + ch_skip]; tensor12[i] += dx[i + ch_skip] * dy[i + ch_skip]; tensor22[i] += dy[i + ch_skip] * dy[i + ch_skip]; } } void computeTensorOutputCuda(float *imgOut, const float *lmb1, const float *lmb2, const float *imgIn, int w, int h, int nc, float alpha, float beta) { // calculate block and grid size dim3 block(32, 32, 1); dim3 grid = computeGrid2D(block, w, h); // run cuda kernel hipLaunchKernelGGL(( computeTensorOutputKernel), dim3(grid), dim3(block), 0, 0, imgOut, lmb1, lmb2, imgIn, w, h, nc, alpha, beta); // check for errors CUDA_CHECK; } void computeDetectorCuda(float *lmb1, float *lmb2, const float *tensor11, const float *tensor12, const float *tensor22, int w, int h) { // calculate block and grid size dim3 block(32, 32, 1); dim3 grid = computeGrid2D(block, w, h); // run cuda kernel hipLaunchKernelGGL(( computeDetectorKernel), dim3(grid), dim3(block), 0, 0, lmb1, lmb2, tensor11, tensor12, tensor22, w, h); // check for errors CUDA_CHECK; } void computeStructureTensorCuda(float *tensor11, float *tensor12, float *tensor22, const float *dx, const float 
*dy, int w, int h, int nc) { // calculate block and grid size dim3 block(32, 32, 1); dim3 grid = computeGrid2D(block, w, h); // run cuda kernel hipLaunchKernelGGL(( computeStructureTensorKernel), dim3(grid), dim3(block), 0, 0, tensor11, tensor12, tensor22, dx, dy, w, h, nc); // check for errors CUDA_CHECK; }
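The three host wrappers above are intended to be chained: tensor accumulation from the image derivatives, then per-pixel eigenvalues, then the colour-coded output. Below is a minimal host-side driver sketch against the declarations in structure_tensor.cuh; every buffer name is hypothetical, the caller is assumed to have allocated the device buffers and filled d_dx/d_dy with the per-channel derivatives, and the tensor smoothing that a full detector would normally apply is not part of this file.

#include "structure_tensor.cuh"

// d_in, d_out, d_dx, d_dy: w*h*nc floats; tensor channels and eigenvalue maps: w*h floats each.
void detectFeatures(float* d_out, const float* d_in,
                    const float* d_dx, const float* d_dy,
                    float* d_t11, float* d_t12, float* d_t22,
                    float* d_lmb1, float* d_lmb2,
                    int w, int h, int nc, float alpha, float beta)
{
    computeStructureTensorCuda(d_t11, d_t12, d_t22, d_dx, d_dy, w, h, nc);       // per-pixel M = sum_c grad grad^T
    computeDetectorCuda(d_lmb1, d_lmb2, d_t11, d_t12, d_t22, w, h);              // eigenvalues, lmb1 <= lmb2
    computeTensorOutputCuda(d_out, d_lmb1, d_lmb2, d_in, w, h, nc, alpha, beta); // mark corners/edges in the output image
}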
e1d9806599f96efd7034b478293a1d64be14684a.cu
// ######################################################################## // Practical Course: GPU Programming in Computer Vision // Technical University of Munich, Computer Vision Group // ######################################################################## #include "structure_tensor.cuh" #include <iostream> #include <cuda_runtime.h> #include "helper.cuh" __global__ void computeTensorOutputKernel(float *imgOut, const float *lmb1, const float *lmb2, const float *imgIn, int w, int h, int nc, float alpha, float beta) { int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; if (x >= w || y >= h) return; int pos = x + y * w; float lambda1 = lmb1[pos]; float lambda2 = lmb2[pos]; if (lambda2 >= lambda1 && lambda1 >= alpha) { // corner pixel => make it red imgOut[pos] = 255; } else if (lambda1 <= beta && beta < alpha && alpha <= lambda2) { // edge pixel => make it yellow imgOut[pos] = 255; imgOut[pos + w * h] = 255; } else { // otherwise make the original pixel darker for (int ch = 0; ch < nc; ch++) imgOut[pos + w * h * ch] = 0.5 * imgIn[pos + w * h * ch]; } } __device__ void computeEigenValues(float *lmb1, float *lmb2, const float m11, const float m12, const float m22, const int pos) { float trace = m11 + m22; float det = m11 * m22 - m12 * m12; float a = trace / 2.0; float b = sqrtf((trace * trace) / 4.0 - det); float lambda1 = a + b; float lambda2 = a - b; // follow convention that l1 <= l2 if (lambda1 < lambda2) { lmb1[pos] = lambda1; lmb2[pos] = lambda2; } else { lmb1[pos] = lambda2; lmb2[pos] = lambda1; } } __global__ void computeDetectorKernel(float *lmb1, float *lmb2, const float *tensor11, const float *tensor12, const float *tensor22, int w, int h) { int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; if (x >= w || y >= h) return; // compute eigenvalues int pos = x + y * w; computeEigenValues(lmb1, lmb2, tensor11[pos], tensor12[pos], tensor22[pos], pos); } __global__ void computeStructureTensorKernel(float *tensor11, float *tensor12, float *tensor22, const float *dx, const float *dy, int w, int h, int nc) { int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; if (x >= w || y >= h) return; int i = x + y * w; tensor11[i] = 0; tensor12[i] = 0; tensor22[i] = 0; for (int ch = 0; ch < nc; ch++) { int ch_skip = w * h * ch; tensor11[i] += dx[i + ch_skip] * dx[i + ch_skip]; tensor12[i] += dx[i + ch_skip] * dy[i + ch_skip]; tensor22[i] += dy[i + ch_skip] * dy[i + ch_skip]; } } void computeTensorOutputCuda(float *imgOut, const float *lmb1, const float *lmb2, const float *imgIn, int w, int h, int nc, float alpha, float beta) { // calculate block and grid size dim3 block(32, 32, 1); dim3 grid = computeGrid2D(block, w, h); // run cuda kernel computeTensorOutputKernel<<<grid, block>>>(imgOut, lmb1, lmb2, imgIn, w, h, nc, alpha, beta); // check for errors CUDA_CHECK; } void computeDetectorCuda(float *lmb1, float *lmb2, const float *tensor11, const float *tensor12, const float *tensor22, int w, int h) { // calculate block and grid size dim3 block(32, 32, 1); dim3 grid = computeGrid2D(block, w, h); // run cuda kernel computeDetectorKernel<<<grid, block>>>(lmb1, lmb2, tensor11, tensor12, tensor22, w, h); // check for errors CUDA_CHECK; } void computeStructureTensorCuda(float *tensor11, float *tensor12, float *tensor22, const float *dx, const float *dy, int w, int h, int nc) { // calculate block and grid size dim3 block(32, 32, 1); dim3 grid = computeGrid2D(block, w, h); // run cuda 
kernel computeStructureTensorKernel<<<grid, block>>>(tensor11, tensor12, tensor22, dx, dy, w, h, nc); // check for errors CUDA_CHECK; }
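computeEigenValues() above uses the closed form for a symmetric 2x2 matrix; per pixel, in LaTeX:

\[
M = \begin{pmatrix} m_{11} & m_{12} \\ m_{12} & m_{22} \end{pmatrix},
\qquad
\mathrm{tr}\,M = m_{11}+m_{22},\quad \det M = m_{11}m_{22}-m_{12}^2,
\qquad
\lambda_{1,2} = \frac{\mathrm{tr}\,M}{2} \mp \sqrt{\frac{(\mathrm{tr}\,M)^2}{4} - \det M},
\]

with the final swap enforcing \lambda_1 \le \lambda_2. computeTensorOutputKernel then classifies: \lambda_1 \ge \alpha marks a corner (red channel set), \lambda_1 \le \beta < \alpha \le \lambda_2 marks an edge (red and green channels set, i.e. yellow), and every other pixel copies the darkened input.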
d1e5e5d637fe895f35c777a96cbb2011f6f5e2a0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "nf_header.h" #include "nf_scattering.h" #include "cuMath.cu" __constant__ constStructre<double> constStr_scat_single; template<typename T, typename T2, typename T3> __device__ T2 singleScattering_mainLoop(const T3 currentX, const pixel_entry<T,T3>* pixelData) { const constStructre<T>* curr_constStr = (const constStructre<T>*) &constStr_scat_single; T2 thL[3]; T2 thV[3]; T th_c, log_nu; T bd1, bd2, bd3, dz; unsigned char mixtureIdx; T gamma_s, real_abs_mu; T2 gamma_s_over_beta_0; T3 w; T2 expVal; T2 C, sqrtMu; T2 res = {0}; // l Throughput: a^{/tilde}_i // Distance to edge of sample bd1 = abs(pixelData->illuminationDir.x) < 1e-8 ? INFINITY : pixelData->illuminationDir.x >= 0 ? (currentX.x - curr_constStr->box_min[0]) / ( pixelData->illuminationDir.x) : (currentX.x - curr_constStr->box_max[0]) / (pixelData->illuminationDir.x); bd2 = abs(pixelData->illuminationDir.y) < 1e-8 ? INFINITY : pixelData->illuminationDir.y >= 0 ? (currentX.y - curr_constStr->box_min[1]) / ( pixelData->illuminationDir.y) : (currentX.y - curr_constStr->box_max[1]) / (pixelData->illuminationDir.y); bd3 = abs(pixelData->illuminationDir.z) < 1e-8 ? INFINITY : pixelData->illuminationDir.z >= 0 ? (currentX.z - curr_constStr->box_min[2]) / ( pixelData->illuminationDir.z) : (currentX.z - curr_constStr->box_max[2]) / (pixelData->illuminationDir.z); dz = fmin(fmin(bd1, bd2), bd3); // The complex throughput thL[0].x = curr_constStr->aperture_kappa_l * pixelData->illuminationDir.x; thL[1].x = curr_constStr->aperture_kappa_l * pixelData->illuminationDir.y; thL[2].x = curr_constStr->aperture_kappa_l * pixelData->illuminationDir.z; thL[0].y = pixelData->k * (currentX.x - pixelData->illuminationP.x); thL[1].y = pixelData->k * (currentX.y - pixelData->illuminationP.y); thL[2].y = pixelData->k * (currentX.z - pixelData->illuminationP.z); // v Throughput: a^{/tilde}_v // Distance to edge of sample bd1 = abs(pixelData->viewDir.x) < 1e-8 ? INFINITY : pixelData->viewDir.x < 0 ? (curr_constStr->box_min[0] - currentX.x) / ( pixelData->viewDir.x) : (curr_constStr->box_max[0] - currentX.x) / (pixelData->viewDir.x); bd2 = abs(pixelData->viewDir.y) < 1e-8 ? INFINITY : pixelData->viewDir.y < 0 ? (curr_constStr->box_min[1] - currentX.y) / ( pixelData->viewDir.y) : (curr_constStr->box_max[1] - currentX.y) / (pixelData->viewDir.y); bd3 = abs(pixelData->viewDir.z) < 1e-8 ? INFINITY : pixelData->viewDir.z < 0 ? 
(curr_constStr->box_min[2] - currentX.z) / ( pixelData->viewDir.z) : (curr_constStr->box_max[2] - currentX.z) / (pixelData->viewDir.z); dz += fmin(fmin(bd1, bd2), bd3); // The complex throughput thV[0].x = curr_constStr->aperture_kappa_v * pixelData->viewDir.x; thV[1].x = curr_constStr->aperture_kappa_v * pixelData->viewDir.y; thV[2].x = curr_constStr->aperture_kappa_v * pixelData->viewDir.z; thV[0].y = pixelData->k * (pixelData->viewP.x - currentX.x); thV[1].y = pixelData->k * (pixelData->viewP.y - currentX.y); thV[2].y = pixelData->k * (pixelData->viewP.z - currentX.z); th_c = curr_constStr->aperture_C_l_plus_aperture_C_v_plus_LOG_2_PI - curr_constStr->sigt * dz; // For each mixture component for (mixtureIdx = 0; mixtureIdx < curr_constStr->mixturesNum; mixtureIdx++) { // Convolution with the illumination throughput gamma_s = curr_constStr->mixtureMu[mixtureIdx]; if(abs(gamma_s) < 0.000000001) { gamma_s = 0.000000001; } gamma_s_over_beta_0 = gamma_s * rComplexSqrt( complexSquare(cfma(gamma_s , pixelData->viewDir.x , thL[0])) + complexSquare(cfma(gamma_s , pixelData->viewDir.y , thL[1])) + complexSquare(cfma(gamma_s , pixelData->viewDir.z , thL[2]))); sqrtMu = complexSqrt( complexSquare(cfma(gamma_s_over_beta_0 , thL[0] , thV[0])) + complexSquare(cfma(gamma_s_over_beta_0 , thL[1] , thV[1])) + complexSquare(cfma(gamma_s_over_beta_0 , thL[2] , thV[2]))); real_abs_mu = rnorm3d(realMult(gamma_s_over_beta_0 , thL[0]),realMult(gamma_s_over_beta_0 , thL[1]),realMult(gamma_s_over_beta_0 , thL[2])); w.x = realMult(gamma_s_over_beta_0 , thL[0]); w.y = realMult(gamma_s_over_beta_0 , thL[1]); w.z = realMult(gamma_s_over_beta_0 , thL[2]); // expVal = expVal - real_abs_mu * gamma_s_over_beta_0 * cfma(w.x , thL[0] , cfma(w.y , thL[1] , w.z * thL[2])); gamma_s *= real_abs_mu; C = complexSqrt( complexSquare(cfma(gamma_s , w.x , thL[0])) + complexSquare(cfma(gamma_s , w.y , thL[1])) + complexSquare(cfma(gamma_s , w.z , thL[2]))); expVal = sqrtMu + C - real_abs_mu * gamma_s_over_beta_0 * cfma(w.x , thL[0] , cfma(w.y , thL[1] , w.z * thL[2])); log_nu = th_c + curr_constStr->mixtureC[mixtureIdx]; // integrate // res = res + (complexExponent(expVal + log_nu) - complexExponent(log_nu - expVal)) / (C * sqrtMu); res = res + (complexExponent(expVal + log_nu)) / (C * sqrtMu); } return res; } template<typename T, typename T2, typename T3> __device__ void singleScatteringLoop(const T3* __restrict__ x0, volatile T *u_res_x, volatile T *u_res_y, entryStructre_correlation<T,T3> *pixelData, const T2* __restrict__ constPath) { T2 res = singleScattering_mainLoop<T,T2,T3>(x0[threadIdx.x],&pixelData->pixel_1); u_res_x[threadIdx.x] = res.x; u_res_y[threadIdx.x] = res.y; res = singleScattering_mainLoop<T,T2,T3>(x0[threadIdx.x],&pixelData->pixel_2); T2 res_orig; res_orig.x = u_res_x[threadIdx.x]; res_orig.y = u_res_y[threadIdx.x]; T2 u_mult = conjMult(res_orig,res); // Each thread copies the sum of all mixture to the shared memory u_mult = u_mult * constPath[threadIdx.x]; u_res_x[threadIdx.x] = u_mult.x; u_res_y[threadIdx.x] = u_mult.y; } template<typename T, typename T2, typename T3> __device__ void singleScatteringLoop(const T3* __restrict__ x0, volatile T *u_res_x, volatile T *u_res_y, entryStructre_field<T,T3> *pixelData, const T2* __restrict__ constPath) { T2 res = singleScattering_mainLoop<T,T2,T3>(x0[threadIdx.x],&pixelData->pixel); res = res * constPath[threadIdx.x]; u_res_x[threadIdx.x] = res.x; u_res_y[threadIdx.x] = res.y; } template<typename T, typename T2, typename T3, typename correlationType> 
__launch_bounds__(THREADS_NUM) __global__ void singleScattering_kernel(T2* u, const T3* __restrict__ x0, const correlationType* __restrict__ dataIn, const T2* __restrict__ constPath) { __shared__ volatile T u_res_x[THREADS_NUM]; __shared__ volatile T u_res_y[THREADS_NUM]; __shared__ correlationType pixelData; // copy the relevant data to shared memory int* iDest = (int*)&pixelData; const int* iSrc = (const int*)(dataIn + blockIdx.x); if(threadIdx.x < sizeof(correlationType) / sizeof(int)) { iDest[threadIdx.x] = iSrc[threadIdx.x]; } __syncthreads(); singleScatteringLoop<T,T2,T3>(x0,u_res_x,u_res_y,&pixelData,constPath); __syncthreads(); // Reduce sum of all shared memory to a single result if (threadIdx.x < 512){ u_res_x[threadIdx.x] = u_res_x[threadIdx.x] + u_res_x[threadIdx.x + 512]; u_res_y[threadIdx.x] = u_res_y[threadIdx.x] + u_res_y[threadIdx.x + 512]; } __syncthreads(); if (threadIdx.x < 256){ u_res_x[threadIdx.x] = u_res_x[threadIdx.x] + u_res_x[threadIdx.x + 256]; u_res_y[threadIdx.x] = u_res_y[threadIdx.x] + u_res_y[threadIdx.x + 256]; } __syncthreads(); if (threadIdx.x < 128){ u_res_x[threadIdx.x] = u_res_x[threadIdx.x] + u_res_x[threadIdx.x + 128]; u_res_y[threadIdx.x] = u_res_y[threadIdx.x] + u_res_y[threadIdx.x + 128]; } __syncthreads(); if (threadIdx.x < 64 ){ u_res_x[threadIdx.x] = u_res_x[threadIdx.x] + u_res_x[threadIdx.x + 64 ]; u_res_y[threadIdx.x] = u_res_y[threadIdx.x] + u_res_y[threadIdx.x + 64 ]; } __syncthreads(); if (threadIdx.x < 32 ) // warpReduce { u_res_x[threadIdx.x] = u_res_x[threadIdx.x] + u_res_x[threadIdx.x + 32]; u_res_y[threadIdx.x] = u_res_y[threadIdx.x] + u_res_y[threadIdx.x + 32]; u_res_x[threadIdx.x] = u_res_x[threadIdx.x] + u_res_x[threadIdx.x + 16]; u_res_y[threadIdx.x] = u_res_y[threadIdx.x] + u_res_y[threadIdx.x + 16]; u_res_x[threadIdx.x] = u_res_x[threadIdx.x] + u_res_x[threadIdx.x + 8 ]; u_res_y[threadIdx.x] = u_res_y[threadIdx.x] + u_res_y[threadIdx.x + 8 ]; u_res_x[threadIdx.x] = u_res_x[threadIdx.x] + u_res_x[threadIdx.x + 4 ]; u_res_y[threadIdx.x] = u_res_y[threadIdx.x] + u_res_y[threadIdx.x + 4 ]; u_res_x[threadIdx.x] = u_res_x[threadIdx.x] + u_res_x[threadIdx.x + 2 ]; u_res_y[threadIdx.x] = u_res_y[threadIdx.x] + u_res_y[threadIdx.x + 2 ]; u_res_x[threadIdx.x] = u_res_x[threadIdx.x] + u_res_x[threadIdx.x + 1 ]; u_res_y[threadIdx.x] = u_res_y[threadIdx.x] + u_res_y[threadIdx.x + 1 ]; } // copy the final result to the global mem if (threadIdx.x == 0) { u[blockIdx.x].x += u_res_x[0]; u[blockIdx.x].y += u_res_y[0]; } } template<typename T, typename T2, typename T3> void singleScattering(T2 *us, const T3 *x0, const T2* constPath, ub32 is_correlation, ub32 total_elements, const void* globalMem) { if(is_correlation) { const entryStructre_correlation<T,T3> *globalMem_corr = (const entryStructre_correlation<T,T3> *) globalMem; hipLaunchKernelGGL(( singleScattering_kernel<T,T2,T3,entryStructre_correlation<T,T3> >), dim3(total_elements), dim3(THREADS_NUM), 0, 0, us,x0,globalMem_corr,constPath); } else { const entryStructre_field<T,T3> *globalMem_field = (const entryStructre_field<T,T3> *) globalMem; hipLaunchKernelGGL(( singleScattering_kernel<T,T2,T3,entryStructre_field<T,T3> >), dim3(total_elements), dim3(THREADS_NUM), 0, 0, us,x0,globalMem_field,constPath); } } template<typename T> void singleScattering_setConstMem(constStructre<T> *constMem) { hipMemcpyToSymbol(constStr_scat_single, constMem, sizeof(constStructre<T>)); } template void singleScattering<double,double2,double3>(double2 *us, const double3 *x0, const double2* constPath, ub32 
is_correlation, ub32 total_elements, const void* globalMem); template void singleScattering<float,float2,float3>(float2 *us, const float3 *x0, const float2* constPath, ub32 is_correlation, ub32 total_elements, const void* globalMem); template void singleScattering_setConstMem<double>(constStructre<double> *constMem); template void singleScattering_setConstMem<float>(constStructre<float> *constMem);
d1e5e5d637fe895f35c777a96cbb2011f6f5e2a0.cu
#include "nf_header.h" #include "nf_scattering.h" #include "cuMath.cu" __constant__ constStructre<double> constStr_scat_single; template<typename T, typename T2, typename T3> __device__ T2 singleScattering_mainLoop(const T3 currentX, const pixel_entry<T,T3>* pixelData) { const constStructre<T>* curr_constStr = (const constStructre<T>*) &constStr_scat_single; T2 thL[3]; T2 thV[3]; T th_c, log_nu; T bd1, bd2, bd3, dz; unsigned char mixtureIdx; T gamma_s, real_abs_mu; T2 gamma_s_over_beta_0; T3 w; T2 expVal; T2 C, sqrtMu; T2 res = {0}; // l Throughput: a^{/tilde}_i // Distance to edge of sample bd1 = abs(pixelData->illuminationDir.x) < 1e-8 ? INFINITY : pixelData->illuminationDir.x >= 0 ? (currentX.x - curr_constStr->box_min[0]) / ( pixelData->illuminationDir.x) : (currentX.x - curr_constStr->box_max[0]) / (pixelData->illuminationDir.x); bd2 = abs(pixelData->illuminationDir.y) < 1e-8 ? INFINITY : pixelData->illuminationDir.y >= 0 ? (currentX.y - curr_constStr->box_min[1]) / ( pixelData->illuminationDir.y) : (currentX.y - curr_constStr->box_max[1]) / (pixelData->illuminationDir.y); bd3 = abs(pixelData->illuminationDir.z) < 1e-8 ? INFINITY : pixelData->illuminationDir.z >= 0 ? (currentX.z - curr_constStr->box_min[2]) / ( pixelData->illuminationDir.z) : (currentX.z - curr_constStr->box_max[2]) / (pixelData->illuminationDir.z); dz = fmin(fmin(bd1, bd2), bd3); // The complex throughput thL[0].x = curr_constStr->aperture_kappa_l * pixelData->illuminationDir.x; thL[1].x = curr_constStr->aperture_kappa_l * pixelData->illuminationDir.y; thL[2].x = curr_constStr->aperture_kappa_l * pixelData->illuminationDir.z; thL[0].y = pixelData->k * (currentX.x - pixelData->illuminationP.x); thL[1].y = pixelData->k * (currentX.y - pixelData->illuminationP.y); thL[2].y = pixelData->k * (currentX.z - pixelData->illuminationP.z); // v Throughput: a^{/tilde}_v // Distance to edge of sample bd1 = abs(pixelData->viewDir.x) < 1e-8 ? INFINITY : pixelData->viewDir.x < 0 ? (curr_constStr->box_min[0] - currentX.x) / ( pixelData->viewDir.x) : (curr_constStr->box_max[0] - currentX.x) / (pixelData->viewDir.x); bd2 = abs(pixelData->viewDir.y) < 1e-8 ? INFINITY : pixelData->viewDir.y < 0 ? (curr_constStr->box_min[1] - currentX.y) / ( pixelData->viewDir.y) : (curr_constStr->box_max[1] - currentX.y) / (pixelData->viewDir.y); bd3 = abs(pixelData->viewDir.z) < 1e-8 ? INFINITY : pixelData->viewDir.z < 0 ? 
(curr_constStr->box_min[2] - currentX.z) / ( pixelData->viewDir.z) : (curr_constStr->box_max[2] - currentX.z) / (pixelData->viewDir.z); dz += fmin(fmin(bd1, bd2), bd3); // The complex throughput thV[0].x = curr_constStr->aperture_kappa_v * pixelData->viewDir.x; thV[1].x = curr_constStr->aperture_kappa_v * pixelData->viewDir.y; thV[2].x = curr_constStr->aperture_kappa_v * pixelData->viewDir.z; thV[0].y = pixelData->k * (pixelData->viewP.x - currentX.x); thV[1].y = pixelData->k * (pixelData->viewP.y - currentX.y); thV[2].y = pixelData->k * (pixelData->viewP.z - currentX.z); th_c = curr_constStr->aperture_C_l_plus_aperture_C_v_plus_LOG_2_PI - curr_constStr->sigt * dz; // For each mixture component for (mixtureIdx = 0; mixtureIdx < curr_constStr->mixturesNum; mixtureIdx++) { // Convolution with the illumination throughput gamma_s = curr_constStr->mixtureMu[mixtureIdx]; if(abs(gamma_s) < 0.000000001) { gamma_s = 0.000000001; } gamma_s_over_beta_0 = gamma_s * rComplexSqrt( complexSquare(cfma(gamma_s , pixelData->viewDir.x , thL[0])) + complexSquare(cfma(gamma_s , pixelData->viewDir.y , thL[1])) + complexSquare(cfma(gamma_s , pixelData->viewDir.z , thL[2]))); sqrtMu = complexSqrt( complexSquare(cfma(gamma_s_over_beta_0 , thL[0] , thV[0])) + complexSquare(cfma(gamma_s_over_beta_0 , thL[1] , thV[1])) + complexSquare(cfma(gamma_s_over_beta_0 , thL[2] , thV[2]))); real_abs_mu = rnorm3d(realMult(gamma_s_over_beta_0 , thL[0]),realMult(gamma_s_over_beta_0 , thL[1]),realMult(gamma_s_over_beta_0 , thL[2])); w.x = realMult(gamma_s_over_beta_0 , thL[0]); w.y = realMult(gamma_s_over_beta_0 , thL[1]); w.z = realMult(gamma_s_over_beta_0 , thL[2]); // expVal = expVal - real_abs_mu * gamma_s_over_beta_0 * cfma(w.x , thL[0] , cfma(w.y , thL[1] , w.z * thL[2])); gamma_s *= real_abs_mu; C = complexSqrt( complexSquare(cfma(gamma_s , w.x , thL[0])) + complexSquare(cfma(gamma_s , w.y , thL[1])) + complexSquare(cfma(gamma_s , w.z , thL[2]))); expVal = sqrtMu + C - real_abs_mu * gamma_s_over_beta_0 * cfma(w.x , thL[0] , cfma(w.y , thL[1] , w.z * thL[2])); log_nu = th_c + curr_constStr->mixtureC[mixtureIdx]; // integrate // res = res + (complexExponent(expVal + log_nu) - complexExponent(log_nu - expVal)) / (C * sqrtMu); res = res + (complexExponent(expVal + log_nu)) / (C * sqrtMu); } return res; } template<typename T, typename T2, typename T3> __device__ void singleScatteringLoop(const T3* __restrict__ x0, volatile T *u_res_x, volatile T *u_res_y, entryStructre_correlation<T,T3> *pixelData, const T2* __restrict__ constPath) { T2 res = singleScattering_mainLoop<T,T2,T3>(x0[threadIdx.x],&pixelData->pixel_1); u_res_x[threadIdx.x] = res.x; u_res_y[threadIdx.x] = res.y; res = singleScattering_mainLoop<T,T2,T3>(x0[threadIdx.x],&pixelData->pixel_2); T2 res_orig; res_orig.x = u_res_x[threadIdx.x]; res_orig.y = u_res_y[threadIdx.x]; T2 u_mult = conjMult(res_orig,res); // Each thread copies the sum of all mixture to the shared memory u_mult = u_mult * constPath[threadIdx.x]; u_res_x[threadIdx.x] = u_mult.x; u_res_y[threadIdx.x] = u_mult.y; } template<typename T, typename T2, typename T3> __device__ void singleScatteringLoop(const T3* __restrict__ x0, volatile T *u_res_x, volatile T *u_res_y, entryStructre_field<T,T3> *pixelData, const T2* __restrict__ constPath) { T2 res = singleScattering_mainLoop<T,T2,T3>(x0[threadIdx.x],&pixelData->pixel); res = res * constPath[threadIdx.x]; u_res_x[threadIdx.x] = res.x; u_res_y[threadIdx.x] = res.y; } template<typename T, typename T2, typename T3, typename correlationType> 
__launch_bounds__(THREADS_NUM) __global__ void singleScattering_kernel(T2* u, const T3* __restrict__ x0, const correlationType* __restrict__ dataIn, const T2* __restrict__ constPath) { __shared__ volatile T u_res_x[THREADS_NUM]; __shared__ volatile T u_res_y[THREADS_NUM]; __shared__ correlationType pixelData; // copy the relevant data to shared memory int* iDest = (int*)&pixelData; const int* iSrc = (const int*)(dataIn + blockIdx.x); if(threadIdx.x < sizeof(correlationType) / sizeof(int)) { iDest[threadIdx.x] = iSrc[threadIdx.x]; } __syncthreads(); singleScatteringLoop<T,T2,T3>(x0,u_res_x,u_res_y,&pixelData,constPath); __syncthreads(); // Reduce sum of all shared memory to a single result if (threadIdx.x < 512){ u_res_x[threadIdx.x] = u_res_x[threadIdx.x] + u_res_x[threadIdx.x + 512]; u_res_y[threadIdx.x] = u_res_y[threadIdx.x] + u_res_y[threadIdx.x + 512]; } __syncthreads(); if (threadIdx.x < 256){ u_res_x[threadIdx.x] = u_res_x[threadIdx.x] + u_res_x[threadIdx.x + 256]; u_res_y[threadIdx.x] = u_res_y[threadIdx.x] + u_res_y[threadIdx.x + 256]; } __syncthreads(); if (threadIdx.x < 128){ u_res_x[threadIdx.x] = u_res_x[threadIdx.x] + u_res_x[threadIdx.x + 128]; u_res_y[threadIdx.x] = u_res_y[threadIdx.x] + u_res_y[threadIdx.x + 128]; } __syncthreads(); if (threadIdx.x < 64 ){ u_res_x[threadIdx.x] = u_res_x[threadIdx.x] + u_res_x[threadIdx.x + 64 ]; u_res_y[threadIdx.x] = u_res_y[threadIdx.x] + u_res_y[threadIdx.x + 64 ]; } __syncthreads(); if (threadIdx.x < 32 ) // warpReduce { u_res_x[threadIdx.x] = u_res_x[threadIdx.x] + u_res_x[threadIdx.x + 32]; u_res_y[threadIdx.x] = u_res_y[threadIdx.x] + u_res_y[threadIdx.x + 32]; u_res_x[threadIdx.x] = u_res_x[threadIdx.x] + u_res_x[threadIdx.x + 16]; u_res_y[threadIdx.x] = u_res_y[threadIdx.x] + u_res_y[threadIdx.x + 16]; u_res_x[threadIdx.x] = u_res_x[threadIdx.x] + u_res_x[threadIdx.x + 8 ]; u_res_y[threadIdx.x] = u_res_y[threadIdx.x] + u_res_y[threadIdx.x + 8 ]; u_res_x[threadIdx.x] = u_res_x[threadIdx.x] + u_res_x[threadIdx.x + 4 ]; u_res_y[threadIdx.x] = u_res_y[threadIdx.x] + u_res_y[threadIdx.x + 4 ]; u_res_x[threadIdx.x] = u_res_x[threadIdx.x] + u_res_x[threadIdx.x + 2 ]; u_res_y[threadIdx.x] = u_res_y[threadIdx.x] + u_res_y[threadIdx.x + 2 ]; u_res_x[threadIdx.x] = u_res_x[threadIdx.x] + u_res_x[threadIdx.x + 1 ]; u_res_y[threadIdx.x] = u_res_y[threadIdx.x] + u_res_y[threadIdx.x + 1 ]; } // copy the final result to the global mem if (threadIdx.x == 0) { u[blockIdx.x].x += u_res_x[0]; u[blockIdx.x].y += u_res_y[0]; } } template<typename T, typename T2, typename T3> void singleScattering(T2 *us, const T3 *x0, const T2* constPath, ub32 is_correlation, ub32 total_elements, const void* globalMem) { if(is_correlation) { const entryStructre_correlation<T,T3> *globalMem_corr = (const entryStructre_correlation<T,T3> *) globalMem; singleScattering_kernel<T,T2,T3,entryStructre_correlation<T,T3> ><<<total_elements, THREADS_NUM>>>(us,x0,globalMem_corr,constPath); } else { const entryStructre_field<T,T3> *globalMem_field = (const entryStructre_field<T,T3> *) globalMem; singleScattering_kernel<T,T2,T3,entryStructre_field<T,T3> ><<<total_elements, THREADS_NUM>>>(us,x0,globalMem_field,constPath); } } template<typename T> void singleScattering_setConstMem(constStructre<T> *constMem) { cudaMemcpyToSymbol(constStr_scat_single, constMem, sizeof(constStructre<T>)); } template void singleScattering<double,double2,double3>(double2 *us, const double3 *x0, const double2* constPath, ub32 is_correlation, ub32 total_elements, const void* globalMem); template void 
singleScattering<float,float2,float3>(float2 *us, const float3 *x0, const float2* constPath, ub32 is_correlation, ub32 total_elements, const void* globalMem); template void singleScattering_setConstMem<double>(constStructre<double> *constMem); template void singleScattering_setConstMem<float>(constStructre<float> *constMem);
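The shared-memory reduction in singleScattering_kernel above starts at an offset of 512, so it evidently assumes THREADS_NUM == 1024, and it unrolls the last 32 steps over volatile shared memory instead of calling __syncthreads(). The following minimal, standalone sketch shows the same block-wide tree reduction written with explicit barriers throughout; reduce_sum_kernel and its float payload are illustrative names, not part of the source above.

__global__ void reduce_sum_kernel(const float* in, float* out, int n)
{
    __shared__ float s[1024];                 // one slot per thread; blockDim.x == 1024 assumed
    int tid = threadIdx.x;
    int idx = blockIdx.x * blockDim.x + tid;
    s[tid] = (idx < n) ? in[idx] : 0.0f;      // load, padding the tail with zeros
    __syncthreads();

    // Halve the number of active threads each step until one value remains.
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (tid < stride) s[tid] += s[tid + stride];
        __syncthreads();
    }
    if (tid == 0) out[blockIdx.x] = s[0];     // one partial sum per block
}

The kernel above drops the barriers once a single warp remains, which relies on warp-synchronous execution over volatile shared memory; on architectures with independent thread scheduling (Volta and newer) that idiom additionally needs __syncwarp() between the steps.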
1e52027893c8231cdc1eb991b1d3c68cfa626c9b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <fft_cuda.cuh> __global__ void fft_cuda3_kernel(complex_t *ip, complex_t *op, int m, int size) { __shared__ complex_t shared_op[2048]; int tid = threadIdx.x + blockDim.x * blockIdx.x; if (4 * tid < size) { complex_t eps = (complex_t){0.0, -1.0}; complex_t ip0 = ip[reverse(4*tid+0, m)]; complex_t ip1 = ip[reverse(4*tid+1, m)]; complex_t ip2 = ip[reverse(4*tid+2, m)]; complex_t ip3 = ip[reverse(4*tid+3, m)]; complex_t t0 = cuda_complex_add(ip0, ip1); complex_t t1 = cuda_complex_add(ip2, ip3); complex_t s0 = cuda_complex_sub(ip0, ip1); complex_t s1 = cuda_complex_mult(cuda_complex_sub(ip2, ip3), eps); shared_op[4*tid+0] = cuda_complex_add(t0, t1); shared_op[4*tid+2] = cuda_complex_sub(t0, t1); shared_op[4*tid+1] = cuda_complex_add(s0, s1); shared_op[4*tid+3] = cuda_complex_sub(s0, s1); } __syncthreads(); for (int i = 2; i < m; i++) { int len = 1 << i; /* the length of half bfly at level m*/ complex_t factor = {cos(-2.0 * PI / (2 * len)), sin(-2.0 * PI / (2 * len))}; int bfly_len = (len << 1); int nbfly = size / bfly_len; if (tid < nbfly) { int j = tid * bfly_len; complex_t omega = {1, 0}; for (int k = j; k < j+len; k++) { complex_t temp = cuda_complex_mult(omega, shared_op[k+len]); shared_op[k+len] = cuda_complex_sub(shared_op[k], temp); shared_op[k ] = cuda_complex_add(shared_op[k], temp); omega = cuda_complex_mult(omega, factor); } } __syncthreads(); } __syncthreads(); if (2 * tid < size) { op[2*tid+0] = shared_op[2*tid+0]; op[2*tid+1] = shared_op[2*tid+1]; } } void fft_cuda3(complex_t *_ip, complex_t *_op, int size) { int m = (int)log2((double)size); complex_t *ip = (complex_t *)_ip; complex_t *op = (complex_t *)_op; gpuErrchk(hipMemcpy(dev_ip, ip, size*sizeof(complex_t), hipMemcpyHostToDevice)); /* Can only work until size 2048 */ int threads = (128 < size) ? 128 : size; dim3 block(threads, 1, 1); dim3 grid(size/threads, 1, 1); hipLaunchKernelGGL(( fft_cuda3_kernel), dim3(grid), dim3(block), 0, 0, dev_ip, dev_op, m, size); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipMemcpy(op, dev_op, size*sizeof(complex_t), hipMemcpyDeviceToHost)); }
1e52027893c8231cdc1eb991b1d3c68cfa626c9b.cu
#include <fft_cuda.cuh> __global__ void fft_cuda3_kernel(complex_t *ip, complex_t *op, int m, int size) { __shared__ complex_t shared_op[2048]; int tid = threadIdx.x + blockDim.x * blockIdx.x; if (4 * tid < size) { complex_t eps = (complex_t){0.0, -1.0}; complex_t ip0 = ip[reverse(4*tid+0, m)]; complex_t ip1 = ip[reverse(4*tid+1, m)]; complex_t ip2 = ip[reverse(4*tid+2, m)]; complex_t ip3 = ip[reverse(4*tid+3, m)]; complex_t t0 = cuda_complex_add(ip0, ip1); complex_t t1 = cuda_complex_add(ip2, ip3); complex_t s0 = cuda_complex_sub(ip0, ip1); complex_t s1 = cuda_complex_mult(cuda_complex_sub(ip2, ip3), eps); shared_op[4*tid+0] = cuda_complex_add(t0, t1); shared_op[4*tid+2] = cuda_complex_sub(t0, t1); shared_op[4*tid+1] = cuda_complex_add(s0, s1); shared_op[4*tid+3] = cuda_complex_sub(s0, s1); } __syncthreads(); for (int i = 2; i < m; i++) { int len = 1 << i; /* the length of half bfly at level m*/ complex_t factor = {cos(-2.0 * PI / (2 * len)), sin(-2.0 * PI / (2 * len))}; int bfly_len = (len << 1); int nbfly = size / bfly_len; if (tid < nbfly) { int j = tid * bfly_len; complex_t omega = {1, 0}; for (int k = j; k < j+len; k++) { complex_t temp = cuda_complex_mult(omega, shared_op[k+len]); shared_op[k+len] = cuda_complex_sub(shared_op[k], temp); shared_op[k ] = cuda_complex_add(shared_op[k], temp); omega = cuda_complex_mult(omega, factor); } } __syncthreads(); } __syncthreads(); if (2 * tid < size) { op[2*tid+0] = shared_op[2*tid+0]; op[2*tid+1] = shared_op[2*tid+1]; } } void fft_cuda3(complex_t *_ip, complex_t *_op, int size) { int m = (int)log2((double)size); complex_t *ip = (complex_t *)_ip; complex_t *op = (complex_t *)_op; gpuErrchk(cudaMemcpy(dev_ip, ip, size*sizeof(complex_t), cudaMemcpyHostToDevice)); /* Can only work until size 2048 */ int threads = (128 < size) ? 128 : size; dim3 block(threads, 1, 1); dim3 grid(size/threads, 1, 1); fft_cuda3_kernel<<<grid, block>>> (dev_ip, dev_op, m, size); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaMemcpy(op, dev_op, size*sizeof(complex_t), cudaMemcpyDeviceToHost)); }
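fft_cuda3_kernel reorders its input with reverse(i, m), which is declared in fft_cuda.cuh and not shown in this file. Assuming it is the conventional m-bit index reversal of a radix-2 FFT, it would look roughly like the sketch below; reverse_bits is an illustrative name, not the project's function.

__host__ __device__ inline int reverse_bits(int x, int m)
{
    int r = 0;
    for (int i = 0; i < m; ++i) {  // move the low bit of x into the low end of r, m times
        r = (r << 1) | (x & 1);
        x >>= 1;
    }
    return r;                      // e.g. reverse_bits(0b001, 3) == 0b100
}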
56e119c096180c2b8b66d78b49c4cff30ac2fc34.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe/util/device_alternate.hpp" #include "caffe/failure_maker.hpp" namespace caffe { template <typename Dtype> __global__ void FailureThresholdKernel(const int n, Dtype* values, Dtype split1, Dtype split2) { CUDA_KERNEL_LOOP(index, n) { if (values[index] < split1) { values[index] = -1; } else if (values[index] < split2) { values[index] = 0; } else { values[index] = 1; } } } template <typename Dtype> void failure_threshold(const int n, Dtype* values, Dtype split1, Dtype split2) { hipLaunchKernelGGL(( FailureThresholdKernel), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, n, values, split1, split2); } template <typename Dtype> __global__ void FailKernel(const int n, Dtype* iters, const Dtype* values, Dtype* data, const Dtype* diff) { Dtype epsilon = 1e-20; CUDA_KERNEL_LOOP(index, n) { if (iters[index] <= 0) { // this cell is already broken data[index] = values[index]; } else { // strategy1: not update when gradient is too small if (diff[index] < epsilon && diff[index] > -epsilon) { continue; } iters[index] -= 100; // batch size. FIXME: how to make this exp more general if (iters[index] <= 0) { data[index] = values[index]; } } } } template <typename Dtype> void GaussianFailureMaker<Dtype>::Fail_gpu(int iter) { for (int i = 0; i < fail_iterations_.size(); i++) { int count = fail_iterations_[i]->count(); int N = CAFFE_GET_BLOCKS(count); // hiprandState_t* states; // hipMalloc((void**) &states, N * sizeof(hiprandState_t)); // InitRandom<<<N, 1>>>(time(NULL), states); hipLaunchKernelGGL(( FailKernel<Dtype>), dim3(N), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, fail_iterations_[i]->mutable_gpu_data(), fail_iterations_[i]->mutable_gpu_diff(), this->net_->failure_learnable_params()[i]->mutable_gpu_data(), this->net_->failure_learnable_params()[i]->gpu_diff()); } } template void GaussianFailureMaker<double>::Fail_gpu(int); template void GaussianFailureMaker<float>::Fail_gpu(int); template void failure_threshold<float>(const int n, float* values, float split1, float split2); template void failure_threshold<double>(const int n, double* values, double split1, double split2); }
56e119c096180c2b8b66d78b49c4cff30ac2fc34.cu
#include "caffe/util/device_alternate.hpp" #include "caffe/failure_maker.hpp" namespace caffe { template <typename Dtype> __global__ void FailureThresholdKernel(const int n, Dtype* values, Dtype split1, Dtype split2) { CUDA_KERNEL_LOOP(index, n) { if (values[index] < split1) { values[index] = -1; } else if (values[index] < split2) { values[index] = 0; } else { values[index] = 1; } } } template <typename Dtype> void failure_threshold(const int n, Dtype* values, Dtype split1, Dtype split2) { FailureThresholdKernel<<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>(n, values, split1, split2); } template <typename Dtype> __global__ void FailKernel(const int n, Dtype* iters, const Dtype* values, Dtype* data, const Dtype* diff) { Dtype epsilon = 1e-20; CUDA_KERNEL_LOOP(index, n) { if (iters[index] <= 0) { // this cell is already broken data[index] = values[index]; } else { // strategy1: not update when gradient is too small if (diff[index] < epsilon && diff[index] > -epsilon) { continue; } iters[index] -= 100; // batch size. FIXME: how to make this exp more general if (iters[index] <= 0) { data[index] = values[index]; } } } } template <typename Dtype> void GaussianFailureMaker<Dtype>::Fail_gpu(int iter) { for (int i = 0; i < fail_iterations_.size(); i++) { int count = fail_iterations_[i]->count(); int N = CAFFE_GET_BLOCKS(count); // curandState_t* states; // cudaMalloc((void**) &states, N * sizeof(curandState_t)); // InitRandom<<<N, 1>>>(time(NULL), states); FailKernel<Dtype><<<N, CAFFE_CUDA_NUM_THREADS>>>(count, fail_iterations_[i]->mutable_gpu_data(), fail_iterations_[i]->mutable_gpu_diff(), this->net_->failure_learnable_params()[i]->mutable_gpu_data(), this->net_->failure_learnable_params()[i]->gpu_diff()); } } template void GaussianFailureMaker<double>::Fail_gpu(int); template void GaussianFailureMaker<float>::Fail_gpu(int); template void failure_threshold<float>(const int n, float* values, float split1, float split2); template void failure_threshold<double>(const int n, double* values, double split1, double split2); }
be664ba421f1958e4c9230bb5c4cd1a3d6cb5272.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void kern_ProbBuffer(float* agreement, float* output, int size, short max) { int idx = CUDASTDOFFSET; float locAgreement = agreement[idx]; float probValue = (float) locAgreement / (float) max; probValue = (probValue < 1.0f) ? probValue: 1.0f; if( idx < size ) { output[idx] = probValue; } }
be664ba421f1958e4c9230bb5c4cd1a3d6cb5272.cu
#include "includes.h" __global__ void kern_ProbBuffer(float* agreement, float* output, int size, short max) { int idx = CUDASTDOFFSET; float locAgreement = agreement[idx]; float probValue = (float) locAgreement / (float) max; probValue = (probValue < 1.0f) ? probValue: 1.0f; if( idx < size ) { output[idx] = probValue; } }
21af0157bb7e7a212809c7df05c2376a043ee2fc.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright (c) 2020 Neka-Nat * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. **/ #include "cupoch/geometry/boundingvolume.h" #include "cupoch/geometry/image.h" #include "cupoch/utility/console.h" using namespace cupoch; using namespace cupoch::geometry; namespace { /// Isotropic 2D kernels are separable: /// two 1D kernels are applied in x and y direction. std::pair<utility::device_vector<float>, utility::device_vector<float>> GetFilterKernel(Image::FilterType ftype) { switch (ftype) { case Image::FilterType::Gaussian3: { const float k[3] = {0.25, 0.5, 0.25}; utility::device_vector<float> g3(k, k + 3); return std::make_pair(g3, g3); } case Image::FilterType::Gaussian5: { const float k[5] = {0.0625, 0.25, 0.375, 0.25, 0.0625}; utility::device_vector<float> g5(k, k + 5); return std::make_pair(g5, g5); } case Image::FilterType::Gaussian7: { const float k[7] = {0.03125, 0.109375, 0.21875, 0.28125, 0.21875, 0.109375, 0.03125}; utility::device_vector<float> g7(k, k + 7); return std::make_pair(g7, g7); } case Image::FilterType::Sobel3Dx: { const float k1[3] = {-1.0, 0.0, 1.0}; const float k2[3] = {1.0, 2.0, 1.0}; utility::device_vector<float> s31(k1, k1 + 3); utility::device_vector<float> s32(k2, k2 + 3); return std::make_pair(s31, s32); } case Image::FilterType::Sobel3Dy: { const float k1[3] = {-1.0, 0.0, 1.0}; const float k2[3] = {1.0, 2.0, 1.0}; utility::device_vector<float> s31(k1, k1 + 3); utility::device_vector<float> s32(k2, k2 + 3); return std::make_pair(s32, s31); } default: { utility::LogError("[Filter] Unsupported filter type."); return std::make_pair(utility::device_vector<float>(), utility::device_vector<float>()); } } } struct transpose_functor { transpose_functor(const uint8_t *src, int width, int in_bytes_per_line, int out_bytes_per_line, int bytes_per_pixel, uint8_t *dst) : src_(src), width_(width), in_bytes_per_line_(in_bytes_per_line), out_bytes_per_line_(out_bytes_per_line), bytes_per_pixel_(bytes_per_pixel), dst_(dst){}; const uint8_t *src_; const int width_; const int in_bytes_per_line_; const int out_bytes_per_line_; const int bytes_per_pixel_; uint8_t *dst_; __device__ void operator()(size_t idx) { const int y = idx / width_; const int x = idx % width_; memcpy(dst_ + x * out_bytes_per_line_ + y * bytes_per_pixel_, src_ + y * in_bytes_per_line_ + x * bytes_per_pixel_, bytes_per_pixel_ * sizeof(uint8_t)); } }; struct clip_intensity_functor { clip_intensity_functor(float min, float max) : min_(min), max_(max) {}; const float 
min_; const float max_; __device__ void operator()(float& f) { f = max(min(max_, f), min_); } }; struct linear_transform_functor { linear_transform_functor(float scale, float offset) : scale_(scale), offset_(offset){}; const float scale_; const float offset_; __device__ void operator() (float& f) { f = scale_ * f + offset_; } }; struct downsample_float_functor { downsample_float_functor(const uint8_t *src, int src_width, uint8_t *dst, int dst_width) : src_(src), src_width_(src_width), dst_(dst), dst_width_(dst_width){}; const uint8_t *src_; const int src_width_; uint8_t *dst_; const int dst_width_; __device__ void operator()(size_t idx) { const int y = idx / dst_width_; const int x = idx % dst_width_; float *p1 = (float *)(src_ + (y * 2 * src_width_ + x * 2) * sizeof(float)); float *p2 = (float *)(src_ + (y * 2 * src_width_ + x * 2 + 1) * sizeof(float)); float *p3 = (float *)(src_ + ((y * 2 + 1) * src_width_ + x * 2) * sizeof(float)); float *p4 = (float *)(src_ + ((y * 2 + 1) * src_width_ + x * 2 + 1) * sizeof(float)); float *p = (float *)(dst_ + idx * sizeof(float)); *p = (*p1 + *p2 + *p3 + *p4) / 4.0f; } }; struct downsample_rgb_functor { downsample_rgb_functor(const uint8_t *src, int src_width, int num_of_channels, uint8_t *dst, int dst_width) : src_(src), src_width_(src_width), num_of_channels_(num_of_channels), dst_(dst), dst_width_(dst_width){}; const uint8_t *src_; const int src_width_; const int num_of_channels_; uint8_t *dst_; const int dst_width_; __device__ void operator()(size_t idx) { const int y = idx / dst_width_; const int x = idx % dst_width_; for (int c = 0; c < num_of_channels_; ++c) { int p1 = (int)(*(src_ + (y * 2 * src_width_ + x * 2) * 3 + c)); int p2 = (int)(*(src_ + (y * 2 * src_width_ + x * 2 + 1) * 3 + c)); int p3 = (int)(*(src_ + ((y * 2 + 1) * src_width_ + x * 2) * 3) + c); int p4 = (int)(*(src_ + ((y * 2 + 1) * src_width_ + x * 2 + 1) * 3 + c)); uint8_t *p = dst_ + idx * 3 + c; *p = (uint8_t)((p1 + p2 + p3 + p4) / 4); } } }; struct filter_horizontal_float_functor { filter_horizontal_float_functor(const uint8_t *src, int width, const float *kernel, int half_kernel_size, uint8_t *dst) : src_(src), width_(width), kernel_(kernel), half_kernel_size_(half_kernel_size), dst_(dst){}; const uint8_t *src_; const int width_; const float *kernel_; const int half_kernel_size_; uint8_t *dst_; __device__ void operator()(size_t idx) { const int y = idx / width_; const int x = idx % width_; float *po = (float *)(dst_ + idx * sizeof(float)); float temp = 0; for (int i = -half_kernel_size_; i <= half_kernel_size_; i++) { int x_shift = min(max(0, x + i), width_ - 1); float *pi = (float *)(src_ + (y * width_ + x_shift) * sizeof(float)); temp += (*pi * kernel_[i + half_kernel_size_]); } *po = temp; } }; struct filter_horizontal_rgb_functor { filter_horizontal_rgb_functor(const uint8_t *src, int width, int num_of_channels, const float *kernel, int half_kernel_size, uint8_t *dst) : src_(src), width_(width), num_of_channels_(num_of_channels), kernel_(kernel), half_kernel_size_(half_kernel_size), dst_(dst){}; const uint8_t *src_; const int width_; const int num_of_channels_; const float *kernel_; const int half_kernel_size_; uint8_t *dst_; __device__ void operator()(size_t idx) { const int y = idx / width_; const int x = idx % width_; for (int c = 0; c < num_of_channels_; ++c) { uint8_t *po = dst_ + idx * num_of_channels_ + c; float temp = 0; for (int i = -half_kernel_size_; i <= half_kernel_size_; i++) { int x_shift = min(max(0, x + i), width_ - 1); const uint8_t *pi = src_ + (y * 
width_ + x_shift) * num_of_channels_ + c; temp += (*pi * kernel_[i + half_kernel_size_]); } *po = __float2uint_ru(temp); } } }; struct vertical_flip_functor { vertical_flip_functor(const uint8_t *src, int width, int height, int bytes_per_pixel, uint8_t *dst) : src_(src), width_(width), height_(height), bytes_per_pixel_(bytes_per_pixel), dst_(dst){}; const uint8_t *src_; const int width_; const int height_; const int bytes_per_pixel_; uint8_t *dst_; __device__ void operator()(size_t idx) { const int y = idx / width_; const int x = idx % width_; memcpy(&dst_[((height_ - y - 1) * width_ + x) * bytes_per_pixel_], &src_[idx * bytes_per_pixel_], bytes_per_pixel_ * sizeof(uint8_t)); } }; struct horizontal_flip_functor { horizontal_flip_functor(const uint8_t *src, int width, int bytes_per_pixel, uint8_t *dst) : src_(src), width_(width), bytes_per_pixel_(bytes_per_pixel), dst_(dst){}; const uint8_t *src_; const int width_; const int bytes_per_pixel_; uint8_t *dst_; __device__ void operator()(size_t idx) { const int y = idx / width_; const int x = idx % width_; memcpy(&dst_[(y * width_ + (width_ - x - 1)) * bytes_per_pixel_], &src_[idx * bytes_per_pixel_], bytes_per_pixel_ * sizeof(uint8_t)); } }; struct bilateral_filter_functor { bilateral_filter_functor(const uint8_t *src, int width, int height, int diameter, float sigma_color, const float* gaussian_const, uint8_t *dst) : src_(src), width_(width), height_(height), diameter_(diameter), sigma_color_(sigma_color), gaussian_const_(gaussian_const), dst_(dst){}; const uint8_t *src_; const int width_; const int height_; const int diameter_; const float sigma_color_; const float* gaussian_const_; uint8_t *dst_; __device__ float gaussian(float x, float sig) const { return expf(-(x * x) / (2.0f * sig * sig)); } __device__ void operator() (size_t idx) { const int y = idx / width_; const int x = idx % width_; float filtered = 0; float total_w = 0; const float center_p = *(float *)(src_ + idx * sizeof(float)); for (int dy = -diameter_; dy <= diameter_; dy++) { for (int dx = -diameter_; dx <= diameter_; dx++) { const int my = min(max(0, y + dy), height_); const int mx = min(max(0, x + dx), width_); const float cur_p = *(float *)(src_ + (my * width_ + mx) * sizeof(float)); const float w = gaussian_const_[dy + diameter_] * gaussian_const_[dx + diameter_] * gaussian(center_p - cur_p, sigma_color_); filtered += w * cur_p; total_w += w; } } float* p = (float *)(dst_ + idx * sizeof(float)); *p = filtered / total_w; } }; struct depth_to_float_functor { depth_to_float_functor(int depth_scale, int depth_trunc) : depth_scale_(depth_scale), depth_trunc_(depth_trunc) {}; const int depth_scale_; const int depth_trunc_; __device__ void operator()(float& f) { f /= (float)depth_scale_; if (f >= depth_trunc_) f = 0.0f; } }; } // namespace Image::Image() : GeometryBaseNoTrans2D(Geometry::GeometryType::Image) {} Image::~Image() {} Image::Image(const Image& other) : GeometryBaseNoTrans2D(Geometry::GeometryType::Image), width_(other.width_), height_(other.height_), num_of_channels_(other.num_of_channels_), bytes_per_channel_(other.bytes_per_channel_), data_(other.data_) {} Image& Image::operator=(const Image& other) { width_ = other.width_; height_ = other.height_; num_of_channels_ = other.num_of_channels_; bytes_per_channel_ = other.bytes_per_channel_; data_ = other.data_; return *this; } Image &Image::Clear() { width_ = 0; height_ = 0; num_of_channels_ = 0; bytes_per_channel_ = 0; data_.clear(); return *this; } bool Image::IsEmpty() const { return !HasData(); } Eigen::Vector2f 
Image::GetMinBound() const { return Eigen::Vector2f(0.0, 0.0); } Eigen::Vector2f Image::GetMaxBound() const { return Eigen::Vector2f(width_, height_); } Eigen::Vector2f Image::GetCenter() const { return Eigen::Vector2f(width_ / 2, height_ / 2); } thrust::host_vector<uint8_t> Image::GetData() const { thrust::host_vector<uint8_t> data = data_; return data; } void Image::SetData(const thrust::host_vector<uint8_t> &data) { data_ = data; } bool Image::TestImageBoundary(float u, float v, float inner_margin /* = 0.0 */) const { return (u >= inner_margin && u < width_ - inner_margin && v >= inner_margin && v < height_ - inner_margin); } std::pair<bool, float> Image::FloatValueAt(float u, float v) const { auto output = geometry::FloatValueAt(thrust::raw_pointer_cast(data_.data()), u, v, width_, height_, num_of_channels_, bytes_per_channel_); return std::make_pair(output.first, output.second); } std::shared_ptr<Image> Image::ConvertDepthToFloatImage( float depth_scale /* = 1000.0*/, float depth_trunc /* = 3.0*/) const { // don't need warning message about image type // as we call CreateFloatImage auto output = CreateFloatImage(); depth_to_float_functor func(depth_scale, depth_trunc); float* pt = (float*)thrust::raw_pointer_cast(output->data_.data()); for_each(thrust::device, pt, pt + (width_ * height_), func); return output; } Image &Image::ClipIntensity(float min /* = 0.0*/, float max /* = 1.0*/) { if (num_of_channels_ != 1 || bytes_per_channel_ != 4) { utility::LogError("[ClipIntensity] Unsupported image format."); return *this; } clip_intensity_functor func(min, max); float* pt = (float*)thrust::raw_pointer_cast(data_.data()); thrust::for_each(thrust::device, pt, pt + (width_ * height_), func); return *this; } Image &Image::LinearTransform(float scale, float offset /* = 0.0*/) { if (num_of_channels_ != 1 || bytes_per_channel_ != 4) { utility::LogError("[LinearTransform] Unsupported image format."); return *this; } linear_transform_functor func(scale, offset); float* pt = (float*)thrust::raw_pointer_cast(data_.data()); thrust::for_each(thrust::device, pt, pt + (width_ * height_), func); return *this; } std::shared_ptr<Image> Image::Downsample() const { auto output = std::make_shared<Image>(); if ((num_of_channels_ != 1 || bytes_per_channel_ != 4) && (num_of_channels_ != 3 || bytes_per_channel_ != 1)) { utility::LogError("[Downsample] Unsupported image format."); return output; } int half_width = (int)floor((float)width_ / 2.0); int half_height = (int)floor((float)height_ / 2.0); output->Prepare(half_width, half_height, num_of_channels_, bytes_per_channel_); if (num_of_channels_ == 1) { downsample_float_functor func(thrust::raw_pointer_cast(data_.data()), width_, thrust::raw_pointer_cast(output->data_.data()), output->width_); thrust::for_each(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator<size_t>(output->width_ * output->height_), func); } else { downsample_rgb_functor func(thrust::raw_pointer_cast(data_.data()), width_, num_of_channels_, thrust::raw_pointer_cast(output->data_.data()), output->width_); thrust::for_each(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator<size_t>(output->width_ * output->height_), func); } return output; } std::shared_ptr<Image> Image::FilterHorizontal( const utility::device_vector<float> &kernel) const { auto output = std::make_shared<Image>(); if ((num_of_channels_ != 1 || bytes_per_channel_ != 4) && (num_of_channels_ != 3 || bytes_per_channel_ != 1) || kernel.size() % 2 != 1) { utility::LogError( 
"[FilterHorizontal] Unsupported image format or kernel " "size."); } output->Prepare(width_, height_, 1, 4); const int half_kernel_size = (int)(floor((float)kernel.size() / 2.0)); if (num_of_channels_ == 1) { filter_horizontal_float_functor func( thrust::raw_pointer_cast(data_.data()), width_, thrust::raw_pointer_cast(kernel.data()), half_kernel_size, thrust::raw_pointer_cast(output->data_.data())); thrust::for_each(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator<size_t>(width_ * height_), func); } else { filter_horizontal_rgb_functor func( thrust::raw_pointer_cast(data_.data()), width_, num_of_channels_, thrust::raw_pointer_cast(kernel.data()), half_kernel_size, thrust::raw_pointer_cast(output->data_.data())); thrust::for_each(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator<size_t>(width_ * height_), func); } return output; } std::shared_ptr<Image> Image::Filter(Image::FilterType type) const { auto output = std::make_shared<Image>(); if (num_of_channels_ != 1 || bytes_per_channel_ != 4) { utility::LogError("[Filter] Unsupported image format."); return output; } auto kernels = GetFilterKernel(type); output = Filter(kernels.first, kernels.second); return output; } ImagePyramid Image::FilterPyramid(const ImagePyramid &input, Image::FilterType type) { std::vector<std::shared_ptr<Image>> output; for (size_t i = 0; i < input.size(); i++) { auto layer_filtered = input[i]->Filter(type); output.push_back(layer_filtered); } return output; } ImagePyramid Image::BilateralFilterPyramid(const ImagePyramid &input, int diameter, float sigma_color, float sigma_space) { std::vector<std::shared_ptr<Image>> output; for (size_t i = 0; i < input.size(); i++) { auto layer_filtered = input[i]->BilateralFilter(diameter, sigma_color, sigma_space); output.push_back(layer_filtered); } return output; } std::shared_ptr<Image> Image::Filter( const utility::device_vector<float> &dx, const utility::device_vector<float> &dy) const { auto output = std::make_shared<Image>(); if (num_of_channels_ != 1 || bytes_per_channel_ != 4) { utility::LogError("[Filter] Unsupported image format."); return output; } auto temp1 = FilterHorizontal(dx); auto temp2 = temp1->Transpose(); auto temp3 = temp2->FilterHorizontal(dy); auto temp4 = temp3->Transpose(); return temp4; } std::shared_ptr<Image> Image::Transpose() const { auto output = std::make_shared<Image>(); output->Prepare(height_, width_, num_of_channels_, bytes_per_channel_); int out_bytes_per_line = output->BytesPerLine(); int in_bytes_per_line = BytesPerLine(); int bytes_per_pixel = num_of_channels_ * bytes_per_channel_; transpose_functor func(thrust::raw_pointer_cast(data_.data()), width_, in_bytes_per_line, out_bytes_per_line, bytes_per_pixel, thrust::raw_pointer_cast(output->data_.data())); thrust::for_each(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator<size_t>(width_ * height_), func); return output; } std::shared_ptr<Image> Image::FlipVertical() const { auto output = std::make_shared<Image>(); output->Prepare(width_, height_, num_of_channels_, bytes_per_channel_); vertical_flip_functor func(thrust::raw_pointer_cast(data_.data()), width_, height_, num_of_channels_ * bytes_per_channel_, thrust::raw_pointer_cast(output->data_.data())); thrust::for_each(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator<size_t>(width_ * height_), func); return output; } std::shared_ptr<Image> Image::FlipHorizontal() const { auto output = std::make_shared<Image>(); output->Prepare(width_, 
height_, num_of_channels_, bytes_per_channel_); horizontal_flip_functor func( thrust::raw_pointer_cast(data_.data()), width_, num_of_channels_ * bytes_per_channel_, thrust::raw_pointer_cast(output->data_.data())); thrust::for_each(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator<size_t>(width_ * height_), func); return output; } std::shared_ptr<Image> Image::BilateralFilter( int diameter, float sigma_color, float sigma_space) const { auto output = std::make_shared<Image>(); if (diameter >= 64) { utility::LogError("[BilateralFilter] Diameter should be less than 64."); return output; } if (num_of_channels_ != 1 || bytes_per_channel_ != 4) { utility::LogError("[BilateralFilter] Unsupported image format."); return output; } output->Prepare(width_, height_, num_of_channels_, bytes_per_channel_); float fgaussian[64]; const float sigma2 = sigma_space * sigma_space; for (int i = 0; i < 2 * diameter + 1; i++) { const float x = i - diameter; fgaussian[i] = ::exp(-(x * x) / (2 * sigma2)); } utility::device_vector<float> gaussian_const(fgaussian, fgaussian + 64); bilateral_filter_functor func( thrust::raw_pointer_cast(data_.data()), width_, height_, diameter, sigma_color, thrust::raw_pointer_cast(gaussian_const.data()), thrust::raw_pointer_cast(output->data_.data())); thrust::for_each(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator<size_t>(width_ * height_), func); return output; } void Image::AllocateDataBuffer() { data_.resize(width_ * height_ * num_of_channels_ * bytes_per_channel_); }
21af0157bb7e7a212809c7df05c2376a043ee2fc.cu
/** * Copyright (c) 2020 Neka-Nat * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. **/ #include "cupoch/geometry/boundingvolume.h" #include "cupoch/geometry/image.h" #include "cupoch/utility/console.h" using namespace cupoch; using namespace cupoch::geometry; namespace { /// Isotropic 2D kernels are separable: /// two 1D kernels are applied in x and y direction. std::pair<utility::device_vector<float>, utility::device_vector<float>> GetFilterKernel(Image::FilterType ftype) { switch (ftype) { case Image::FilterType::Gaussian3: { const float k[3] = {0.25, 0.5, 0.25}; utility::device_vector<float> g3(k, k + 3); return std::make_pair(g3, g3); } case Image::FilterType::Gaussian5: { const float k[5] = {0.0625, 0.25, 0.375, 0.25, 0.0625}; utility::device_vector<float> g5(k, k + 5); return std::make_pair(g5, g5); } case Image::FilterType::Gaussian7: { const float k[7] = {0.03125, 0.109375, 0.21875, 0.28125, 0.21875, 0.109375, 0.03125}; utility::device_vector<float> g7(k, k + 7); return std::make_pair(g7, g7); } case Image::FilterType::Sobel3Dx: { const float k1[3] = {-1.0, 0.0, 1.0}; const float k2[3] = {1.0, 2.0, 1.0}; utility::device_vector<float> s31(k1, k1 + 3); utility::device_vector<float> s32(k2, k2 + 3); return std::make_pair(s31, s32); } case Image::FilterType::Sobel3Dy: { const float k1[3] = {-1.0, 0.0, 1.0}; const float k2[3] = {1.0, 2.0, 1.0}; utility::device_vector<float> s31(k1, k1 + 3); utility::device_vector<float> s32(k2, k2 + 3); return std::make_pair(s32, s31); } default: { utility::LogError("[Filter] Unsupported filter type."); return std::make_pair(utility::device_vector<float>(), utility::device_vector<float>()); } } } struct transpose_functor { transpose_functor(const uint8_t *src, int width, int in_bytes_per_line, int out_bytes_per_line, int bytes_per_pixel, uint8_t *dst) : src_(src), width_(width), in_bytes_per_line_(in_bytes_per_line), out_bytes_per_line_(out_bytes_per_line), bytes_per_pixel_(bytes_per_pixel), dst_(dst){}; const uint8_t *src_; const int width_; const int in_bytes_per_line_; const int out_bytes_per_line_; const int bytes_per_pixel_; uint8_t *dst_; __device__ void operator()(size_t idx) { const int y = idx / width_; const int x = idx % width_; memcpy(dst_ + x * out_bytes_per_line_ + y * bytes_per_pixel_, src_ + y * in_bytes_per_line_ + x * bytes_per_pixel_, bytes_per_pixel_ * sizeof(uint8_t)); } }; struct clip_intensity_functor { clip_intensity_functor(float min, float max) : min_(min), max_(max) {}; const float min_; const float max_; __device__ void operator()(float& 
f) { f = max(min(max_, f), min_); } }; struct linear_transform_functor { linear_transform_functor(float scale, float offset) : scale_(scale), offset_(offset){}; const float scale_; const float offset_; __device__ void operator() (float& f) { f = scale_ * f + offset_; } }; struct downsample_float_functor { downsample_float_functor(const uint8_t *src, int src_width, uint8_t *dst, int dst_width) : src_(src), src_width_(src_width), dst_(dst), dst_width_(dst_width){}; const uint8_t *src_; const int src_width_; uint8_t *dst_; const int dst_width_; __device__ void operator()(size_t idx) { const int y = idx / dst_width_; const int x = idx % dst_width_; float *p1 = (float *)(src_ + (y * 2 * src_width_ + x * 2) * sizeof(float)); float *p2 = (float *)(src_ + (y * 2 * src_width_ + x * 2 + 1) * sizeof(float)); float *p3 = (float *)(src_ + ((y * 2 + 1) * src_width_ + x * 2) * sizeof(float)); float *p4 = (float *)(src_ + ((y * 2 + 1) * src_width_ + x * 2 + 1) * sizeof(float)); float *p = (float *)(dst_ + idx * sizeof(float)); *p = (*p1 + *p2 + *p3 + *p4) / 4.0f; } }; struct downsample_rgb_functor { downsample_rgb_functor(const uint8_t *src, int src_width, int num_of_channels, uint8_t *dst, int dst_width) : src_(src), src_width_(src_width), num_of_channels_(num_of_channels), dst_(dst), dst_width_(dst_width){}; const uint8_t *src_; const int src_width_; const int num_of_channels_; uint8_t *dst_; const int dst_width_; __device__ void operator()(size_t idx) { const int y = idx / dst_width_; const int x = idx % dst_width_; for (int c = 0; c < num_of_channels_; ++c) { int p1 = (int)(*(src_ + (y * 2 * src_width_ + x * 2) * 3 + c)); int p2 = (int)(*(src_ + (y * 2 * src_width_ + x * 2 + 1) * 3 + c)); int p3 = (int)(*(src_ + ((y * 2 + 1) * src_width_ + x * 2) * 3) + c); int p4 = (int)(*(src_ + ((y * 2 + 1) * src_width_ + x * 2 + 1) * 3 + c)); uint8_t *p = dst_ + idx * 3 + c; *p = (uint8_t)((p1 + p2 + p3 + p4) / 4); } } }; struct filter_horizontal_float_functor { filter_horizontal_float_functor(const uint8_t *src, int width, const float *kernel, int half_kernel_size, uint8_t *dst) : src_(src), width_(width), kernel_(kernel), half_kernel_size_(half_kernel_size), dst_(dst){}; const uint8_t *src_; const int width_; const float *kernel_; const int half_kernel_size_; uint8_t *dst_; __device__ void operator()(size_t idx) { const int y = idx / width_; const int x = idx % width_; float *po = (float *)(dst_ + idx * sizeof(float)); float temp = 0; for (int i = -half_kernel_size_; i <= half_kernel_size_; i++) { int x_shift = min(max(0, x + i), width_ - 1); float *pi = (float *)(src_ + (y * width_ + x_shift) * sizeof(float)); temp += (*pi * kernel_[i + half_kernel_size_]); } *po = temp; } }; struct filter_horizontal_rgb_functor { filter_horizontal_rgb_functor(const uint8_t *src, int width, int num_of_channels, const float *kernel, int half_kernel_size, uint8_t *dst) : src_(src), width_(width), num_of_channels_(num_of_channels), kernel_(kernel), half_kernel_size_(half_kernel_size), dst_(dst){}; const uint8_t *src_; const int width_; const int num_of_channels_; const float *kernel_; const int half_kernel_size_; uint8_t *dst_; __device__ void operator()(size_t idx) { const int y = idx / width_; const int x = idx % width_; for (int c = 0; c < num_of_channels_; ++c) { uint8_t *po = dst_ + idx * num_of_channels_ + c; float temp = 0; for (int i = -half_kernel_size_; i <= half_kernel_size_; i++) { int x_shift = min(max(0, x + i), width_ - 1); const uint8_t *pi = src_ + (y * width_ + x_shift) * num_of_channels_ + c; temp += (*pi * 
kernel_[i + half_kernel_size_]); } *po = __float2uint_ru(temp); } } }; struct vertical_flip_functor { vertical_flip_functor(const uint8_t *src, int width, int height, int bytes_per_pixel, uint8_t *dst) : src_(src), width_(width), height_(height), bytes_per_pixel_(bytes_per_pixel), dst_(dst){}; const uint8_t *src_; const int width_; const int height_; const int bytes_per_pixel_; uint8_t *dst_; __device__ void operator()(size_t idx) { const int y = idx / width_; const int x = idx % width_; memcpy(&dst_[((height_ - y - 1) * width_ + x) * bytes_per_pixel_], &src_[idx * bytes_per_pixel_], bytes_per_pixel_ * sizeof(uint8_t)); } }; struct horizontal_flip_functor { horizontal_flip_functor(const uint8_t *src, int width, int bytes_per_pixel, uint8_t *dst) : src_(src), width_(width), bytes_per_pixel_(bytes_per_pixel), dst_(dst){}; const uint8_t *src_; const int width_; const int bytes_per_pixel_; uint8_t *dst_; __device__ void operator()(size_t idx) { const int y = idx / width_; const int x = idx % width_; memcpy(&dst_[(y * width_ + (width_ - x - 1)) * bytes_per_pixel_], &src_[idx * bytes_per_pixel_], bytes_per_pixel_ * sizeof(uint8_t)); } }; struct bilateral_filter_functor { bilateral_filter_functor(const uint8_t *src, int width, int height, int diameter, float sigma_color, const float* gaussian_const, uint8_t *dst) : src_(src), width_(width), height_(height), diameter_(diameter), sigma_color_(sigma_color), gaussian_const_(gaussian_const), dst_(dst){}; const uint8_t *src_; const int width_; const int height_; const int diameter_; const float sigma_color_; const float* gaussian_const_; uint8_t *dst_; __device__ float gaussian(float x, float sig) const { return expf(-(x * x) / (2.0f * sig * sig)); } __device__ void operator() (size_t idx) { const int y = idx / width_; const int x = idx % width_; float filtered = 0; float total_w = 0; const float center_p = *(float *)(src_ + idx * sizeof(float)); for (int dy = -diameter_; dy <= diameter_; dy++) { for (int dx = -diameter_; dx <= diameter_; dx++) { const int my = min(max(0, y + dy), height_); const int mx = min(max(0, x + dx), width_); const float cur_p = *(float *)(src_ + (my * width_ + mx) * sizeof(float)); const float w = gaussian_const_[dy + diameter_] * gaussian_const_[dx + diameter_] * gaussian(center_p - cur_p, sigma_color_); filtered += w * cur_p; total_w += w; } } float* p = (float *)(dst_ + idx * sizeof(float)); *p = filtered / total_w; } }; struct depth_to_float_functor { depth_to_float_functor(int depth_scale, int depth_trunc) : depth_scale_(depth_scale), depth_trunc_(depth_trunc) {}; const int depth_scale_; const int depth_trunc_; __device__ void operator()(float& f) { f /= (float)depth_scale_; if (f >= depth_trunc_) f = 0.0f; } }; } // namespace Image::Image() : GeometryBaseNoTrans2D(Geometry::GeometryType::Image) {} Image::~Image() {} Image::Image(const Image& other) : GeometryBaseNoTrans2D(Geometry::GeometryType::Image), width_(other.width_), height_(other.height_), num_of_channels_(other.num_of_channels_), bytes_per_channel_(other.bytes_per_channel_), data_(other.data_) {} Image& Image::operator=(const Image& other) { width_ = other.width_; height_ = other.height_; num_of_channels_ = other.num_of_channels_; bytes_per_channel_ = other.bytes_per_channel_; data_ = other.data_; return *this; } Image &Image::Clear() { width_ = 0; height_ = 0; num_of_channels_ = 0; bytes_per_channel_ = 0; data_.clear(); return *this; } bool Image::IsEmpty() const { return !HasData(); } Eigen::Vector2f Image::GetMinBound() const { return Eigen::Vector2f(0.0, 
0.0); } Eigen::Vector2f Image::GetMaxBound() const { return Eigen::Vector2f(width_, height_); } Eigen::Vector2f Image::GetCenter() const { return Eigen::Vector2f(width_ / 2, height_ / 2); } thrust::host_vector<uint8_t> Image::GetData() const { thrust::host_vector<uint8_t> data = data_; return data; } void Image::SetData(const thrust::host_vector<uint8_t> &data) { data_ = data; } bool Image::TestImageBoundary(float u, float v, float inner_margin /* = 0.0 */) const { return (u >= inner_margin && u < width_ - inner_margin && v >= inner_margin && v < height_ - inner_margin); } std::pair<bool, float> Image::FloatValueAt(float u, float v) const { auto output = geometry::FloatValueAt(thrust::raw_pointer_cast(data_.data()), u, v, width_, height_, num_of_channels_, bytes_per_channel_); return std::make_pair(output.first, output.second); } std::shared_ptr<Image> Image::ConvertDepthToFloatImage( float depth_scale /* = 1000.0*/, float depth_trunc /* = 3.0*/) const { // don't need warning message about image type // as we call CreateFloatImage auto output = CreateFloatImage(); depth_to_float_functor func(depth_scale, depth_trunc); float* pt = (float*)thrust::raw_pointer_cast(output->data_.data()); for_each(thrust::device, pt, pt + (width_ * height_), func); return output; } Image &Image::ClipIntensity(float min /* = 0.0*/, float max /* = 1.0*/) { if (num_of_channels_ != 1 || bytes_per_channel_ != 4) { utility::LogError("[ClipIntensity] Unsupported image format."); return *this; } clip_intensity_functor func(min, max); float* pt = (float*)thrust::raw_pointer_cast(data_.data()); thrust::for_each(thrust::device, pt, pt + (width_ * height_), func); return *this; } Image &Image::LinearTransform(float scale, float offset /* = 0.0*/) { if (num_of_channels_ != 1 || bytes_per_channel_ != 4) { utility::LogError("[LinearTransform] Unsupported image format."); return *this; } linear_transform_functor func(scale, offset); float* pt = (float*)thrust::raw_pointer_cast(data_.data()); thrust::for_each(thrust::device, pt, pt + (width_ * height_), func); return *this; } std::shared_ptr<Image> Image::Downsample() const { auto output = std::make_shared<Image>(); if ((num_of_channels_ != 1 || bytes_per_channel_ != 4) && (num_of_channels_ != 3 || bytes_per_channel_ != 1)) { utility::LogError("[Downsample] Unsupported image format."); return output; } int half_width = (int)floor((float)width_ / 2.0); int half_height = (int)floor((float)height_ / 2.0); output->Prepare(half_width, half_height, num_of_channels_, bytes_per_channel_); if (num_of_channels_ == 1) { downsample_float_functor func(thrust::raw_pointer_cast(data_.data()), width_, thrust::raw_pointer_cast(output->data_.data()), output->width_); thrust::for_each(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator<size_t>(output->width_ * output->height_), func); } else { downsample_rgb_functor func(thrust::raw_pointer_cast(data_.data()), width_, num_of_channels_, thrust::raw_pointer_cast(output->data_.data()), output->width_); thrust::for_each(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator<size_t>(output->width_ * output->height_), func); } return output; } std::shared_ptr<Image> Image::FilterHorizontal( const utility::device_vector<float> &kernel) const { auto output = std::make_shared<Image>(); if ((num_of_channels_ != 1 || bytes_per_channel_ != 4) && (num_of_channels_ != 3 || bytes_per_channel_ != 1) || kernel.size() % 2 != 1) { utility::LogError( "[FilterHorizontal] Unsupported image format or kernel " "size."); } 
output->Prepare(width_, height_, 1, 4); const int half_kernel_size = (int)(floor((float)kernel.size() / 2.0)); if (num_of_channels_ == 1) { filter_horizontal_float_functor func( thrust::raw_pointer_cast(data_.data()), width_, thrust::raw_pointer_cast(kernel.data()), half_kernel_size, thrust::raw_pointer_cast(output->data_.data())); thrust::for_each(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator<size_t>(width_ * height_), func); } else { filter_horizontal_rgb_functor func( thrust::raw_pointer_cast(data_.data()), width_, num_of_channels_, thrust::raw_pointer_cast(kernel.data()), half_kernel_size, thrust::raw_pointer_cast(output->data_.data())); thrust::for_each(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator<size_t>(width_ * height_), func); } return output; } std::shared_ptr<Image> Image::Filter(Image::FilterType type) const { auto output = std::make_shared<Image>(); if (num_of_channels_ != 1 || bytes_per_channel_ != 4) { utility::LogError("[Filter] Unsupported image format."); return output; } auto kernels = GetFilterKernel(type); output = Filter(kernels.first, kernels.second); return output; } ImagePyramid Image::FilterPyramid(const ImagePyramid &input, Image::FilterType type) { std::vector<std::shared_ptr<Image>> output; for (size_t i = 0; i < input.size(); i++) { auto layer_filtered = input[i]->Filter(type); output.push_back(layer_filtered); } return output; } ImagePyramid Image::BilateralFilterPyramid(const ImagePyramid &input, int diameter, float sigma_color, float sigma_space) { std::vector<std::shared_ptr<Image>> output; for (size_t i = 0; i < input.size(); i++) { auto layer_filtered = input[i]->BilateralFilter(diameter, sigma_color, sigma_space); output.push_back(layer_filtered); } return output; } std::shared_ptr<Image> Image::Filter( const utility::device_vector<float> &dx, const utility::device_vector<float> &dy) const { auto output = std::make_shared<Image>(); if (num_of_channels_ != 1 || bytes_per_channel_ != 4) { utility::LogError("[Filter] Unsupported image format."); return output; } auto temp1 = FilterHorizontal(dx); auto temp2 = temp1->Transpose(); auto temp3 = temp2->FilterHorizontal(dy); auto temp4 = temp3->Transpose(); return temp4; } std::shared_ptr<Image> Image::Transpose() const { auto output = std::make_shared<Image>(); output->Prepare(height_, width_, num_of_channels_, bytes_per_channel_); int out_bytes_per_line = output->BytesPerLine(); int in_bytes_per_line = BytesPerLine(); int bytes_per_pixel = num_of_channels_ * bytes_per_channel_; transpose_functor func(thrust::raw_pointer_cast(data_.data()), width_, in_bytes_per_line, out_bytes_per_line, bytes_per_pixel, thrust::raw_pointer_cast(output->data_.data())); thrust::for_each(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator<size_t>(width_ * height_), func); return output; } std::shared_ptr<Image> Image::FlipVertical() const { auto output = std::make_shared<Image>(); output->Prepare(width_, height_, num_of_channels_, bytes_per_channel_); vertical_flip_functor func(thrust::raw_pointer_cast(data_.data()), width_, height_, num_of_channels_ * bytes_per_channel_, thrust::raw_pointer_cast(output->data_.data())); thrust::for_each(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator<size_t>(width_ * height_), func); return output; } std::shared_ptr<Image> Image::FlipHorizontal() const { auto output = std::make_shared<Image>(); output->Prepare(width_, height_, num_of_channels_, bytes_per_channel_); horizontal_flip_functor 
func( thrust::raw_pointer_cast(data_.data()), width_, num_of_channels_ * bytes_per_channel_, thrust::raw_pointer_cast(output->data_.data())); thrust::for_each(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator<size_t>(width_ * height_), func); return output; } std::shared_ptr<Image> Image::BilateralFilter( int diameter, float sigma_color, float sigma_space) const { auto output = std::make_shared<Image>(); if (diameter >= 64) { utility::LogError("[BilateralFilter] Diameter should be less than 64."); return output; } if (num_of_channels_ != 1 || bytes_per_channel_ != 4) { utility::LogError("[BilateralFilter] Unsupported image format."); return output; } output->Prepare(width_, height_, num_of_channels_, bytes_per_channel_); float fgaussian[64]; const float sigma2 = sigma_space * sigma_space; for (int i = 0; i < 2 * diameter + 1; i++) { const float x = i - diameter; fgaussian[i] = std::exp(-(x * x) / (2 * sigma2)); } utility::device_vector<float> gaussian_const(fgaussian, fgaussian + 64); bilateral_filter_functor func( thrust::raw_pointer_cast(data_.data()), width_, height_, diameter, sigma_color, thrust::raw_pointer_cast(gaussian_const.data()), thrust::raw_pointer_cast(output->data_.data())); thrust::for_each(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator<size_t>(width_ * height_), func); return output; } void Image::AllocateDataBuffer() { data_.resize(width_ * height_ * num_of_channels_ * bytes_per_channel_); }
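Image::Filter above composes two separable 1-D passes: filter horizontally, transpose, filter with the second kernel, transpose back. A minimal host-side sketch of that call path follows; the include path, namespace layout and kernel values are assumptions for illustration, not taken from this file, and utility::device_vector is assumed to expose thrust-compatible iterators.

// Sketch only: "image.h" is a placeholder for wherever geometry::Image above is declared.
#include <memory>
#include <thrust/fill.h>
#include "image.h"  // placeholder header for geometry::Image / utility::device_vector

std::shared_ptr<geometry::Image> SmoothAndHalve(const geometry::Image &depth) {
    // Filter() above requires a 1-channel, 4-byte-per-channel image and odd-sized kernels.
    utility::device_vector<float> dx(3), dy(3);
    thrust::fill(dx.begin(), dx.end(), 1.0f / 3.0f);  // illustrative 3-tap box kernel
    thrust::fill(dy.begin(), dy.end(), 1.0f / 3.0f);
    auto smoothed = depth.Filter(dx, dy);  // horizontal pass, transpose, second pass, transpose back
    smoothed->ClipIntensity(0.0f, 1.0f);   // clamp intensities in place
    return smoothed->Downsample();         // 2x2 decimation to half resolution
}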
4af5e825067760c89b573cc3803a5fbf08a7b873.hip
// !!! This is a file automatically generated by hipify!!!
#include "funset.hpp"
#include <iostream>
#include <algorithm>
#include <memory>
#include <vector>
#include <hip/hip_runtime.h> // For the CUDA runtime routines (prefixed with "cuda_")
#include <device_launch_parameters.h>
#include "common.hpp"

/* __global__: function type qualifier; the function runs on the device; it can be called from
the host, and from the device on compute capability 3.2 and higher; the return type must be
void; calls to it are asynchronous, i.e. they return before the device has finished; every call
must supply an execution configuration, i.e. the grid and block dimensions and the associated
stream passed through the <<< >>> operator; "a kernel": a parallel function that runs on the
GPU is called a kernel and must be declared with __global__ */
__global__ static void calculate_histogram(const unsigned char* data, int length, unsigned int* hist)
{
	/* __shared__: variable type qualifier, optionally combined with __device__; the variable
	lives in the shared memory of a block, has the lifetime of that block and is visible to all
	of its threads; __shared__ and __constant__ variables have static storage by default; an
	"extern __shared__" array gets its size from the execution configuration; __shared__
	variables cannot be initialized in their declaration */
	// clear out the accumulation buffer called temp since we are launched with
	// 256 threads, it is easy to clear that memory with one write per thread
	__shared__ unsigned int temp[256]; // shared-memory accumulation buffer
	temp[threadIdx.x] = 0;
	/* __syncthreads: barrier for the threads of one block; no thread executes an instruction
	past __syncthreads() until every thread of the block has reached it; threads of the same
	block exchange data through shared memory and rely on this barrier to do so correctly */
	__syncthreads();

	/* gridDim: built-in dim3 variable holding the dimensions of the grid, i.e. the number of
	blocks per grid dimension; constant for all blocks;
	blockDim: built-in dim3 variable holding the dimensions of a block, i.e. the number of
	threads per block dimension; constant for all blocks;
	blockIdx: built-in uint3 variable holding the index of the current block within the grid;
	blockIdx.x ranges over [0, gridDim.x-1] and blockIdx.y over [0, gridDim.y-1];
	threadIdx: built-in uint3 variable holding the index of the current thread within its block
	(threadIdx.x, and .y/.z for 2-D/3-D blocks) */
	// calculate the starting index and the offset to the next block that each thread will be processing
	int i = threadIdx.x + blockIdx.x * blockDim.x;
	int stride = blockDim.x * gridDim.x;
	while (i < length) {
		/* atomicAdd: atomic operation; the hardware guarantees that no other thread can read or
		write the address while the operation is in flight; atomic functions perform a
		read-modify-write on a 32- or 64-bit word in global or shared memory, so concurrent
		updates of the same location are serialized safely; atomicAdd(addr, y) reads the value
		at addr, adds y and writes the result back to addr as one indivisible sequence */
		atomicAdd(&temp[data[i]], 1);
		i += stride;
	}

	// sync the data from the above writes to shared memory then add the shared memory values to the values from
	// the other thread blocks using global memory atomic adds same as before, since we have 256 threads,
	// updating the global histogram is just one write per thread!
	__syncthreads();
	// merge the per-block histograms into the single final histogram
	atomicAdd(&(hist[threadIdx.x]), temp[threadIdx.x]);
}

int calculate_histogram_gpu(const unsigned char* data, int length, unsigned int* hist, unsigned int& value, float* elapsed_time)
{
	/* hipEvent_t: event type used to measure the time the GPU spends on a task; an event is
	essentially a GPU timestamp, so events are not suitable for timing mixed host/device code */
	hipEvent_t start, stop;
	// hipEventCreate: create an event object, asynchronous
	hipEventCreate(&start);
	hipEventCreate(&stop);
	// hipEventRecord: record an event, asynchronous; start marks the begin time
	hipEventRecord(start, 0);

	unsigned char* dev_buffer{ nullptr };
	unsigned int* dev_hist{ nullptr };

	// hipMalloc: allocate device memory
	hipMalloc(&dev_buffer, length);
	hipMalloc(&dev_hist, 256 * sizeof(unsigned int));
	/* hipMemcpy: copy data between host and device; the fourth argument must be one of:
	(1). hipMemcpyHostToHost: host to host
	(2). hipMemcpyHostToDevice: host to device
	(3). hipMemcpyDeviceToHost: device to host
	(4). hipMemcpyDeviceToDevice: device to device
	(5). hipMemcpyDefault: infer the direction from the pointer values; requires unified
	virtual addressing (CUDA 6.0 and later)
	hipMemcpy is synchronous with respect to the host */
	hipMemcpy(dev_buffer, data, length, hipMemcpyHostToDevice);
	/* hipMemset: initialize or set device memory to a given value; executes on the GPU */
	hipMemset(dev_hist, 0, 256 * sizeof(unsigned int));

	// hipDeviceProp_t: structure describing the properties of a device
	// kernel launch - 2x the number of mps gave best timing
	hipDeviceProp_t prop;
	// hipGetDeviceProperties: query the properties of the GPU device
	hipGetDeviceProperties(&prop, 0);
	// hipDeviceProp_t::multiProcessorCount: number of multiprocessors on the device
	int blocks = prop.multiProcessorCount;
	fprintf(stderr, "multiProcessorCount: %d\n", blocks);

	/* <<< >>>: operator introduced by CUDA to pass the execution configuration (the grid and
	block dimensions and how the threads are organized) to the compiler and runtime; the values
	between the chevrons are not arguments of the device code, they tell the runtime how to
	launch it, while the kernel's own arguments are passed in parentheses as in a normal call;
	devices of different compute capability put different limits on the total thread count and
	organization; all memory used inside the kernel must be allocated before the launch,
	otherwise the GPU computation fails (e.g. out-of-bounds accesses).
	With the runtime API the configuration is <<<Dg, Db, Ns, S>>>: Dg is a dim3 giving the grid
	dimensions, so the grid holds Dg.x*Dg.y*Dg.z blocks; Db is a dim3 giving the block
	dimensions, so each block holds Db.x*Db.y*Db.z threads; Ns is an optional unsigned int
	(default 0) giving the dynamically allocated shared memory per block, available to arrays
	declared extern __shared__; S is an optional cudaStream_t (default 0) naming the stream the
	kernel is associated with.
	*/
	// launching twice as many blocks as there are multiprocessors on the GPU gave the best performance
	calculate_histogram << <blocks * 2, 256 >> >(dev_buffer, length, dev_hist);

	hipMemcpy(hist, dev_hist, 256 * sizeof(unsigned int), hipMemcpyDeviceToHost);

	value = 0;
	for (int i = 0; i < 256; ++i) {
		value += hist[i];
	}

	// hipFree: free device memory allocated with hipMalloc
	hipFree(dev_buffer);
	hipFree(dev_hist);

	// hipEventRecord: record an event, asynchronous; stop marks the end time
	hipEventRecord(stop, 0);
	// hipEventSynchronize: wait for an event to complete
	hipEventSynchronize(stop);
	// hipEventElapsedTime: compute the elapsed time between two events, in milliseconds
	hipEventElapsedTime(elapsed_time, start, stop);
	// hipEventDestroy: destroy an event object
	hipEventDestroy(start);
	hipEventDestroy(stop);

	return 0;
}
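The launch above keeps CUDA's triple-chevron syntax, which HIP also accepts. For comparison, the explicit macro form used by the other hipified files in this document would look like the following sketch (same kernel and arguments, 0 bytes of dynamic shared memory, default stream):

// Equivalent launch via hipLaunchKernelGGL, as used elsewhere in this collection (sketch).
hipLaunchKernelGGL(calculate_histogram, dim3(blocks * 2), dim3(256), 0, 0,
                   dev_buffer, length, dev_hist);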
4af5e825067760c89b573cc3803a5fbf08a7b873.cu
#include "funset.hpp" #include <iostream> #include <algorithm> #include <memory> #include <vector> #include <cuda_runtime.h> // For the CUDA runtime routines (prefixed with "cuda_") #include <device_launch_parameters.h> #include "common.hpp" /* __global__: 函数类型限定符;在设备上运行;在主机端调用,计算能力3.2及以上可以在 设备端调用;声明的函数的返回值必须是void类型;对此类型函数的调用是异步的,即在 设备完全完成它的运行之前就返回了;对此类型函数的调用必须指定执行配置,即用于在 设备上执行函数时的grid和block的维度,以及相关的流(即插入<<< >>>运算符); a kernel,表示此函数为内核函数(运行在GPU上的CUDA并行计算函数称为kernel(内核函 数),内核函数必须通过__global__函数类型限定符定义); */ __global__ static void calculate_histogram(const unsigned char* data, int length, unsigned int* hist) { /* __shared__: 变量类型限定符;使用__shared__限定符,或者与__device__限 定符连用,此时声明的变量位于block中的共享存储器空间中,与block具有相同 的生命周期,仅可通过block内的所有线程访问;__shared__和__constant__变量 默认为是静态存储;在__shared__前可以加extern关键字,但表示的是变量大小 由执行参数确定;__shared__变量在声明时不能初始化;可以将CUDA C的关键字 __shared__添加到变量声明中,这将使这个变量驻留在共享内存中;CUDA C编译 器对共享内存中的变量与普通变量将分别采取不同的处理方式 */ // clear out the accumulation buffer called temp since we are launched with // 256 threads, it is easy to clear that memory with one write per thread __shared__ unsigned int temp[256]; // 共享内存缓冲区 temp[threadIdx.x] = 0; /* __syncthreads: 对线程块中的线程进行同步;CUDA架构将确保,除非线程块 中的每个线程都执行了__syncthreads(),否则没有任何线程能执行 __syncthreads()之后的指令;在同一个block中的线程通过共享存储器(shared memory)交换数据,并通过栅栏同步(可以在kernel函数中需要同步的位置调用 __syncthreads()函数)保证线程间能够正确地共享数据;使用clock()函数计时, 在内核函数中要测量的一段代码的开始和结束的位置分别调用一次clock()函数, 并将结果记录下来。由于调用__syncthreads()函数后,一个block中的所有 thread需要的时间是相同的,因此只需要记录每个block执行需要的时间就行了, 而不需要记录每个thread的时间 */ __syncthreads(); /* gridDim: 内置变量,用于描述线程网格的维度,对于所有线程块来说,这个 变量是一个常数,用来保存线程格每一维的大小,即每个线程格中线程块的数量. 为dim3类型; blockDim: 内置变量,用于说明每个block的维度与尺寸.为dim3类型,包含 了block在三个维度上的尺寸信息;对于所有线程块来说,这个变量是一个常数, 保存的是线程块中每一维的线程数量; blockIdx: 内置变量,变量中包含的值就是当前执行设备代码的线程块的索引;用 于说明当前thread所在的block在整个grid中的位置,blockIdx.x取值范围是 [0,gridDim.x-1],blockIdx.y取值范围是[0, gridDim.y-1].为uint3类型, 包含了一个block在grid中各个维度上的索引信息; threadIdx: 内置变量,变量中包含的值就是当前执行设备代码的线程索引;用于 说明当前thread在block中的位置;如果线程是一维的可获取threadIdx.x,如果 是二维的还可获取threadIdx.y,如果是三维的还可获取threadIdx.z;为uint3类 型,包含了一个thread在block中各个维度的索引信息 */ // calculate the starting index and the offset to the next block that each thread will be processing int i = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; while (i < length) { /* atomicAdd: 原子操作,底层硬件将确保当执行这些原子操作时,其 它任何线程都不会读取或写入地址addr上的值。原子函数(atomic function)对位于全局或共享存储器的一个32位或64位字执行 read-modify-write的原子操作。也就是说,当多个线程同时访问全局或 共享存储器的同一位置时,保证每个线程能够实现对共享可写数据的互 斥操作:在一个操作完成之前,其它任何线程都无法访问此地址。之所 以将这一过程称为原子操作,是因为每个线程的操作都不会影响到其它 线程。换句话说,原子操作能够保证对一个地址的当前操作完成之前, 其它线程都不能访问这个地址。 atomicAdd(addr,y):将生成一个原子的操作序列,这个操作序列包括读 取地址addr处的值,将y增加到这个值,以及将结果保存回地址addr。 */ atomicAdd(&temp[data[i]], 1); i += stride; } // sync the data from the above writes to shared memory then add the shared memory values to the values from // the other thread blocks using global memory atomic adds same as before, since we have 256 threads, // updating the global histogram is just one write per thread! 
__syncthreads(); // 将每个线程块的直方图合并为单个最终的直方图 atomicAdd(&(hist[threadIdx.x]), temp[threadIdx.x]); } int calculate_histogram_gpu(const unsigned char* data, int length, unsigned int* hist, unsigned int& value, float* elapsed_time) { /* cudaEvent_t: CUDA event types,结构体类型, CUDA事件,用于测量GPU在某 个任务上花费的时间,CUDA中的事件本质上是一个GPU时间戳,由于CUDA事件是在 GPU上实现的,因此它们不适于对同时包含设备代码和主机代码的混合代码计时 */ cudaEvent_t start, stop; // cudaEventCreate: 创建一个事件对象,异步启动 cudaEventCreate(&start); cudaEventCreate(&stop); // cudaEventRecord: 记录一个事件,异步启动,start记录起始时间 cudaEventRecord(start, 0); unsigned char* dev_buffer{ nullptr }; unsigned int* dev_hist{ nullptr }; // cudaMalloc: 在设备端分配内存 cudaMalloc(&dev_buffer, length); cudaMalloc(&dev_hist, 256 * sizeof(unsigned int)); /* cudaMemcpy: 在主机端和设备端拷贝数据,此函数第四个参数仅能是下面之一: (1). cudaMemcpyHostToHost: 拷贝数据从主机端到主机端 (2). cudaMemcpyHostToDevice: 拷贝数据从主机端到设备端 (3). cudaMemcpyDeviceToHost: 拷贝数据从设备端到主机端 (4). cudaMemcpyDeviceToDevice: 拷贝数据从设备端到设备端 (5). cudaMemcpyDefault: 从指针值自动推断拷贝数据方向,需要支持 统一虚拟寻址(CUDA6.0及以上版本) cudaMemcpy函数对于主机是同步的 */ cudaMemcpy(dev_buffer, data, length, cudaMemcpyHostToDevice); /* cudaMemset: 存储器初始化函数,在GPU内存上执行。用指定的值初始化或设置 设备内存 */ cudaMemset(dev_hist, 0, 256 * sizeof(unsigned int)); // cudaDeviceProp: cuda设备属性结构体 // kernel launch - 2x the number of mps gave best timing cudaDeviceProp prop; // cudaGetDeviceProperties: 获取GPU设备相关信息 cudaGetDeviceProperties(&prop, 0); // cudaDeviceProp::multiProcessorCount: 设备上多处理器的数量 int blocks = prop.multiProcessorCount; fprintf(stderr, "multiProcessorCount: %d\n", blocks); /* <<< >>>: 为CUDA引入的运算符,指定线程网格和线程块维度等,传递执行参 数给CUDA编译器和运行时系统,用于说明内核函数中的线程数量,以及线程是如何 组织的;尖括号中这些参数并不是传递给设备代码的参数,而是告诉运行时如何 启动设备代码,传递给设备代码本身的参数是放在圆括号中传递的,就像标准的函 数调用一样;不同计算能力的设备对线程的总数和组织方式有不同的约束;必须 先为kernel中用到的数组或变量分配好足够的空间,再调用kernel函数,否则在 GPU计算时会发生错误,例如越界等; 使用运行时API时,需要在调用的内核函数名与参数列表直接以<<<Dg,Db,Ns,S>>> 的形式设置执行配置,其中:Dg是一个dim3型变量,用于设置grid的维度和各个 维度上的尺寸.设置好Dg后,grid中将有Dg.x*Dg.y*Dg.z个block;Db是 一个dim3型变量,用于设置block的维度和各个维度上的尺寸.设置好Db后,每个 block中将有Db.x*Db.y*Db.z个thread;Ns是一个unsigned int型变量,指定各块为此调 用动态分配的共享存储器大小,这些动态分配的存储器可供声明为外部数组 (extern __shared__)的其他任何变量使用;Ns是一个可选参数,默认值为0;S为 cudaStream_t类型,用于设置与内核函数关联的流.S是一个可选参数,默认值0. */ // 当线程块的数量为GPU中处理器数量的2倍时,将达到最优性能 calculate_histogram << <blocks * 2, 256 >> >(dev_buffer, length, dev_hist); cudaMemcpy(hist, dev_hist, 256 * sizeof(unsigned int), cudaMemcpyDeviceToHost); value = 0; for (int i = 0; i < 256; ++i) { value += hist[i]; } // cudaFree: 释放设备上由cudaMalloc函数分配的内存 cudaFree(dev_buffer); cudaFree(dev_hist); // cudaEventRecord: 记录一个事件,异步启动,stop记录结束时间 cudaEventRecord(stop, 0); // cudaEventSynchronize: 事件同步,等待一个事件完成,异步启动 cudaEventSynchronize(stop); // cudaEventElapseTime: 计算两个事件之间经历的时间,单位为毫秒,异步启动 cudaEventElapsedTime(elapsed_time, start, stop); // cudaEventDestroy: 销毁事件对象,异步启动 cudaEventDestroy(start); cudaEventDestroy(stop); return 0; }
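A possible host-side driver for calculate_histogram_gpu is sketched below; the data size, seed, and the assumption that "funset.hpp" declares the function (as the include in the file suggests) are illustrative only.

// Sketch of a caller for calculate_histogram_gpu; checks that the bins sum to the input length.
#include <cstdio>
#include <random>
#include <vector>
#include "funset.hpp"  // assumed to declare calculate_histogram_gpu

int main() {
	const int length = 1 << 20;  // 1 MiB of random bytes (illustrative size)
	std::vector<unsigned char> data(length);
	std::mt19937 gen(42);
	std::uniform_int_distribution<int> dist(0, 255);
	for (auto& b : data) b = static_cast<unsigned char>(dist(gen));

	unsigned int hist[256] = { 0 };
	unsigned int total = 0;     // filled with the sum of all 256 bins
	float elapsed_ms = 0.0f;    // filled with the event-based timing
	calculate_histogram_gpu(data.data(), length, hist, total, &elapsed_ms);

	std::printf("total = %u (expected %d), elapsed = %.3f ms\n", total, length, elapsed_ms);
	return 0;
}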
fdced60646b78ff9ce06bf7a0a2599100bcb7f04.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" /* * JCudaVec - Vector operations for JCuda * http://www.jcuda.org * * Copyright (c) 2013-2015 Marco Hutter - http://www.jcuda.org */ extern "C" //=== Vector arithmetic ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar arithmetic =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector comparison ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar comparison =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector math (one argument) ============================================= // Calculate the arc cosine of the input argument. extern "C" // Calculate the nonnegative arc hyperbolic cosine of the input argument. extern "C" // Calculate the arc sine of the input argument. extern "C" // Calculate the arc hyperbolic sine of the input argument. extern "C" // Calculate the arc tangent of the input argument. extern "C" // Calculate the arc hyperbolic tangent of the input argument. extern "C" // Calculate the cube root of the input argument. extern "C" // Calculate ceiling of the input argument. extern "C" // Calculate the cosine of the input argument. extern "C" // Calculate the hyperbolic cosine of the input argument. extern "C" // Calculate the cosine of the input argument p . extern "C" // Calculate the complementary error function of the input argument. extern "C" // Calculate the inverse complementary error function of the input argument. extern "C" // Calculate the scaled complementary error function of the input argument. extern "C" // Calculate the error function of the input argument. extern "C" // Calculate the inverse error function of the input argument. extern "C" // Calculate the base 10 exponential of the input argument. extern "C" // Calculate the base 2 exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument, minus 1. extern "C" // Calculate the absolute value of its argument. extern "C" // Calculate the largest integer less than or equal to x. extern "C" // Calculate the value of the Bessel function of the first kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the first kind of order 1 for the input argument. extern "C" // Calculate the natural logarithm of the absolute value of the gamma function of the input argument. extern "C" // Calculate the base 10 logarithm of the input argument. extern "C" // Calculate the value of l o g e ( 1 + x ) . extern "C" // Calculate the base 2 logarithm of the input argument. extern "C" // Calculate the floating point representation of the exponent of the input argument. extern "C" // Calculate the natural logarithm of the input argument. extern "C" // Calculate the standard normal cumulative distribution function. extern "C" // Calculate the inverse of the standard normal cumulative distribution function. extern "C" // Calculate reciprocal cube root function. extern "C" // Round input to nearest integer value in floating-point. extern "C" // Round to nearest integer value in floating-point. 
extern "C" // Calculate the reciprocal of the square root of the input argument. extern "C" // Calculate the sine of the input argument. extern "C" // Calculate the hyperbolic sine of the input argument. extern "C" // Calculate the sine of the input argument p . extern "C" // Calculate the square root of the input argument. extern "C" // Calculate the tangent of the input argument. extern "C" // Calculate the hyperbolic tangent of the input argument. extern "C" // Calculate the gamma function of the input argument. extern "C" // Truncate input argument to the integral part. extern "C" // Calculate the value of the Bessel function of the second kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the second kind of order 1 for the input argument. extern "C" //=== Vector math (two arguments) ============================================ // Create value with given magnitude, copying sign of second value. extern "C" // Compute the positive difference between x and y. extern "C" // Divide two floating point values. extern "C" // Determine the maximum numeric value of the arguments. extern "C" // Determine the minimum numeric value of the arguments. extern "C" // Calculate the floating-point remainder of x / y. extern "C" // Calculate the square root of the sum of squares of two arguments. extern "C" // Return next representable single-precision floating-point value afer argument. extern "C" // Calculate the value of first argument to the power of second argument. extern "C" // Compute single-precision floating-point remainder. extern "C" __global__ void vec_gtf (size_t n, float *result, float *x, float *y) { int id = threadIdx.x + blockIdx.x * blockDim.x; if (id < n) { result[id] = (x[id] > y[id])?1.0f:0.0f; } }
fdced60646b78ff9ce06bf7a0a2599100bcb7f04.cu
#include "includes.h" /* * JCudaVec - Vector operations for JCuda * http://www.jcuda.org * * Copyright (c) 2013-2015 Marco Hutter - http://www.jcuda.org */ extern "C" //=== Vector arithmetic ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar arithmetic =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector comparison ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar comparison =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector math (one argument) ============================================= // Calculate the arc cosine of the input argument. extern "C" // Calculate the nonnegative arc hyperbolic cosine of the input argument. extern "C" // Calculate the arc sine of the input argument. extern "C" // Calculate the arc hyperbolic sine of the input argument. extern "C" // Calculate the arc tangent of the input argument. extern "C" // Calculate the arc hyperbolic tangent of the input argument. extern "C" // Calculate the cube root of the input argument. extern "C" // Calculate ceiling of the input argument. extern "C" // Calculate the cosine of the input argument. extern "C" // Calculate the hyperbolic cosine of the input argument. extern "C" // Calculate the cosine of the input argument × p . extern "C" // Calculate the complementary error function of the input argument. extern "C" // Calculate the inverse complementary error function of the input argument. extern "C" // Calculate the scaled complementary error function of the input argument. extern "C" // Calculate the error function of the input argument. extern "C" // Calculate the inverse error function of the input argument. extern "C" // Calculate the base 10 exponential of the input argument. extern "C" // Calculate the base 2 exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument, minus 1. extern "C" // Calculate the absolute value of its argument. extern "C" // Calculate the largest integer less than or equal to x. extern "C" // Calculate the value of the Bessel function of the first kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the first kind of order 1 for the input argument. extern "C" // Calculate the natural logarithm of the absolute value of the gamma function of the input argument. extern "C" // Calculate the base 10 logarithm of the input argument. extern "C" // Calculate the value of l o g e ( 1 + x ) . extern "C" // Calculate the base 2 logarithm of the input argument. extern "C" // Calculate the floating point representation of the exponent of the input argument. extern "C" // Calculate the natural logarithm of the input argument. extern "C" // Calculate the standard normal cumulative distribution function. extern "C" // Calculate the inverse of the standard normal cumulative distribution function. extern "C" // Calculate reciprocal cube root function. extern "C" // Round input to nearest integer value in floating-point. extern "C" // Round to nearest integer value in floating-point. extern "C" // Calculate the reciprocal of the square root of the input argument. extern "C" // Calculate the sine of the input argument. 
extern "C" // Calculate the hyperbolic sine of the input argument. extern "C" // Calculate the sine of the input argument × p . extern "C" // Calculate the square root of the input argument. extern "C" // Calculate the tangent of the input argument. extern "C" // Calculate the hyperbolic tangent of the input argument. extern "C" // Calculate the gamma function of the input argument. extern "C" // Truncate input argument to the integral part. extern "C" // Calculate the value of the Bessel function of the second kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the second kind of order 1 for the input argument. extern "C" //=== Vector math (two arguments) ============================================ // Create value with given magnitude, copying sign of second value. extern "C" // Compute the positive difference between x and y. extern "C" // Divide two floating point values. extern "C" // Determine the maximum numeric value of the arguments. extern "C" // Determine the minimum numeric value of the arguments. extern "C" // Calculate the floating-point remainder of x / y. extern "C" // Calculate the square root of the sum of squares of two arguments. extern "C" // Return next representable single-precision floating-point value afer argument. extern "C" // Calculate the value of first argument to the power of second argument. extern "C" // Compute single-precision floating-point remainder. extern "C" __global__ void vec_gtf (size_t n, float *result, float *x, float *y) { int id = threadIdx.x + blockIdx.x * blockDim.x; if (id < n) { result[id] = (x[id] > y[id])?1.0f:0.0f; } }
3dff9991c724d3651ab64579bfae12dcfdfd4804.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author GS <[email protected]> // #include <array/NDArrayFactory.h> #include <exceptions/cuda_exception.h> #include <helpers/ConstantTadHelper.h> #include <helpers/PointersManager.h> #include <helpers/ShapeUtils.h> #include <helpers/TAD.h> #include <ops/declarable/helpers/segment.h> #include <ops/declarable/helpers/segment_common.h> namespace sd { namespace ops { namespace helpers { // -------------------------------------------------------------------------------------------------------------- // // Segment ops linear kernels // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static SD_KERNEL void segmentMaxLinearKernel(void* input, sd::LongType const* inputShape, int* starts, int* lengths, sd::LongType numOfClasses, void* output, sd::LongType const* outputShape) { __shared__ T* val; __shared__ sd::LongType xLen, zLen, zIndex; __shared__ T* x; __shared__ T* z; __shared__ int threadsPerSegment, start, finish; auto segment = blockIdx.x; if (threadIdx.x == 0) { // threadsPerSegment = (gridDim.x + numOfClasses - 1) / numOfClasses; // segment = blockIdx.x / threadsPerSegment; x = reinterpret_cast<T*>(input); z = reinterpret_cast<T*>(output); extern __shared__ unsigned char shmem[]; val = reinterpret_cast<T*>(shmem); xLen = shape::length(inputShape); zLen = shape::length(outputShape); if (segment < numOfClasses) { zIndex = shape::getIndexOffset(segment, outputShape); start = starts[segment]; finish = start + lengths[segment]; z[zIndex] = x[shape::getIndexOffset(start, inputShape)]; val[segment] = z[zIndex]; } } __syncthreads(); for (auto e = start + threadIdx.x + 1; e < finish; e += blockDim.x) { auto xIndex = shape::getIndexOffset(e, inputShape); sd::math::atomics::sd_atomicMax(&z[zIndex], x[xIndex]); } } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static SD_KERNEL void unsortedSegmentMaxLinearKernel(void* input, sd::LongType const* inputShape, void* indices, sd::LongType const* indicesShape, int* starts, int* lengths, sd::LongType numOfClasses, void* output, sd::LongType const* outputShape) { __shared__ T* val; __shared__ sd::LongType xLen, zLen, zIndex; __shared__ T* x; __shared__ T* z; __shared__ I* y; // int threadsPerSegment, start, finish; auto segment = blockIdx.x; if (threadIdx.x == 0) { x = reinterpret_cast<T*>(input); z = reinterpret_cast<T*>(output); y = reinterpret_cast<I*>(indices); xLen = shape::length(inputShape); zLen = shape::length(outputShape); zIndex = 
shape::getIndexOffset(segment, outputShape); // start = starts[segment]; // finish = start + lengths[segment]; if (lengths[segment] > 0) z[zIndex] = x[shape::getIndexOffset(starts[segment], inputShape)]; else z[zIndex] = -DataTypeUtils::max<T>(); } __syncthreads(); if (lengths[segment] > 0) for (auto e = threadIdx.x + 1; e < xLen; e += blockDim.x) { auto xIndex = shape::getIndexOffset(e, inputShape); auto yIndex = shape::getIndexOffset(e, indicesShape); if (y[yIndex] == segment) { sd::math::atomics::sd_atomicMax(&z[zIndex], x[xIndex]); } } } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static SD_KERNEL void segmentMaxTadKernel(void* inputBuf, sd::LongType const* inputShape, sd::LongType const* inputTads, sd::LongType const* inputTadOffsets, I* indices, int* starts, int* lengths, sd::LongType numOfClasses, void* outputBuf, sd::LongType const* outputShape, sd::LongType const* outputTads, sd::LongType const* outputTadOffsets, T filler = 0) { __shared__ T* val; __shared__ sd::LongType len, zIndex, total; __shared__ T* z; __shared__ int start, finish; __shared__ I segment; if (threadIdx.x == 0) { segment = indices[blockIdx.x]; // / threadsPerSegment; z = reinterpret_cast<T*>(outputBuf) + outputTadOffsets[segment]; len = shape::length(inputTads); start = starts[segment]; finish = start + lengths[segment]; total = shape::sizeAt(inputShape, 0); } __syncthreads(); auto idx = blockIdx.x; if (idx <= total) { auto x = reinterpret_cast<T*>(inputBuf) + inputTadOffsets[idx]; if (blockIdx.x == start) { for (auto e = threadIdx.x; e < len; e += blockDim.x) { auto xIndex = shape::getIndexOffset(e, inputTads); auto zIndex = shape::getIndexOffset(e, outputTads); sd::math::atomics::sd_atomicMax(&z[zIndex], x[xIndex]); // z[zIndex] = x[xIndex]; } } else { for (auto e = threadIdx.x; e < len; e += blockDim.x) { auto xIndex = shape::getIndexOffset(e, inputTads); auto zIndex = shape::getIndexOffset(e, outputTads); if (lengths[segment]) sd::math::atomics::sd_atomicMax(&z[zIndex], x[xIndex]); } } } } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static void segmentMaxFunctor_(LaunchContext* context, NDArray* input, NDArray* indices, NDArray* output) { // int numClasses = output->sizeAt(0); // if input is a vector: (as if in doc sample) // sd::LongType idx = indices->e<sd::LongType>(0); output->assign(-DataTypeUtils::infOrMax<T>()); auto stream = context->getCudaStream(); indices->syncToHost(); sd::LongType numOfClasses = indices->e<sd::LongType>(indices->lengthOf() - 1) + 1; NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numOfClasses}, context); NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numOfClasses}, context); classesRangesBegs.assign(indices->lengthOf()); classesRangesLens.assign(0); dim3 dims(256, 512, 256); int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer()); int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer()); fillUpSegments(indices, numOfClasses, classesRangesBegs, classesRangesLens); NDArray::prepareSpecialUse({output}, {input, indices, &classesRangesBegs, &classesRangesLens}); if (input->isVector()) { hipLaunchKernelGGL(( segmentMaxLinearKernel<T, I>), dim3(numOfClasses), dim3(input->lengthOf()), numOfClasses * 32 + 32, *stream, input->specialBuffer(), input->specialShapeInfo(), begins, lengths, numOfClasses, 
output->specialBuffer(), output->specialShapeInfo()); } else { std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0}); auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions); auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions); auto inputTads = packX.specialShapeInfo(); auto inputTadOffsets = packX.specialOffsets(); auto outputTads = packZ.specialShapeInfo(); auto outputTadOffsets = packZ.specialOffsets(); hipLaunchKernelGGL(( segmentMaxTadKernel<T, I>), dim3(packX.numberOfTads()), dim3(512), 2048, *stream, input->specialBuffer(), input->specialShapeInfo(), inputTads, inputTadOffsets, reinterpret_cast<I*>(indices->specialBuffer()), begins, lengths, numOfClasses, output->specialBuffer(), output->specialShapeInfo(), outputTads, outputTadOffsets); } NDArray::registerSpecialUse({output}, {input, indices, &classesRangesBegs, &classesRangesLens}); } // -------------------------------------------------------------------------------------------------------------- // void segmentMaxFunctor(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* output) { NDArray::prepareSpecialUse({output}, {input, indices}); BUILD_DOUBLE_SELECTOR(input->dataType(), indices->dataType(), segmentMaxFunctor_, (context, input, indices, output), SD_NUMERIC_TYPES, SD_INDEXING_TYPES); NDArray::registerSpecialUse({output}, {input, indices}); } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static void unsortedSegmentMaxFunctor_(sd::LaunchContext* context, NDArray* input, NDArray* indices, sd::LongType numOfClasses, NDArray* output) { auto stream = context->getCudaStream(); // NDArray classes = NDArrayFactory::create<int>('c', {numOfClasses, 2}); output->assign(DataTypeUtils::infOrMax<T>()); NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numOfClasses}, context); NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numOfClasses}, context); // NDArray row = NDArrayFactory::create<int>('c', {1, 2}, {(int)indices->lengthOf(), (int)0}); // classes.applyTrueBroadcast(sd::BroadcastOpsTuple::Assign(), row, classes); classesRangesBegs.assign(indices->lengthOf()); classesRangesLens.assign(0); dim3 dims(numOfClasses, indices->lengthOf(), numOfClasses * 32 + 32); // int* classesBuf = reinterpret_cast<int*>(classes.specialBuffer()); fillUpSegments(indices, numOfClasses, classesRangesBegs, classesRangesLens); int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer()); int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer()); if (input->isVector()) { hipLaunchKernelGGL(( unsortedSegmentMaxLinearKernel<T, I>), dim3(dims.x), dim3(dims.y), dims.z, *stream, input->specialBuffer(), input->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), begins, lengths, numOfClasses, output->specialBuffer(), output->specialShapeInfo()); } else { std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0}); auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions); auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions); auto inputTads = packX.specialShapeInfo(); auto inputTadOffsets = packX.specialOffsets(); auto outputTads = packZ.specialShapeInfo(); auto outputTadOffsets = packZ.specialOffsets(); dims.x = input->sizeAt(0); 
output->assign(-DataTypeUtils::max<T>()); hipLaunchKernelGGL(( segmentMaxTadKernel<T, I>), dim3(dims.x), dim3(dims.y), dims.z, *stream, input->specialBuffer(), input->specialShapeInfo(), inputTads, inputTadOffsets, reinterpret_cast<I*>(indices->specialBuffer()), begins, lengths, numOfClasses, output->specialBuffer(), output->specialShapeInfo(), outputTads, outputTadOffsets); } } // -------------------------------------------------------------------------------------------------------------- // void unsortedSegmentMaxFunctor(sd::LaunchContext* context, NDArray* input, NDArray* indices, sd::LongType numOfClasses, NDArray* output) { NDArray::prepareSpecialUse({output}, {input, indices}); output->nullify(); BUILD_DOUBLE_SELECTOR(input->dataType(), indices->dataType(), unsortedSegmentMaxFunctor_, (context, input, indices, numOfClasses, output), SD_NUMERIC_TYPES, SD_INDEXING_TYPES); NDArray::registerSpecialUse({output}, {input, indices}); } // -------------------------------------------------------------------------------------------------------------- // // segment max // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static SD_KERNEL void segmentMaxBPLinearKernel(void* inputBuf, sd::LongType const* inputShape, void* forwardOutput, sd::LongType const* forwardShape, void* eps, sd::LongType const* epsShape, void* indicesBuf, sd::LongType const* indicesShape, void* outputBuf, sd::LongType const* outputShape) { __shared__ T* x; __shared__ T* gradIn; __shared__ T* gradOut; __shared__ I* y; __shared__ T* z; __shared__ sd::LongType xLen, gradLen; if (threadIdx.x == 0) { xLen = shape::length(inputShape); x = reinterpret_cast<T*>(inputBuf); y = reinterpret_cast<I*>(indicesBuf); z = reinterpret_cast<T*>(outputBuf); gradIn = reinterpret_cast<T*>(forwardOutput); gradOut = reinterpret_cast<T*>(eps); gradLen = shape::length(epsShape); } __syncthreads(); auto start = blockIdx.x * blockDim.x + threadIdx.x; auto step = gridDim.x * blockDim.x; for (auto e = start; e < xLen; e += step) { auto zOffset = shape::getIndexOffset(e, outputShape); auto xOffset = shape::getIndexOffset(e, inputShape); auto yOffset = shape::getIndexOffset(e, indicesShape); auto classIndex = y[yOffset]; auto gradOffsetI = shape::getIndexOffset(classIndex, forwardShape); auto gradOffsetO = shape::getIndexOffset(classIndex, epsShape); if (sd::math::sd_abs(gradIn[gradOffsetI] - x[xOffset]) <= T(1.e-6)) { z[zOffset] = gradOut[gradOffsetO]; } } } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static SD_KERNEL void segmentMaxBPTadKernel(void* inputBuf, sd::LongType const* inputShape, void* forwardOutput, sd::LongType const* forwardShape, void* eps, sd::LongType const* epsShape, void* indicesBuf, sd::LongType const* indicesShape, void* outputBuf, sd::LongType const* outputShape, sd::LongType const* inputTad, sd::LongType const* inputOffsets, sd::LongType const* gradInTad, sd::LongType const* gradInOffsets, sd::LongType const* gradOutTad, sd::LongType const* gradOutOffsets, sd::LongType const* outTad, sd::LongType const* outOffsets) { __shared__ T* x; __shared__ T* gradIn; __shared__ T* gradOut; __shared__ I* y; __shared__ T* z; __shared__ sd::LongType xLen, yLen, gradLen, currentLen; if (threadIdx.x == 0) { xLen = shape::length(inputShape); x = reinterpret_cast<T*>(inputBuf); y = reinterpret_cast<I*>(indicesBuf); z = 
reinterpret_cast<T*>(outputBuf); yLen = shape::length(indicesShape); gradOut = reinterpret_cast<T*>(eps); gradIn = reinterpret_cast<T*>(forwardOutput); gradLen = shape::length(epsShape); currentLen = shape::length(outTad); } __syncthreads(); for (auto i = blockIdx.x; i < yLen; i += gridDim.x) { auto yIndex = shape::getIndexOffset(i, indicesShape); auto segment = y[yIndex]; T* current = x + inputOffsets[i]; T* currentOut = z + outOffsets[i]; T* in = gradIn + gradInOffsets[segment]; T* outGrad = gradOut + gradOutOffsets[segment]; for (auto e = threadIdx.x; e < currentLen; e += blockDim.x) { if (sd::math::sd_abs(in[e] - current[e]) <= T(1.e-6)) currentOut[e] = outGrad[e]; } } } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> sd::Status segmentMaxFunctorBP_(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* gradOut, NDArray* output) { // int numOfClasses = gradOut->sizeAt(0); // if input is a vector: (as if in doc sample) auto stream = context->getCudaStream(); NDArray tempRes(gradOut->ordering(), gradOut->getShapeAsVector(), DataTypeUtils::fromT<T>(), context); //->shapeInfo(), context); segmentMaxFunctor_<T, I>(context, input, indices, &tempRes); NDArray::prepareSpecialUse({output}, {input, indices, gradOut, &tempRes}); if (input->isVector()) { sd::LongType loop_size = input->lengthOf(); auto numOfClasses = gradOut->lengthOf(); // indices->e<sd::LongType>(loop_size - 1); hipLaunchKernelGGL(( segmentMaxBPLinearKernel<T, I>), dim3(1 + gradOut->lengthOf()), dim3(input->lengthOf()), 256, *stream, input->specialBuffer(), input->specialShapeInfo(), tempRes.specialBuffer(), tempRes.specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo()); } else { std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0}); auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions); auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions); auto packGradIn = sd::ConstantTadHelper::getInstance().tadForDimensions(tempRes.shapeInfo(), dimensions); auto packGradOut = sd::ConstantTadHelper::getInstance().tadForDimensions(gradOut->shapeInfo(), dimensions); sd::LongType const* inputTads = packX.specialShapeInfo(); sd::LongType const* inputTadOffsets = packX.specialOffsets(); sd::LongType const* outputTads = packZ.specialShapeInfo(); sd::LongType const* outputTadOffsets = packZ.specialOffsets(); sd::LongType const* gradInTads = packGradIn.specialShapeInfo(); sd::LongType const* gradInTadOffsets = packGradIn.specialOffsets(); sd::LongType const* gradOutTads = packGradOut.specialShapeInfo(); sd::LongType const* gradOutTadOffsets = packGradOut.specialOffsets(); hipLaunchKernelGGL(( segmentMaxBPTadKernel<T, I>), dim3(gradOut->lengthOf()), dim3(input->lengthOf()), 256, *stream, input->specialBuffer(), input->specialShapeInfo(), tempRes.specialBuffer(), tempRes.specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), inputTads, inputTadOffsets, gradInTads, gradInTadOffsets, gradOutTads, gradOutTadOffsets, outputTads, outputTadOffsets); } NDArray::registerSpecialUse({output}, {input, indices, gradOut, &tempRes}); return sd::Status::OK; } // 
-------------------------------------------------------------------------------------------------------------- // sd::Status segmentMaxFunctorBP(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* gradOut, NDArray* output) { NDArray::prepareSpecialUse({output}, {input, indices, gradOut}); BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), return segmentMaxFunctorBP_, (context, input, indices, gradOut, output), SD_FLOAT_TYPES, SD_INDEXING_TYPES); NDArray::registerSpecialUse({output}, {input, indices, gradOut}); } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static sd::Status unsortedSegmentMaxFunctorBP_(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* gradOut, sd::LongType numOfClasses, NDArray* output) { // int numOfClasses = gradOut->sizeAt(0); // if input is a vector: (as if in doc sample) auto stream = context->getCudaStream(); NDArray tempRes(gradOut->ordering(), gradOut->getShapeAsVector(), DataTypeUtils::fromT<T>(), context); //->shapeInfo(), context); unsortedSegmentMaxFunctor_<T, I>(context, input, indices, numOfClasses, &tempRes); NDArray::prepareSpecialUse({output}, {input, indices, gradOut, &tempRes}); if (input->isVector()) { sd::LongType loop_size = input->lengthOf(); auto numOfClasses = gradOut->lengthOf(); // indices->e<sd::LongType>(loop_size - 1); hipLaunchKernelGGL(( segmentMaxBPLinearKernel<T, I>), dim3(gradOut->lengthOf()), dim3(input->lengthOf()), 256, *stream, input->specialBuffer(), input->specialShapeInfo(), tempRes.specialBuffer(), tempRes.specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo()); } else { std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0}); auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions); auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions); auto packGradIn = sd::ConstantTadHelper::getInstance().tadForDimensions(tempRes.shapeInfo(), dimensions); auto packGradOut = sd::ConstantTadHelper::getInstance().tadForDimensions(gradOut->shapeInfo(), dimensions); sd::LongType const* inputTads = packX.specialShapeInfo(); sd::LongType const* inputTadOffsets = packX.specialOffsets(); sd::LongType const* outputTads = packZ.specialShapeInfo(); sd::LongType const* outputTadOffsets = packZ.specialOffsets(); sd::LongType const* gradInTads = packGradIn.specialShapeInfo(); sd::LongType const* gradInTadOffsets = packGradIn.specialOffsets(); sd::LongType const* gradOutTads = packGradOut.specialShapeInfo(); sd::LongType const* gradOutTadOffsets = packGradOut.specialOffsets(); hipLaunchKernelGGL(( segmentMaxBPTadKernel<T, I>), dim3(gradOut->lengthOf()), dim3(input->lengthOf()), 256, *stream, input->specialBuffer(), input->specialShapeInfo(), tempRes.specialBuffer(), tempRes.specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), inputTads, inputTadOffsets, gradInTads, gradInTadOffsets, gradOutTads, gradOutTadOffsets, outputTads, outputTadOffsets); } NDArray::registerSpecialUse({output}, {input, indices, gradOut, &tempRes}); return sd::Status::OK; } // 
-------------------------------------------------------------------------------------------------------------- // sd::Status unsortedSegmentMaxFunctorBP(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* gradOut, sd::LongType numOfClasses, NDArray* output) { NDArray::prepareSpecialUse({output}, {input, indices, gradOut}); BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), return unsortedSegmentMaxFunctorBP_, (context, input, indices, gradOut, numOfClasses, output), SD_FLOAT_TYPES, SD_INDEXING_TYPES); NDArray::registerSpecialUse({output}, {input, indices, gradOut}); } } // namespace helpers } // namespace ops } // namespace sd
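For reference, the vector path implemented by segmentMaxLinearKernel / unsortedSegmentMaxLinearKernel above computes, for each segment id, the maximum of the inputs carrying that id, with empty segments left at the most negative representable value. A small CPU sketch of that contract (helper name and test values are illustrative, not part of the library):

// CPU reference for 1-D segment max; mirrors the -max<T>() fill used for empty segments above.
#include <cstdio>
#include <limits>
#include <vector>

std::vector<float> SegmentMaxReference(const std::vector<float>& x, const std::vector<int>& ids, int num_classes) {
	std::vector<float> out(num_classes, -std::numeric_limits<float>::max());
	for (size_t i = 0; i < x.size(); ++i)
		if (x[i] > out[ids[i]]) out[ids[i]] = x[i];
	return out;
}

int main() {
	// For the sorted segment_max variant the ids must be non-decreasing and start at 0.
	std::vector<float> x = { 1.f, 3.f, 2.f, 5.f, 4.f };
	std::vector<int> ids = { 0, 0, 1, 1, 1 };
	auto out = SegmentMaxReference(x, ids, 2);
	std::printf("%.1f %.1f\n", out[0], out[1]);  // expect 3.0 and 5.0
	return 0;
}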
3dff9991c724d3651ab64579bfae12dcfdfd4804.cu
/* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author GS <[email protected]> // #include <array/NDArrayFactory.h> #include <exceptions/cuda_exception.h> #include <helpers/ConstantTadHelper.h> #include <helpers/PointersManager.h> #include <helpers/ShapeUtils.h> #include <helpers/TAD.h> #include <ops/declarable/helpers/segment.h> #include <ops/declarable/helpers/segment_common.h> namespace sd { namespace ops { namespace helpers { // -------------------------------------------------------------------------------------------------------------- // // Segment ops linear kernels // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static SD_KERNEL void segmentMaxLinearKernel(void* input, sd::LongType const* inputShape, int* starts, int* lengths, sd::LongType numOfClasses, void* output, sd::LongType const* outputShape) { __shared__ T* val; __shared__ sd::LongType xLen, zLen, zIndex; __shared__ T* x; __shared__ T* z; __shared__ int threadsPerSegment, start, finish; auto segment = blockIdx.x; if (threadIdx.x == 0) { // threadsPerSegment = (gridDim.x + numOfClasses - 1) / numOfClasses; // segment = blockIdx.x / threadsPerSegment; x = reinterpret_cast<T*>(input); z = reinterpret_cast<T*>(output); extern __shared__ unsigned char shmem[]; val = reinterpret_cast<T*>(shmem); xLen = shape::length(inputShape); zLen = shape::length(outputShape); if (segment < numOfClasses) { zIndex = shape::getIndexOffset(segment, outputShape); start = starts[segment]; finish = start + lengths[segment]; z[zIndex] = x[shape::getIndexOffset(start, inputShape)]; val[segment] = z[zIndex]; } } __syncthreads(); for (auto e = start + threadIdx.x + 1; e < finish; e += blockDim.x) { auto xIndex = shape::getIndexOffset(e, inputShape); sd::math::atomics::sd_atomicMax(&z[zIndex], x[xIndex]); } } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static SD_KERNEL void unsortedSegmentMaxLinearKernel(void* input, sd::LongType const* inputShape, void* indices, sd::LongType const* indicesShape, int* starts, int* lengths, sd::LongType numOfClasses, void* output, sd::LongType const* outputShape) { __shared__ T* val; __shared__ sd::LongType xLen, zLen, zIndex; __shared__ T* x; __shared__ T* z; __shared__ I* y; // int threadsPerSegment, start, finish; auto segment = blockIdx.x; if (threadIdx.x == 0) { x = reinterpret_cast<T*>(input); z = reinterpret_cast<T*>(output); y = reinterpret_cast<I*>(indices); xLen = shape::length(inputShape); zLen = shape::length(outputShape); zIndex = shape::getIndexOffset(segment, outputShape); // start = starts[segment]; // finish = start + lengths[segment]; 
if (lengths[segment] > 0) z[zIndex] = x[shape::getIndexOffset(starts[segment], inputShape)]; else z[zIndex] = -DataTypeUtils::max<T>(); } __syncthreads(); if (lengths[segment] > 0) for (auto e = threadIdx.x + 1; e < xLen; e += blockDim.x) { auto xIndex = shape::getIndexOffset(e, inputShape); auto yIndex = shape::getIndexOffset(e, indicesShape); if (y[yIndex] == segment) { sd::math::atomics::sd_atomicMax(&z[zIndex], x[xIndex]); } } } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static SD_KERNEL void segmentMaxTadKernel(void* inputBuf, sd::LongType const* inputShape, sd::LongType const* inputTads, sd::LongType const* inputTadOffsets, I* indices, int* starts, int* lengths, sd::LongType numOfClasses, void* outputBuf, sd::LongType const* outputShape, sd::LongType const* outputTads, sd::LongType const* outputTadOffsets, T filler = 0) { __shared__ T* val; __shared__ sd::LongType len, zIndex, total; __shared__ T* z; __shared__ int start, finish; __shared__ I segment; if (threadIdx.x == 0) { segment = indices[blockIdx.x]; // / threadsPerSegment; z = reinterpret_cast<T*>(outputBuf) + outputTadOffsets[segment]; len = shape::length(inputTads); start = starts[segment]; finish = start + lengths[segment]; total = shape::sizeAt(inputShape, 0); } __syncthreads(); auto idx = blockIdx.x; if (idx <= total) { auto x = reinterpret_cast<T*>(inputBuf) + inputTadOffsets[idx]; if (blockIdx.x == start) { for (auto e = threadIdx.x; e < len; e += blockDim.x) { auto xIndex = shape::getIndexOffset(e, inputTads); auto zIndex = shape::getIndexOffset(e, outputTads); sd::math::atomics::sd_atomicMax(&z[zIndex], x[xIndex]); // z[zIndex] = x[xIndex]; } } else { for (auto e = threadIdx.x; e < len; e += blockDim.x) { auto xIndex = shape::getIndexOffset(e, inputTads); auto zIndex = shape::getIndexOffset(e, outputTads); if (lengths[segment]) sd::math::atomics::sd_atomicMax(&z[zIndex], x[xIndex]); } } } } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static void segmentMaxFunctor_(LaunchContext* context, NDArray* input, NDArray* indices, NDArray* output) { // int numClasses = output->sizeAt(0); // if input is a vector: (as if in doc sample) // sd::LongType idx = indices->e<sd::LongType>(0); output->assign(-DataTypeUtils::infOrMax<T>()); auto stream = context->getCudaStream(); indices->syncToHost(); sd::LongType numOfClasses = indices->e<sd::LongType>(indices->lengthOf() - 1) + 1; NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numOfClasses}, context); NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numOfClasses}, context); classesRangesBegs.assign(indices->lengthOf()); classesRangesLens.assign(0); dim3 dims(256, 512, 256); int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer()); int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer()); fillUpSegments(indices, numOfClasses, classesRangesBegs, classesRangesLens); NDArray::prepareSpecialUse({output}, {input, indices, &classesRangesBegs, &classesRangesLens}); if (input->isVector()) { segmentMaxLinearKernel<T, I><<<numOfClasses, input->lengthOf(), numOfClasses * 32 + 32, *stream>>>( input->specialBuffer(), input->specialShapeInfo(), begins, lengths, numOfClasses, output->specialBuffer(), output->specialShapeInfo()); } else { std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0}); auto packX = 
sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions); auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions); auto inputTads = packX.specialShapeInfo(); auto inputTadOffsets = packX.specialOffsets(); auto outputTads = packZ.specialShapeInfo(); auto outputTadOffsets = packZ.specialOffsets(); segmentMaxTadKernel<T, I><<<packX.numberOfTads(), 512, 2048, *stream>>>( input->specialBuffer(), input->specialShapeInfo(), inputTads, inputTadOffsets, reinterpret_cast<I*>(indices->specialBuffer()), begins, lengths, numOfClasses, output->specialBuffer(), output->specialShapeInfo(), outputTads, outputTadOffsets); } NDArray::registerSpecialUse({output}, {input, indices, &classesRangesBegs, &classesRangesLens}); } // -------------------------------------------------------------------------------------------------------------- // void segmentMaxFunctor(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* output) { NDArray::prepareSpecialUse({output}, {input, indices}); BUILD_DOUBLE_SELECTOR(input->dataType(), indices->dataType(), segmentMaxFunctor_, (context, input, indices, output), SD_NUMERIC_TYPES, SD_INDEXING_TYPES); NDArray::registerSpecialUse({output}, {input, indices}); } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static void unsortedSegmentMaxFunctor_(sd::LaunchContext* context, NDArray* input, NDArray* indices, sd::LongType numOfClasses, NDArray* output) { auto stream = context->getCudaStream(); // NDArray classes = NDArrayFactory::create<int>('c', {numOfClasses, 2}); output->assign(DataTypeUtils::infOrMax<T>()); NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numOfClasses}, context); NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numOfClasses}, context); // NDArray row = NDArrayFactory::create<int>('c', {1, 2}, {(int)indices->lengthOf(), (int)0}); // classes.applyTrueBroadcast(sd::BroadcastOpsTuple::Assign(), row, classes); classesRangesBegs.assign(indices->lengthOf()); classesRangesLens.assign(0); dim3 dims(numOfClasses, indices->lengthOf(), numOfClasses * 32 + 32); // int* classesBuf = reinterpret_cast<int*>(classes.specialBuffer()); fillUpSegments(indices, numOfClasses, classesRangesBegs, classesRangesLens); int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer()); int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer()); if (input->isVector()) { unsortedSegmentMaxLinearKernel<T, I><<<dims.x, dims.y, dims.z, *stream>>>( input->specialBuffer(), input->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), begins, lengths, numOfClasses, output->specialBuffer(), output->specialShapeInfo()); } else { std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0}); auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions); auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions); auto inputTads = packX.specialShapeInfo(); auto inputTadOffsets = packX.specialOffsets(); auto outputTads = packZ.specialShapeInfo(); auto outputTadOffsets = packZ.specialOffsets(); dims.x = input->sizeAt(0); output->assign(-DataTypeUtils::max<T>()); segmentMaxTadKernel<T, I><<<dims.x, dims.y, dims.z, *stream>>>( input->specialBuffer(), input->specialShapeInfo(), inputTads, inputTadOffsets, reinterpret_cast<I*>(indices->specialBuffer()), 
begins, lengths, numOfClasses, output->specialBuffer(), output->specialShapeInfo(), outputTads, outputTadOffsets); } } // -------------------------------------------------------------------------------------------------------------- // void unsortedSegmentMaxFunctor(sd::LaunchContext* context, NDArray* input, NDArray* indices, sd::LongType numOfClasses, NDArray* output) { NDArray::prepareSpecialUse({output}, {input, indices}); output->nullify(); BUILD_DOUBLE_SELECTOR(input->dataType(), indices->dataType(), unsortedSegmentMaxFunctor_, (context, input, indices, numOfClasses, output), SD_NUMERIC_TYPES, SD_INDEXING_TYPES); NDArray::registerSpecialUse({output}, {input, indices}); } // -------------------------------------------------------------------------------------------------------------- // // segment max // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static SD_KERNEL void segmentMaxBPLinearKernel(void* inputBuf, sd::LongType const* inputShape, void* forwardOutput, sd::LongType const* forwardShape, void* eps, sd::LongType const* epsShape, void* indicesBuf, sd::LongType const* indicesShape, void* outputBuf, sd::LongType const* outputShape) { __shared__ T* x; __shared__ T* gradIn; __shared__ T* gradOut; __shared__ I* y; __shared__ T* z; __shared__ sd::LongType xLen, gradLen; if (threadIdx.x == 0) { xLen = shape::length(inputShape); x = reinterpret_cast<T*>(inputBuf); y = reinterpret_cast<I*>(indicesBuf); z = reinterpret_cast<T*>(outputBuf); gradIn = reinterpret_cast<T*>(forwardOutput); gradOut = reinterpret_cast<T*>(eps); gradLen = shape::length(epsShape); } __syncthreads(); auto start = blockIdx.x * blockDim.x + threadIdx.x; auto step = gridDim.x * blockDim.x; for (auto e = start; e < xLen; e += step) { auto zOffset = shape::getIndexOffset(e, outputShape); auto xOffset = shape::getIndexOffset(e, inputShape); auto yOffset = shape::getIndexOffset(e, indicesShape); auto classIndex = y[yOffset]; auto gradOffsetI = shape::getIndexOffset(classIndex, forwardShape); auto gradOffsetO = shape::getIndexOffset(classIndex, epsShape); if (sd::math::sd_abs(gradIn[gradOffsetI] - x[xOffset]) <= T(1.e-6)) { z[zOffset] = gradOut[gradOffsetO]; } } } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static SD_KERNEL void segmentMaxBPTadKernel(void* inputBuf, sd::LongType const* inputShape, void* forwardOutput, sd::LongType const* forwardShape, void* eps, sd::LongType const* epsShape, void* indicesBuf, sd::LongType const* indicesShape, void* outputBuf, sd::LongType const* outputShape, sd::LongType const* inputTad, sd::LongType const* inputOffsets, sd::LongType const* gradInTad, sd::LongType const* gradInOffsets, sd::LongType const* gradOutTad, sd::LongType const* gradOutOffsets, sd::LongType const* outTad, sd::LongType const* outOffsets) { __shared__ T* x; __shared__ T* gradIn; __shared__ T* gradOut; __shared__ I* y; __shared__ T* z; __shared__ sd::LongType xLen, yLen, gradLen, currentLen; if (threadIdx.x == 0) { xLen = shape::length(inputShape); x = reinterpret_cast<T*>(inputBuf); y = reinterpret_cast<I*>(indicesBuf); z = reinterpret_cast<T*>(outputBuf); yLen = shape::length(indicesShape); gradOut = reinterpret_cast<T*>(eps); gradIn = reinterpret_cast<T*>(forwardOutput); gradLen = shape::length(epsShape); currentLen = shape::length(outTad); } __syncthreads(); for (auto i = blockIdx.x; i < yLen; i += 
gridDim.x) { auto yIndex = shape::getIndexOffset(i, indicesShape); auto segment = y[yIndex]; T* current = x + inputOffsets[i]; T* currentOut = z + outOffsets[i]; T* in = gradIn + gradInOffsets[segment]; T* outGrad = gradOut + gradOutOffsets[segment]; for (auto e = threadIdx.x; e < currentLen; e += blockDim.x) { if (sd::math::sd_abs(in[e] - current[e]) <= T(1.e-6)) currentOut[e] = outGrad[e]; } } } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> sd::Status segmentMaxFunctorBP_(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* gradOut, NDArray* output) { // int numOfClasses = gradOut->sizeAt(0); // if input is a vector: (as if in doc sample) auto stream = context->getCudaStream(); NDArray tempRes(gradOut->ordering(), gradOut->getShapeAsVector(), DataTypeUtils::fromT<T>(), context); //->shapeInfo(), context); segmentMaxFunctor_<T, I>(context, input, indices, &tempRes); NDArray::prepareSpecialUse({output}, {input, indices, gradOut, &tempRes}); if (input->isVector()) { sd::LongType loop_size = input->lengthOf(); auto numOfClasses = gradOut->lengthOf(); // indices->e<sd::LongType>(loop_size - 1); segmentMaxBPLinearKernel<T, I><<<1 + gradOut->lengthOf(), input->lengthOf(), 256, *stream>>>( input->specialBuffer(), input->specialShapeInfo(), tempRes.specialBuffer(), tempRes.specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo()); } else { std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0}); auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions); auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions); auto packGradIn = sd::ConstantTadHelper::getInstance().tadForDimensions(tempRes.shapeInfo(), dimensions); auto packGradOut = sd::ConstantTadHelper::getInstance().tadForDimensions(gradOut->shapeInfo(), dimensions); sd::LongType const* inputTads = packX.specialShapeInfo(); sd::LongType const* inputTadOffsets = packX.specialOffsets(); sd::LongType const* outputTads = packZ.specialShapeInfo(); sd::LongType const* outputTadOffsets = packZ.specialOffsets(); sd::LongType const* gradInTads = packGradIn.specialShapeInfo(); sd::LongType const* gradInTadOffsets = packGradIn.specialOffsets(); sd::LongType const* gradOutTads = packGradOut.specialShapeInfo(); sd::LongType const* gradOutTadOffsets = packGradOut.specialOffsets(); segmentMaxBPTadKernel<T, I><<<gradOut->lengthOf(), input->lengthOf(), 256, *stream>>>( input->specialBuffer(), input->specialShapeInfo(), tempRes.specialBuffer(), tempRes.specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), inputTads, inputTadOffsets, gradInTads, gradInTadOffsets, gradOutTads, gradOutTadOffsets, outputTads, outputTadOffsets); } NDArray::registerSpecialUse({output}, {input, indices, gradOut, &tempRes}); return sd::Status::OK; } // -------------------------------------------------------------------------------------------------------------- // sd::Status segmentMaxFunctorBP(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* gradOut, NDArray* output) { NDArray::prepareSpecialUse({output}, {input, indices, gradOut}); BUILD_DOUBLE_SELECTOR(output->dataType(), 
indices->dataType(), return segmentMaxFunctorBP_, (context, input, indices, gradOut, output), SD_FLOAT_TYPES, SD_INDEXING_TYPES); NDArray::registerSpecialUse({output}, {input, indices, gradOut}); } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static sd::Status unsortedSegmentMaxFunctorBP_(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* gradOut, sd::LongType numOfClasses, NDArray* output) { // int numOfClasses = gradOut->sizeAt(0); // if input is a vector: (as if in doc sample) auto stream = context->getCudaStream(); NDArray tempRes(gradOut->ordering(), gradOut->getShapeAsVector(), DataTypeUtils::fromT<T>(), context); //->shapeInfo(), context); unsortedSegmentMaxFunctor_<T, I>(context, input, indices, numOfClasses, &tempRes); NDArray::prepareSpecialUse({output}, {input, indices, gradOut, &tempRes}); if (input->isVector()) { sd::LongType loop_size = input->lengthOf(); auto numOfClasses = gradOut->lengthOf(); // indices->e<sd::LongType>(loop_size - 1); segmentMaxBPLinearKernel<T, I><<<gradOut->lengthOf(), input->lengthOf(), 256, *stream>>>( input->specialBuffer(), input->specialShapeInfo(), tempRes.specialBuffer(), tempRes.specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo()); } else { std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0}); auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions); auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions); auto packGradIn = sd::ConstantTadHelper::getInstance().tadForDimensions(tempRes.shapeInfo(), dimensions); auto packGradOut = sd::ConstantTadHelper::getInstance().tadForDimensions(gradOut->shapeInfo(), dimensions); sd::LongType const* inputTads = packX.specialShapeInfo(); sd::LongType const* inputTadOffsets = packX.specialOffsets(); sd::LongType const* outputTads = packZ.specialShapeInfo(); sd::LongType const* outputTadOffsets = packZ.specialOffsets(); sd::LongType const* gradInTads = packGradIn.specialShapeInfo(); sd::LongType const* gradInTadOffsets = packGradIn.specialOffsets(); sd::LongType const* gradOutTads = packGradOut.specialShapeInfo(); sd::LongType const* gradOutTadOffsets = packGradOut.specialOffsets(); segmentMaxBPTadKernel<T, I><<<gradOut->lengthOf(), input->lengthOf(), 256, *stream>>>( input->specialBuffer(), input->specialShapeInfo(), tempRes.specialBuffer(), tempRes.specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), inputTads, inputTadOffsets, gradInTads, gradInTadOffsets, gradOutTads, gradOutTadOffsets, outputTads, outputTadOffsets); } NDArray::registerSpecialUse({output}, {input, indices, gradOut, &tempRes}); return sd::Status::OK; } // -------------------------------------------------------------------------------------------------------------- // sd::Status unsortedSegmentMaxFunctorBP(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* gradOut, sd::LongType numOfClasses, NDArray* output) { NDArray::prepareSpecialUse({output}, {input, indices, gradOut}); BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), return unsortedSegmentMaxFunctorBP_, (context, input, indices, gradOut, numOfClasses, output), 
SD_FLOAT_TYPES, SD_INDEXING_TYPES); NDArray::registerSpecialUse({output}, {input, indices, gradOut}); } } // namespace helpers } // namespace ops } // namespace sd
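// Illustration only (not part of the record above): the backward kernels in this file route the upstream
// gradient eps of a segment only to the input positions that match the forward segment maximum within a
// 1e-6 tolerance (see segmentMaxBPLinearKernel / segmentMaxBPTadKernel). A minimal host-side sketch of that
// rule, using hypothetical plain vectors instead of the NDArray/TAD machinery, could look like this:
#include <cmath>
#include <vector>

std::vector<float> segmentMaxBackwardSketch(const std::vector<float>& input,      // flat input values
                                            const std::vector<int>& segmentIds,   // segment index per element
                                            const std::vector<float>& forwardMax, // per-segment forward output
                                            const std::vector<float>& eps) {      // per-segment upstream gradient
  std::vector<float> grad(input.size(), 0.0f);
  for (size_t e = 0; e < input.size(); ++e) {
    const int c = segmentIds[e];
    if (std::fabs(forwardMax[c] - input[e]) <= 1e-6f)  // this element produced the segment maximum
      grad[e] = eps[c];                                // so it receives the segment's gradient
  }
  return grad;
}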
c13ad34c697ded0982856f31b9131aaf3312b81e.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void __hashmult2(int nrows, int nfeats, int ncols, int brows1, int brows2, float *A, float *Bdata, int *Bir, int *Bjc, float *C, int transpose) {}
c13ad34c697ded0982856f31b9131aaf3312b81e.cu
#include "includes.h" __global__ void __hashmult2(int nrows, int nfeats, int ncols, int brows1, int brows2, float *A, float *Bdata, int *Bir, int *Bjc, float *C, int transpose) {}
1682bf98115f42c6aea9a5868d3c110d094254c2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //========================================================================== // This file has been automatically generated for C++ Standalone by // MadGraph5_aMC@NLO v. 2.8.2, 2020-10-30 // By the MadGraph5_aMC@NLO Development Team // Visit launchpad.net/madgraph5 and amcatnlo.web.cern.ch //========================================================================== #include "../../src/HelAmps_sm.cu" #include <algorithm> #include <iostream> #include "mgOnGpuTypes.h" #include "mgOnGpuConfig.h" #include "gCPPProcess.h" //========================================================================== // Class member functions for calculating the matrix elements for // Process: g g > t t~ g g WEIGHTED<=4 @1 #ifdef __HIPCC__ namespace gProc #else namespace Proc #endif { using mgOnGpu::np4; // 4: the dimension of 4-momenta (E,px,py,pz) using mgOnGpu::npar; // number of particles in total (initial + final) using mgOnGpu::ncomb; // number of helicity combinations #ifdef __HIPCC__ __device__ __constant__ int cHel[ncomb][npar]; __device__ __constant__ fptype cIPC[6]; __device__ __constant__ fptype cIPD[2]; __device__ __constant__ int cNGoodHel[1]; __device__ __constant__ int cGoodHel[ncomb]; #else static int cHel[ncomb][npar]; static fptype cIPC[6]; static fptype cIPD[2]; #endif //-------------------------------------------------------------------------- using mgOnGpu::nwf; using mgOnGpu::nw6; //-------------------------------------------------------------------------- // Evaluate |M|^2 for each subprocess // NB: calculate_wavefunctions ADDS |M|^2 for a given ihel to the running sum // of |M|^2 over helicities for the given event __device__ void calculate_wavefunctions(int ihel, const fptype * allmomenta, fptype &meHelSum #ifndef __HIPCC__ , const int ievt #endif ) { using namespace MG5_sm; mgDebug(0, __FUNCTION__); cxtype amp[1]; // was 159 const int ncolor = 24; cxtype jamp[ncolor]; // Calculate wavefunctions for all processes using namespace MG5_sm; cxtype w[nwf][nw6]; for(int i = 0; i < 24; i++ ) { jamp[i] = cxtype(0., 0.); } #ifdef __HIPCC__ vxxxxx(allmomenta, 0., cHel[ihel][0], -1, w[0], 0); #else vxxxxx(allmomenta, 0., cHel[ihel][0], -1, w[0], ievt, 0); #endif #ifdef __HIPCC__ vxxxxx(allmomenta, 0., cHel[ihel][1], -1, w[1], 1); #else vxxxxx(allmomenta, 0., cHel[ihel][1], -1, w[1], ievt, 1); #endif #ifdef __HIPCC__ oxxxxx(allmomenta, cIPD[0], cHel[ihel][2], +1, w[2], 2); #else oxxxxx(allmomenta, cIPD[0], cHel[ihel][2], +1, w[2], ievt, 2); #endif #ifdef __HIPCC__ ixxxxx(allmomenta, cIPD[0], cHel[ihel][3], -1, w[3], 3); #else ixxxxx(allmomenta, cIPD[0], cHel[ihel][3], -1, w[3], ievt, 3); #endif #ifdef __HIPCC__ vxxxxx(allmomenta, 0., cHel[ihel][4], +1, w[4], 4); #else vxxxxx(allmomenta, 0., cHel[ihel][4], +1, w[4], ievt, 4); #endif #ifdef __HIPCC__ vxxxxx(allmomenta, 0., cHel[ihel][5], +1, w[5], 5); #else vxxxxx(allmomenta, 0., cHel[ihel][5], +1, w[5], ievt, 5); #endif VVV1P0_1(w[0], w[1], cxtype(cIPC[0], cIPC[1]), 0., 0., w[6]); FFV1P0_3(w[3], w[2], cxtype(cIPC[2], cIPC[3]), 0., 0., w[7]); // Amplitude(s) for diagram number 1 VVVV1_0(w[6], w[7], w[4], w[5], cxtype(cIPC[4], cIPC[5]), &amp[0]); jamp[0] += +cxtype(0, 1) * amp[0]; jamp[1] += -cxtype(0, 1) * amp[0]; jamp[6] += -cxtype(0, 1) * amp[0]; jamp[7] += +cxtype(0, 1) * amp[0]; jamp[16] += -cxtype(0, 1) * amp[0]; jamp[17] += +cxtype(0, 1) * amp[0]; jamp[22] += +cxtype(0, 1) * amp[0]; jamp[23] += -cxtype(0, 1) * amp[0]; VVVV3_0(w[6], w[7], w[4], w[5], 
cxtype(cIPC[4], cIPC[5]), &amp[0]); jamp[0] += +cxtype(0, 1) * amp[0]; jamp[6] += -cxtype(0, 1) * amp[0]; jamp[12] += -cxtype(0, 1) * amp[0]; jamp[14] += +cxtype(0, 1) * amp[0]; jamp[18] += -cxtype(0, 1) * amp[0]; jamp[20] += +cxtype(0, 1) * amp[0]; jamp[22] += +cxtype(0, 1) * amp[0]; jamp[23] += -cxtype(0, 1) * amp[0]; VVVV4_0(w[6], w[7], w[4], w[5], cxtype(cIPC[4], cIPC[5]), &amp[0]); jamp[1] += +cxtype(0, 1) * amp[0]; jamp[7] += -cxtype(0, 1) * amp[0]; jamp[12] += -cxtype(0, 1) * amp[0]; jamp[14] += +cxtype(0, 1) * amp[0]; jamp[16] += +cxtype(0, 1) * amp[0]; jamp[17] += -cxtype(0, 1) * amp[0]; jamp[18] += -cxtype(0, 1) * amp[0]; jamp[20] += +cxtype(0, 1) * amp[0]; VVV1P0_1(w[6], w[4], cxtype(cIPC[0], cIPC[1]), 0., 0., w[8]); // Amplitude(s) for diagram number 2 VVV1_0(w[7], w[5], w[8], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[0] += +cxtype(0, 1) * amp[0]; jamp[6] += -cxtype(0, 1) * amp[0]; jamp[12] += -cxtype(0, 1) * amp[0]; jamp[14] += +cxtype(0, 1) * amp[0]; jamp[18] += -cxtype(0, 1) * amp[0]; jamp[20] += +cxtype(0, 1) * amp[0]; jamp[22] += +cxtype(0, 1) * amp[0]; jamp[23] += -cxtype(0, 1) * amp[0]; VVV1P0_1(w[6], w[5], cxtype(cIPC[0], cIPC[1]), 0., 0., w[9]); // Amplitude(s) for diagram number 3 VVV1_0(w[7], w[4], w[9], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[1] += +cxtype(0, 1) * amp[0]; jamp[7] += -cxtype(0, 1) * amp[0]; jamp[12] += -cxtype(0, 1) * amp[0]; jamp[14] += +cxtype(0, 1) * amp[0]; jamp[16] += +cxtype(0, 1) * amp[0]; jamp[17] += -cxtype(0, 1) * amp[0]; jamp[18] += -cxtype(0, 1) * amp[0]; jamp[20] += +cxtype(0, 1) * amp[0]; VVV1P0_1(w[4], w[5], cxtype(cIPC[0], cIPC[1]), 0., 0., w[10]); // Amplitude(s) for diagram number 4 VVV1_0(w[6], w[7], w[10], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[0] += +cxtype(0, 1) * amp[0]; jamp[1] += -cxtype(0, 1) * amp[0]; jamp[6] += -cxtype(0, 1) * amp[0]; jamp[7] += +cxtype(0, 1) * amp[0]; jamp[16] += -cxtype(0, 1) * amp[0]; jamp[17] += +cxtype(0, 1) * amp[0]; jamp[22] += +cxtype(0, 1) * amp[0]; jamp[23] += -cxtype(0, 1) * amp[0]; FFV1_1(w[2], w[4], cxtype(cIPC[2], cIPC[3]), cIPD[0], cIPD[1], w[11]); FFV1_2(w[3], w[6], cxtype(cIPC[2], cIPC[3]), cIPD[0], cIPD[1], w[12]); // Amplitude(s) for diagram number 5 FFV1_0(w[12], w[11], w[5], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[16] += +cxtype(0, 1) * amp[0]; jamp[17] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 6 FFV1_0(w[3], w[11], w[9], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[12] += +amp[0]; jamp[14] += -amp[0]; jamp[16] += -amp[0]; jamp[17] += +amp[0]; FFV1_2(w[3], w[5], cxtype(cIPC[2], cIPC[3]), cIPD[0], cIPD[1], w[13]); // Amplitude(s) for diagram number 7 FFV1_0(w[13], w[11], w[6], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[12] += +cxtype(0, 1) * amp[0]; jamp[14] += -cxtype(0, 1) * amp[0]; FFV1_1(w[2], w[5], cxtype(cIPC[2], cIPC[3]), cIPD[0], cIPD[1], w[14]); // Amplitude(s) for diagram number 8 FFV1_0(w[12], w[14], w[4], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[22] += +cxtype(0, 1) * amp[0]; jamp[23] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 9 FFV1_0(w[3], w[14], w[8], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[18] += +amp[0]; jamp[20] += -amp[0]; jamp[22] += -amp[0]; jamp[23] += +amp[0]; FFV1_2(w[3], w[4], cxtype(cIPC[2], cIPC[3]), cIPD[0], cIPD[1], w[15]); // Amplitude(s) for diagram number 10 FFV1_0(w[15], w[14], w[6], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[18] += +cxtype(0, 1) * amp[0]; jamp[20] += -cxtype(0, 1) * amp[0]; FFV1_1(w[2], w[6], cxtype(cIPC[2], cIPC[3]), cIPD[0], cIPD[1], w[16]); // Amplitude(s) for diagram number 11 FFV1_0(w[15], w[16], 
w[5], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[1] += +cxtype(0, 1) * amp[0]; jamp[7] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 12 FFV1_0(w[15], w[2], w[9], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[1] += +amp[0]; jamp[7] += -amp[0]; jamp[18] += -amp[0]; jamp[20] += +amp[0]; // Amplitude(s) for diagram number 13 FFV1_0(w[13], w[16], w[4], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[0] += +cxtype(0, 1) * amp[0]; jamp[6] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 14 FFV1_0(w[13], w[2], w[8], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[0] += +amp[0]; jamp[6] += -amp[0]; jamp[12] += -amp[0]; jamp[14] += +amp[0]; // Amplitude(s) for diagram number 15 FFV1_0(w[3], w[16], w[10], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[0] += +amp[0]; jamp[1] += -amp[0]; jamp[6] += -amp[0]; jamp[7] += +amp[0]; // Amplitude(s) for diagram number 16 FFV1_0(w[12], w[2], w[10], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[16] += +amp[0]; jamp[17] += -amp[0]; jamp[22] += -amp[0]; jamp[23] += +amp[0]; FFV1_1(w[2], w[0], cxtype(cIPC[2], cIPC[3]), cIPD[0], cIPD[1], w[12]); FFV1_2(w[3], w[1], cxtype(cIPC[2], cIPC[3]), cIPD[0], cIPD[1], w[16]); FFV1_1(w[12], w[4], cxtype(cIPC[2], cIPC[3]), cIPD[0], cIPD[1], w[8]); // Amplitude(s) for diagram number 17 FFV1_0(w[16], w[8], w[5], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[3] += -amp[0]; FFV1_1(w[12], w[5], cxtype(cIPC[2], cIPC[3]), cIPD[0], cIPD[1], w[9]); // Amplitude(s) for diagram number 18 FFV1_0(w[16], w[9], w[4], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[5] += -amp[0]; // Amplitude(s) for diagram number 19 FFV1_0(w[16], w[12], w[10], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[3] += +cxtype(0, 1) * amp[0]; jamp[5] += -cxtype(0, 1) * amp[0]; VVV1P0_1(w[1], w[4], cxtype(cIPC[0], cIPC[1]), 0., 0., w[6]); FFV1P0_3(w[3], w[12], cxtype(cIPC[2], cIPC[3]), 0., 0., w[17]); // Amplitude(s) for diagram number 20 VVV1_0(w[6], w[5], w[17], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[0] += +amp[0]; jamp[2] += -amp[0]; jamp[4] += -amp[0]; jamp[5] += +amp[0]; // Amplitude(s) for diagram number 21 FFV1_0(w[3], w[9], w[6], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[4] += +cxtype(0, 1) * amp[0]; jamp[5] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 22 FFV1_0(w[13], w[12], w[6], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[0] += +cxtype(0, 1) * amp[0]; jamp[2] += -cxtype(0, 1) * amp[0]; VVV1P0_1(w[1], w[5], cxtype(cIPC[0], cIPC[1]), 0., 0., w[18]); // Amplitude(s) for diagram number 23 VVV1_0(w[18], w[4], w[17], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[1] += +amp[0]; jamp[2] += -amp[0]; jamp[3] += +amp[0]; jamp[4] += -amp[0]; // Amplitude(s) for diagram number 24 FFV1_0(w[3], w[8], w[18], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[2] += +cxtype(0, 1) * amp[0]; jamp[3] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 25 FFV1_0(w[15], w[12], w[18], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[1] += +cxtype(0, 1) * amp[0]; jamp[4] += -cxtype(0, 1) * amp[0]; FFV1_1(w[12], w[1], cxtype(cIPC[2], cIPC[3]), cIPD[0], cIPD[1], w[19]); // Amplitude(s) for diagram number 26 FFV1_0(w[15], w[19], w[5], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[1] += -amp[0]; // Amplitude(s) for diagram number 27 FFV1_0(w[15], w[9], w[1], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[4] += -amp[0]; // Amplitude(s) for diagram number 28 FFV1_0(w[13], w[19], w[4], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[0] += -amp[0]; // Amplitude(s) for diagram number 29 FFV1_0(w[13], w[8], w[1], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[2] += -amp[0]; // Amplitude(s) for diagram number 30 FFV1_0(w[3], w[19], 
w[10], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[0] += +cxtype(0, 1) * amp[0]; jamp[1] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 31 VVV1_0(w[1], w[10], w[17], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[0] += +amp[0]; jamp[1] += -amp[0]; jamp[3] += -amp[0]; jamp[5] += +amp[0]; VVVV1P0_1(w[1], w[4], w[5], cxtype(cIPC[4], cIPC[5]), 0., 0., w[17]); VVVV3P0_1(w[1], w[4], w[5], cxtype(cIPC[4], cIPC[5]), 0., 0., w[19]); VVVV4P0_1(w[1], w[4], w[5], cxtype(cIPC[4], cIPC[5]), 0., 0., w[8]); // Amplitude(s) for diagram number 32 FFV1_0(w[3], w[12], w[17], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[0] += +amp[0]; jamp[1] += -amp[0]; jamp[3] += -amp[0]; jamp[5] += +amp[0]; FFV1_0(w[3], w[12], w[19], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[1] += -amp[0]; jamp[2] += +amp[0]; jamp[3] += -amp[0]; jamp[4] += +amp[0]; FFV1_0(w[3], w[12], w[8], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[0] += -amp[0]; jamp[2] += +amp[0]; jamp[4] += +amp[0]; jamp[5] += -amp[0]; FFV1_2(w[3], w[0], cxtype(cIPC[2], cIPC[3]), cIPD[0], cIPD[1], w[12]); FFV1_1(w[2], w[1], cxtype(cIPC[2], cIPC[3]), cIPD[0], cIPD[1], w[9]); FFV1_2(w[12], w[4], cxtype(cIPC[2], cIPC[3]), cIPD[0], cIPD[1], w[20]); // Amplitude(s) for diagram number 33 FFV1_0(w[20], w[9], w[5], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[11] += -amp[0]; FFV1_2(w[12], w[5], cxtype(cIPC[2], cIPC[3]), cIPD[0], cIPD[1], w[21]); // Amplitude(s) for diagram number 34 FFV1_0(w[21], w[9], w[4], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[9] += -amp[0]; // Amplitude(s) for diagram number 35 FFV1_0(w[12], w[9], w[10], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[9] += +cxtype(0, 1) * amp[0]; jamp[11] += -cxtype(0, 1) * amp[0]; FFV1P0_3(w[12], w[2], cxtype(cIPC[2], cIPC[3]), 0., 0., w[22]); // Amplitude(s) for diagram number 36 VVV1_0(w[6], w[5], w[22], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[9] += +amp[0]; jamp[15] += -amp[0]; jamp[21] += -amp[0]; jamp[23] += +amp[0]; // Amplitude(s) for diagram number 37 FFV1_0(w[21], w[2], w[6], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[9] += +cxtype(0, 1) * amp[0]; jamp[15] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 38 FFV1_0(w[12], w[14], w[6], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[21] += +cxtype(0, 1) * amp[0]; jamp[23] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 39 VVV1_0(w[18], w[4], w[22], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[11] += +amp[0]; jamp[15] += -amp[0]; jamp[17] += +amp[0]; jamp[21] += -amp[0]; // Amplitude(s) for diagram number 40 FFV1_0(w[20], w[2], w[18], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[11] += +cxtype(0, 1) * amp[0]; jamp[21] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 41 FFV1_0(w[12], w[11], w[18], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[15] += +cxtype(0, 1) * amp[0]; jamp[17] += -cxtype(0, 1) * amp[0]; FFV1_2(w[12], w[1], cxtype(cIPC[2], cIPC[3]), cIPD[0], cIPD[1], w[23]); // Amplitude(s) for diagram number 42 FFV1_0(w[23], w[11], w[5], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[17] += -amp[0]; // Amplitude(s) for diagram number 43 FFV1_0(w[21], w[11], w[1], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[15] += -amp[0]; // Amplitude(s) for diagram number 44 FFV1_0(w[23], w[14], w[4], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[23] += -amp[0]; // Amplitude(s) for diagram number 45 FFV1_0(w[20], w[14], w[1], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[21] += -amp[0]; // Amplitude(s) for diagram number 46 FFV1_0(w[23], w[2], w[10], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[17] += +cxtype(0, 1) * amp[0]; jamp[23] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram 
number 47 VVV1_0(w[1], w[10], w[22], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[9] += +amp[0]; jamp[11] += -amp[0]; jamp[17] += -amp[0]; jamp[23] += +amp[0]; // Amplitude(s) for diagram number 48 FFV1_0(w[12], w[2], w[17], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[9] += +amp[0]; jamp[11] += -amp[0]; jamp[17] += -amp[0]; jamp[23] += +amp[0]; FFV1_0(w[12], w[2], w[19], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[11] += -amp[0]; jamp[15] += +amp[0]; jamp[17] += -amp[0]; jamp[21] += +amp[0]; FFV1_0(w[12], w[2], w[8], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[9] += -amp[0]; jamp[15] += +amp[0]; jamp[21] += +amp[0]; jamp[23] += -amp[0]; VVV1P0_1(w[0], w[4], cxtype(cIPC[0], cIPC[1]), 0., 0., w[12]); FFV1_2(w[3], w[12], cxtype(cIPC[2], cIPC[3]), cIPD[0], cIPD[1], w[22]); // Amplitude(s) for diagram number 49 FFV1_0(w[22], w[9], w[5], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[10] += +cxtype(0, 1) * amp[0]; jamp[11] += -cxtype(0, 1) * amp[0]; VVV1P0_1(w[12], w[5], cxtype(cIPC[0], cIPC[1]), 0., 0., w[23]); // Amplitude(s) for diagram number 50 FFV1_0(w[3], w[9], w[23], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[6] += +amp[0]; jamp[8] += -amp[0]; jamp[10] += -amp[0]; jamp[11] += +amp[0]; // Amplitude(s) for diagram number 51 FFV1_0(w[13], w[9], w[12], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[6] += +cxtype(0, 1) * amp[0]; jamp[8] += -cxtype(0, 1) * amp[0]; FFV1_1(w[2], w[12], cxtype(cIPC[2], cIPC[3]), cIPD[0], cIPD[1], w[20]); // Amplitude(s) for diagram number 52 FFV1_0(w[16], w[20], w[5], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[3] += +cxtype(0, 1) * amp[0]; jamp[13] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 53 FFV1_0(w[16], w[2], w[23], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[3] += +amp[0]; jamp[13] += -amp[0]; jamp[19] += -amp[0]; jamp[22] += +amp[0]; // Amplitude(s) for diagram number 54 FFV1_0(w[16], w[14], w[12], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[19] += +cxtype(0, 1) * amp[0]; jamp[22] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 55 FFV1_0(w[3], w[20], w[18], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[2] += +amp[0]; jamp[3] += -amp[0]; jamp[12] += -amp[0]; jamp[13] += +amp[0]; // Amplitude(s) for diagram number 56 FFV1_0(w[22], w[2], w[18], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[10] += +amp[0]; jamp[11] += -amp[0]; jamp[20] += -amp[0]; jamp[21] += +amp[0]; // Amplitude(s) for diagram number 57 VVV1_0(w[12], w[18], w[7], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[2] += -cxtype(0, 1) * amp[0]; jamp[3] += +cxtype(0, 1) * amp[0]; jamp[10] += +cxtype(0, 1) * amp[0]; jamp[11] += -cxtype(0, 1) * amp[0]; jamp[12] += +cxtype(0, 1) * amp[0]; jamp[13] += -cxtype(0, 1) * amp[0]; jamp[20] += -cxtype(0, 1) * amp[0]; jamp[21] += +cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 58 VVVV1_0(w[12], w[1], w[7], w[5], cxtype(cIPC[4], cIPC[5]), &amp[0]); jamp[2] += +cxtype(0, 1) * amp[0]; jamp[6] += -cxtype(0, 1) * amp[0]; jamp[8] += +cxtype(0, 1) * amp[0]; jamp[12] += -cxtype(0, 1) * amp[0]; jamp[19] += -cxtype(0, 1) * amp[0]; jamp[20] += +cxtype(0, 1) * amp[0]; jamp[21] += -cxtype(0, 1) * amp[0]; jamp[22] += +cxtype(0, 1) * amp[0]; VVVV3_0(w[12], w[1], w[7], w[5], cxtype(cIPC[4], cIPC[5]), &amp[0]); jamp[2] += +cxtype(0, 1) * amp[0]; jamp[3] += -cxtype(0, 1) * amp[0]; jamp[10] += -cxtype(0, 1) * amp[0]; jamp[11] += +cxtype(0, 1) * amp[0]; jamp[12] += -cxtype(0, 1) * amp[0]; jamp[13] += +cxtype(0, 1) * amp[0]; jamp[20] += +cxtype(0, 1) * amp[0]; jamp[21] += -cxtype(0, 1) * amp[0]; VVVV4_0(w[12], w[1], w[7], w[5], cxtype(cIPC[4], cIPC[5]), &amp[0]); jamp[3] += -cxtype(0, 1) 
* amp[0]; jamp[6] += +cxtype(0, 1) * amp[0]; jamp[8] += -cxtype(0, 1) * amp[0]; jamp[10] += -cxtype(0, 1) * amp[0]; jamp[11] += +cxtype(0, 1) * amp[0]; jamp[13] += +cxtype(0, 1) * amp[0]; jamp[19] += +cxtype(0, 1) * amp[0]; jamp[22] += -cxtype(0, 1) * amp[0]; VVV1P0_1(w[12], w[1], cxtype(cIPC[0], cIPC[1]), 0., 0., w[21]); // Amplitude(s) for diagram number 59 VVV1_0(w[7], w[5], w[21], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[2] += +cxtype(0, 1) * amp[0]; jamp[6] += -cxtype(0, 1) * amp[0]; jamp[8] += +cxtype(0, 1) * amp[0]; jamp[12] += -cxtype(0, 1) * amp[0]; jamp[19] += -cxtype(0, 1) * amp[0]; jamp[20] += +cxtype(0, 1) * amp[0]; jamp[21] += -cxtype(0, 1) * amp[0]; jamp[22] += +cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 60 VVV1_0(w[1], w[7], w[23], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[3] += -cxtype(0, 1) * amp[0]; jamp[6] += +cxtype(0, 1) * amp[0]; jamp[8] += -cxtype(0, 1) * amp[0]; jamp[10] += -cxtype(0, 1) * amp[0]; jamp[11] += +cxtype(0, 1) * amp[0]; jamp[13] += +cxtype(0, 1) * amp[0]; jamp[19] += +cxtype(0, 1) * amp[0]; jamp[22] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 61 FFV1_0(w[3], w[14], w[21], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[19] += +amp[0]; jamp[20] += -amp[0]; jamp[21] += +amp[0]; jamp[22] += -amp[0]; // Amplitude(s) for diagram number 62 FFV1_0(w[22], w[14], w[1], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[20] += +cxtype(0, 1) * amp[0]; jamp[21] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 63 FFV1_0(w[13], w[2], w[21], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[2] += +amp[0]; jamp[6] += -amp[0]; jamp[8] += +amp[0]; jamp[12] += -amp[0]; // Amplitude(s) for diagram number 64 FFV1_0(w[13], w[20], w[1], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[2] += +cxtype(0, 1) * amp[0]; jamp[12] += -cxtype(0, 1) * amp[0]; VVV1P0_1(w[0], w[5], cxtype(cIPC[0], cIPC[1]), 0., 0., w[20]); FFV1_2(w[3], w[20], cxtype(cIPC[2], cIPC[3]), cIPD[0], cIPD[1], w[21]); // Amplitude(s) for diagram number 65 FFV1_0(w[21], w[9], w[4], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[8] += +cxtype(0, 1) * amp[0]; jamp[9] += -cxtype(0, 1) * amp[0]; VVV1P0_1(w[20], w[4], cxtype(cIPC[0], cIPC[1]), 0., 0., w[22]); // Amplitude(s) for diagram number 66 FFV1_0(w[3], w[9], w[22], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[7] += +amp[0]; jamp[8] += -amp[0]; jamp[9] += +amp[0]; jamp[10] += -amp[0]; // Amplitude(s) for diagram number 67 FFV1_0(w[15], w[9], w[20], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[7] += +cxtype(0, 1) * amp[0]; jamp[10] += -cxtype(0, 1) * amp[0]; FFV1_1(w[2], w[20], cxtype(cIPC[2], cIPC[3]), cIPD[0], cIPD[1], w[23]); // Amplitude(s) for diagram number 68 FFV1_0(w[16], w[23], w[4], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[5] += +cxtype(0, 1) * amp[0]; jamp[19] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 69 FFV1_0(w[16], w[2], w[22], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[5] += +amp[0]; jamp[13] += -amp[0]; jamp[16] += +amp[0]; jamp[19] += -amp[0]; // Amplitude(s) for diagram number 70 FFV1_0(w[16], w[11], w[20], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[13] += +cxtype(0, 1) * amp[0]; jamp[16] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 71 FFV1_0(w[3], w[23], w[6], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[4] += +amp[0]; jamp[5] += -amp[0]; jamp[18] += -amp[0]; jamp[19] += +amp[0]; // Amplitude(s) for diagram number 72 FFV1_0(w[21], w[2], w[6], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[8] += +amp[0]; jamp[9] += -amp[0]; jamp[14] += -amp[0]; jamp[15] += +amp[0]; // Amplitude(s) for diagram number 73 VVV1_0(w[20], w[6], 
w[7], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[4] += -cxtype(0, 1) * amp[0]; jamp[5] += +cxtype(0, 1) * amp[0]; jamp[8] += +cxtype(0, 1) * amp[0]; jamp[9] += -cxtype(0, 1) * amp[0]; jamp[14] += -cxtype(0, 1) * amp[0]; jamp[15] += +cxtype(0, 1) * amp[0]; jamp[18] += +cxtype(0, 1) * amp[0]; jamp[19] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 74 VVVV1_0(w[20], w[1], w[7], w[4], cxtype(cIPC[4], cIPC[5]), &amp[0]); jamp[4] += +cxtype(0, 1) * amp[0]; jamp[7] += -cxtype(0, 1) * amp[0]; jamp[10] += +cxtype(0, 1) * amp[0]; jamp[13] += -cxtype(0, 1) * amp[0]; jamp[14] += +cxtype(0, 1) * amp[0]; jamp[15] += -cxtype(0, 1) * amp[0]; jamp[16] += +cxtype(0, 1) * amp[0]; jamp[18] += -cxtype(0, 1) * amp[0]; VVVV3_0(w[20], w[1], w[7], w[4], cxtype(cIPC[4], cIPC[5]), &amp[0]); jamp[4] += +cxtype(0, 1) * amp[0]; jamp[5] += -cxtype(0, 1) * amp[0]; jamp[8] += -cxtype(0, 1) * amp[0]; jamp[9] += +cxtype(0, 1) * amp[0]; jamp[14] += +cxtype(0, 1) * amp[0]; jamp[15] += -cxtype(0, 1) * amp[0]; jamp[18] += -cxtype(0, 1) * amp[0]; jamp[19] += +cxtype(0, 1) * amp[0]; VVVV4_0(w[20], w[1], w[7], w[4], cxtype(cIPC[4], cIPC[5]), &amp[0]); jamp[5] += -cxtype(0, 1) * amp[0]; jamp[7] += +cxtype(0, 1) * amp[0]; jamp[8] += -cxtype(0, 1) * amp[0]; jamp[9] += +cxtype(0, 1) * amp[0]; jamp[10] += -cxtype(0, 1) * amp[0]; jamp[13] += +cxtype(0, 1) * amp[0]; jamp[16] += -cxtype(0, 1) * amp[0]; jamp[19] += +cxtype(0, 1) * amp[0]; VVV1P0_1(w[20], w[1], cxtype(cIPC[0], cIPC[1]), 0., 0., w[12]); // Amplitude(s) for diagram number 75 VVV1_0(w[7], w[4], w[12], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[4] += +cxtype(0, 1) * amp[0]; jamp[7] += -cxtype(0, 1) * amp[0]; jamp[10] += +cxtype(0, 1) * amp[0]; jamp[13] += -cxtype(0, 1) * amp[0]; jamp[14] += +cxtype(0, 1) * amp[0]; jamp[15] += -cxtype(0, 1) * amp[0]; jamp[16] += +cxtype(0, 1) * amp[0]; jamp[18] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 76 VVV1_0(w[1], w[7], w[22], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[5] += -cxtype(0, 1) * amp[0]; jamp[7] += +cxtype(0, 1) * amp[0]; jamp[8] += -cxtype(0, 1) * amp[0]; jamp[9] += +cxtype(0, 1) * amp[0]; jamp[10] += -cxtype(0, 1) * amp[0]; jamp[13] += +cxtype(0, 1) * amp[0]; jamp[16] += -cxtype(0, 1) * amp[0]; jamp[19] += +cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 77 FFV1_0(w[3], w[11], w[12], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[13] += +amp[0]; jamp[14] += -amp[0]; jamp[15] += +amp[0]; jamp[16] += -amp[0]; // Amplitude(s) for diagram number 78 FFV1_0(w[21], w[11], w[1], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[14] += +cxtype(0, 1) * amp[0]; jamp[15] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 79 FFV1_0(w[15], w[2], w[12], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[4] += +amp[0]; jamp[7] += -amp[0]; jamp[10] += +amp[0]; jamp[18] += -amp[0]; // Amplitude(s) for diagram number 80 FFV1_0(w[15], w[23], w[1], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[4] += +cxtype(0, 1) * amp[0]; jamp[18] += -cxtype(0, 1) * amp[0]; FFV1_1(w[9], w[0], cxtype(cIPC[2], cIPC[3]), cIPD[0], cIPD[1], w[23]); // Amplitude(s) for diagram number 81 FFV1_0(w[15], w[23], w[5], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[7] += -amp[0]; FFV1_2(w[15], w[0], cxtype(cIPC[2], cIPC[3]), cIPD[0], cIPD[1], w[12]); // Amplitude(s) for diagram number 82 FFV1_0(w[12], w[9], w[5], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[10] += -amp[0]; // Amplitude(s) for diagram number 83 FFV1_0(w[13], w[23], w[4], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[6] += -amp[0]; FFV1_2(w[13], w[0], cxtype(cIPC[2], cIPC[3]), cIPD[0], cIPD[1], w[21]); // 
Amplitude(s) for diagram number 84 FFV1_0(w[21], w[9], w[4], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[8] += -amp[0]; // Amplitude(s) for diagram number 85 FFV1_0(w[3], w[23], w[10], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[6] += +cxtype(0, 1) * amp[0]; jamp[7] += -cxtype(0, 1) * amp[0]; VVV1P0_1(w[0], w[10], cxtype(cIPC[0], cIPC[1]), 0., 0., w[23]); // Amplitude(s) for diagram number 86 FFV1_0(w[3], w[9], w[23], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[6] += +amp[0]; jamp[7] += -amp[0]; jamp[9] += -amp[0]; jamp[11] += +amp[0]; FFV1_2(w[16], w[0], cxtype(cIPC[2], cIPC[3]), cIPD[0], cIPD[1], w[22]); // Amplitude(s) for diagram number 87 FFV1_0(w[22], w[11], w[5], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[16] += -amp[0]; FFV1_1(w[11], w[0], cxtype(cIPC[2], cIPC[3]), cIPD[0], cIPD[1], w[20]); // Amplitude(s) for diagram number 88 FFV1_0(w[16], w[20], w[5], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[13] += -amp[0]; // Amplitude(s) for diagram number 89 FFV1_0(w[22], w[14], w[4], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[22] += -amp[0]; FFV1_1(w[14], w[0], cxtype(cIPC[2], cIPC[3]), cIPD[0], cIPD[1], w[24]); // Amplitude(s) for diagram number 90 FFV1_0(w[16], w[24], w[4], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[19] += -amp[0]; // Amplitude(s) for diagram number 91 FFV1_0(w[22], w[2], w[10], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[16] += +cxtype(0, 1) * amp[0]; jamp[22] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 92 FFV1_0(w[16], w[2], w[23], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[3] += +amp[0]; jamp[5] += -amp[0]; jamp[16] += -amp[0]; jamp[22] += +amp[0]; // Amplitude(s) for diagram number 93 VVVV1_0(w[0], w[6], w[7], w[5], cxtype(cIPC[4], cIPC[5]), &amp[0]); jamp[0] += +cxtype(0, 1) * amp[0]; jamp[2] += -cxtype(0, 1) * amp[0]; jamp[8] += -cxtype(0, 1) * amp[0]; jamp[14] += +cxtype(0, 1) * amp[0]; jamp[18] += -cxtype(0, 1) * amp[0]; jamp[19] += +cxtype(0, 1) * amp[0]; jamp[21] += +cxtype(0, 1) * amp[0]; jamp[23] += -cxtype(0, 1) * amp[0]; VVVV3_0(w[0], w[6], w[7], w[5], cxtype(cIPC[4], cIPC[5]), &amp[0]); jamp[0] += +cxtype(0, 1) * amp[0]; jamp[2] += -cxtype(0, 1) * amp[0]; jamp[4] += -cxtype(0, 1) * amp[0]; jamp[5] += +cxtype(0, 1) * amp[0]; jamp[9] += -cxtype(0, 1) * amp[0]; jamp[15] += +cxtype(0, 1) * amp[0]; jamp[21] += +cxtype(0, 1) * amp[0]; jamp[23] += -cxtype(0, 1) * amp[0]; VVVV4_0(w[0], w[6], w[7], w[5], cxtype(cIPC[4], cIPC[5]), &amp[0]); jamp[4] += -cxtype(0, 1) * amp[0]; jamp[5] += +cxtype(0, 1) * amp[0]; jamp[8] += +cxtype(0, 1) * amp[0]; jamp[9] += -cxtype(0, 1) * amp[0]; jamp[14] += -cxtype(0, 1) * amp[0]; jamp[15] += +cxtype(0, 1) * amp[0]; jamp[18] += +cxtype(0, 1) * amp[0]; jamp[19] += -cxtype(0, 1) * amp[0]; VVV1P0_1(w[0], w[6], cxtype(cIPC[0], cIPC[1]), 0., 0., w[22]); // Amplitude(s) for diagram number 94 VVV1_0(w[7], w[5], w[22], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[0] += +cxtype(0, 1) * amp[0]; jamp[2] += -cxtype(0, 1) * amp[0]; jamp[8] += -cxtype(0, 1) * amp[0]; jamp[14] += +cxtype(0, 1) * amp[0]; jamp[18] += -cxtype(0, 1) * amp[0]; jamp[19] += +cxtype(0, 1) * amp[0]; jamp[21] += +cxtype(0, 1) * amp[0]; jamp[23] += -cxtype(0, 1) * amp[0]; VVV1P0_1(w[0], w[7], cxtype(cIPC[0], cIPC[1]), 0., 0., w[25]); // Amplitude(s) for diagram number 95 VVV1_0(w[6], w[5], w[25], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[0] += +cxtype(0, 1) * amp[0]; jamp[2] += -cxtype(0, 1) * amp[0]; jamp[4] += -cxtype(0, 1) * amp[0]; jamp[5] += +cxtype(0, 1) * amp[0]; jamp[9] += -cxtype(0, 1) * amp[0]; jamp[15] += +cxtype(0, 1) * amp[0]; jamp[21] += +cxtype(0, 1) * amp[0]; jamp[23] 
+= -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 96 FFV1_0(w[3], w[14], w[22], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[18] += +amp[0]; jamp[19] += -amp[0]; jamp[21] += -amp[0]; jamp[23] += +amp[0]; // Amplitude(s) for diagram number 97 FFV1_0(w[3], w[24], w[6], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[18] += +cxtype(0, 1) * amp[0]; jamp[19] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 98 FFV1_0(w[13], w[2], w[22], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[0] += +amp[0]; jamp[2] += -amp[0]; jamp[8] += -amp[0]; jamp[14] += +amp[0]; // Amplitude(s) for diagram number 99 FFV1_0(w[21], w[2], w[6], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[8] += +cxtype(0, 1) * amp[0]; jamp[14] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 100 VVVV1_0(w[0], w[18], w[7], w[4], cxtype(cIPC[4], cIPC[5]), &amp[0]); jamp[1] += +cxtype(0, 1) * amp[0]; jamp[4] += -cxtype(0, 1) * amp[0]; jamp[10] += -cxtype(0, 1) * amp[0]; jamp[12] += -cxtype(0, 1) * amp[0]; jamp[13] += +cxtype(0, 1) * amp[0]; jamp[15] += +cxtype(0, 1) * amp[0]; jamp[17] += -cxtype(0, 1) * amp[0]; jamp[20] += +cxtype(0, 1) * amp[0]; VVVV3_0(w[0], w[18], w[7], w[4], cxtype(cIPC[4], cIPC[5]), &amp[0]); jamp[1] += +cxtype(0, 1) * amp[0]; jamp[2] += -cxtype(0, 1) * amp[0]; jamp[3] += +cxtype(0, 1) * amp[0]; jamp[4] += -cxtype(0, 1) * amp[0]; jamp[11] += -cxtype(0, 1) * amp[0]; jamp[15] += +cxtype(0, 1) * amp[0]; jamp[17] += -cxtype(0, 1) * amp[0]; jamp[21] += +cxtype(0, 1) * amp[0]; VVVV4_0(w[0], w[18], w[7], w[4], cxtype(cIPC[4], cIPC[5]), &amp[0]); jamp[2] += -cxtype(0, 1) * amp[0]; jamp[3] += +cxtype(0, 1) * amp[0]; jamp[10] += +cxtype(0, 1) * amp[0]; jamp[11] += -cxtype(0, 1) * amp[0]; jamp[12] += +cxtype(0, 1) * amp[0]; jamp[13] += -cxtype(0, 1) * amp[0]; jamp[20] += -cxtype(0, 1) * amp[0]; jamp[21] += +cxtype(0, 1) * amp[0]; VVV1P0_1(w[0], w[18], cxtype(cIPC[0], cIPC[1]), 0., 0., w[6]); // Amplitude(s) for diagram number 101 VVV1_0(w[7], w[4], w[6], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[1] += +cxtype(0, 1) * amp[0]; jamp[4] += -cxtype(0, 1) * amp[0]; jamp[10] += -cxtype(0, 1) * amp[0]; jamp[12] += -cxtype(0, 1) * amp[0]; jamp[13] += +cxtype(0, 1) * amp[0]; jamp[15] += +cxtype(0, 1) * amp[0]; jamp[17] += -cxtype(0, 1) * amp[0]; jamp[20] += +cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 102 VVV1_0(w[18], w[4], w[25], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[1] += +cxtype(0, 1) * amp[0]; jamp[2] += -cxtype(0, 1) * amp[0]; jamp[3] += +cxtype(0, 1) * amp[0]; jamp[4] += -cxtype(0, 1) * amp[0]; jamp[11] += -cxtype(0, 1) * amp[0]; jamp[15] += +cxtype(0, 1) * amp[0]; jamp[17] += -cxtype(0, 1) * amp[0]; jamp[21] += +cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 103 FFV1_0(w[3], w[11], w[6], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[12] += +amp[0]; jamp[13] += -amp[0]; jamp[15] += -amp[0]; jamp[17] += +amp[0]; // Amplitude(s) for diagram number 104 FFV1_0(w[3], w[20], w[18], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[12] += +cxtype(0, 1) * amp[0]; jamp[13] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 105 FFV1_0(w[15], w[2], w[6], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[1] += +amp[0]; jamp[4] += -amp[0]; jamp[10] += -amp[0]; jamp[20] += +amp[0]; // Amplitude(s) for diagram number 106 FFV1_0(w[12], w[2], w[18], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[10] += +cxtype(0, 1) * amp[0]; jamp[20] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 107 VVVV1_0(w[0], w[1], w[7], w[10], cxtype(cIPC[4], cIPC[5]), &amp[0]); jamp[0] += +cxtype(0, 1) * amp[0]; jamp[1] += -cxtype(0, 
1) * amp[0]; jamp[6] += -cxtype(0, 1) * amp[0]; jamp[7] += +cxtype(0, 1) * amp[0]; jamp[16] += -cxtype(0, 1) * amp[0]; jamp[17] += +cxtype(0, 1) * amp[0]; jamp[22] += +cxtype(0, 1) * amp[0]; jamp[23] += -cxtype(0, 1) * amp[0]; VVVV3_0(w[0], w[1], w[7], w[10], cxtype(cIPC[4], cIPC[5]), &amp[0]); jamp[0] += +cxtype(0, 1) * amp[0]; jamp[1] += -cxtype(0, 1) * amp[0]; jamp[3] += -cxtype(0, 1) * amp[0]; jamp[5] += +cxtype(0, 1) * amp[0]; jamp[9] += -cxtype(0, 1) * amp[0]; jamp[11] += +cxtype(0, 1) * amp[0]; jamp[17] += +cxtype(0, 1) * amp[0]; jamp[23] += -cxtype(0, 1) * amp[0]; VVVV4_0(w[0], w[1], w[7], w[10], cxtype(cIPC[4], cIPC[5]), &amp[0]); jamp[3] += -cxtype(0, 1) * amp[0]; jamp[5] += +cxtype(0, 1) * amp[0]; jamp[6] += +cxtype(0, 1) * amp[0]; jamp[7] += -cxtype(0, 1) * amp[0]; jamp[9] += -cxtype(0, 1) * amp[0]; jamp[11] += +cxtype(0, 1) * amp[0]; jamp[16] += +cxtype(0, 1) * amp[0]; jamp[22] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 108 VVV1_0(w[1], w[10], w[25], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[0] += +cxtype(0, 1) * amp[0]; jamp[1] += -cxtype(0, 1) * amp[0]; jamp[3] += -cxtype(0, 1) * amp[0]; jamp[5] += +cxtype(0, 1) * amp[0]; jamp[9] += -cxtype(0, 1) * amp[0]; jamp[11] += +cxtype(0, 1) * amp[0]; jamp[17] += +cxtype(0, 1) * amp[0]; jamp[23] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 109 VVV1_0(w[1], w[7], w[23], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[3] += -cxtype(0, 1) * amp[0]; jamp[5] += +cxtype(0, 1) * amp[0]; jamp[6] += +cxtype(0, 1) * amp[0]; jamp[7] += -cxtype(0, 1) * amp[0]; jamp[9] += -cxtype(0, 1) * amp[0]; jamp[11] += +cxtype(0, 1) * amp[0]; jamp[16] += +cxtype(0, 1) * amp[0]; jamp[22] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 110 FFV1_0(w[13], w[20], w[1], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[12] += -amp[0]; // Amplitude(s) for diagram number 111 FFV1_0(w[21], w[11], w[1], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[14] += -amp[0]; // Amplitude(s) for diagram number 112 FFV1_0(w[15], w[24], w[1], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[18] += -amp[0]; // Amplitude(s) for diagram number 113 FFV1_0(w[12], w[14], w[1], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[20] += -amp[0]; VVVV1P0_1(w[0], w[1], w[4], cxtype(cIPC[4], cIPC[5]), 0., 0., w[12]); VVVV3P0_1(w[0], w[1], w[4], cxtype(cIPC[4], cIPC[5]), 0., 0., w[24]); VVVV4P0_1(w[0], w[1], w[4], cxtype(cIPC[4], cIPC[5]), 0., 0., w[21]); // Amplitude(s) for diagram number 114 VVV1_0(w[12], w[7], w[5], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[0] += +cxtype(0, 1) * amp[0]; jamp[2] += -cxtype(0, 1) * amp[0]; jamp[8] += -cxtype(0, 1) * amp[0]; jamp[14] += +cxtype(0, 1) * amp[0]; jamp[18] += -cxtype(0, 1) * amp[0]; jamp[19] += +cxtype(0, 1) * amp[0]; jamp[21] += +cxtype(0, 1) * amp[0]; jamp[23] += -cxtype(0, 1) * amp[0]; VVV1_0(w[24], w[7], w[5], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[2] += -cxtype(0, 1) * amp[0]; jamp[6] += +cxtype(0, 1) * amp[0]; jamp[8] += -cxtype(0, 1) * amp[0]; jamp[12] += +cxtype(0, 1) * amp[0]; jamp[19] += +cxtype(0, 1) * amp[0]; jamp[20] += -cxtype(0, 1) * amp[0]; jamp[21] += +cxtype(0, 1) * amp[0]; jamp[22] += -cxtype(0, 1) * amp[0]; VVV1_0(w[21], w[7], w[5], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[0] += -cxtype(0, 1) * amp[0]; jamp[6] += +cxtype(0, 1) * amp[0]; jamp[12] += +cxtype(0, 1) * amp[0]; jamp[14] += -cxtype(0, 1) * amp[0]; jamp[18] += +cxtype(0, 1) * amp[0]; jamp[20] += -cxtype(0, 1) * amp[0]; jamp[22] += -cxtype(0, 1) * amp[0]; jamp[23] += +cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 115 FFV1_0(w[3], w[14], w[12], 
cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[18] += +amp[0]; jamp[19] += -amp[0]; jamp[21] += -amp[0]; jamp[23] += +amp[0]; FFV1_0(w[3], w[14], w[24], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[19] += -amp[0]; jamp[20] += +amp[0]; jamp[21] += -amp[0]; jamp[22] += +amp[0]; FFV1_0(w[3], w[14], w[21], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[18] += -amp[0]; jamp[20] += +amp[0]; jamp[22] += +amp[0]; jamp[23] += -amp[0]; // Amplitude(s) for diagram number 116 FFV1_0(w[13], w[2], w[12], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[0] += +amp[0]; jamp[2] += -amp[0]; jamp[8] += -amp[0]; jamp[14] += +amp[0]; FFV1_0(w[13], w[2], w[24], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[2] += -amp[0]; jamp[6] += +amp[0]; jamp[8] += -amp[0]; jamp[12] += +amp[0]; FFV1_0(w[13], w[2], w[21], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[0] += -amp[0]; jamp[6] += +amp[0]; jamp[12] += +amp[0]; jamp[14] += -amp[0]; VVVV1P0_1(w[0], w[1], w[5], cxtype(cIPC[4], cIPC[5]), 0., 0., w[21]); VVVV3P0_1(w[0], w[1], w[5], cxtype(cIPC[4], cIPC[5]), 0., 0., w[13]); VVVV4P0_1(w[0], w[1], w[5], cxtype(cIPC[4], cIPC[5]), 0., 0., w[24]); // Amplitude(s) for diagram number 117 VVV1_0(w[21], w[7], w[4], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[1] += +cxtype(0, 1) * amp[0]; jamp[4] += -cxtype(0, 1) * amp[0]; jamp[10] += -cxtype(0, 1) * amp[0]; jamp[12] += -cxtype(0, 1) * amp[0]; jamp[13] += +cxtype(0, 1) * amp[0]; jamp[15] += +cxtype(0, 1) * amp[0]; jamp[17] += -cxtype(0, 1) * amp[0]; jamp[20] += +cxtype(0, 1) * amp[0]; VVV1_0(w[13], w[7], w[4], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[4] += -cxtype(0, 1) * amp[0]; jamp[7] += +cxtype(0, 1) * amp[0]; jamp[10] += -cxtype(0, 1) * amp[0]; jamp[13] += +cxtype(0, 1) * amp[0]; jamp[14] += -cxtype(0, 1) * amp[0]; jamp[15] += +cxtype(0, 1) * amp[0]; jamp[16] += -cxtype(0, 1) * amp[0]; jamp[18] += +cxtype(0, 1) * amp[0]; VVV1_0(w[24], w[7], w[4], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[1] += -cxtype(0, 1) * amp[0]; jamp[7] += +cxtype(0, 1) * amp[0]; jamp[12] += +cxtype(0, 1) * amp[0]; jamp[14] += -cxtype(0, 1) * amp[0]; jamp[16] += -cxtype(0, 1) * amp[0]; jamp[17] += +cxtype(0, 1) * amp[0]; jamp[18] += +cxtype(0, 1) * amp[0]; jamp[20] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 118 FFV1_0(w[3], w[11], w[21], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[12] += +amp[0]; jamp[13] += -amp[0]; jamp[15] += -amp[0]; jamp[17] += +amp[0]; FFV1_0(w[3], w[11], w[13], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[13] += -amp[0]; jamp[14] += +amp[0]; jamp[15] += -amp[0]; jamp[16] += +amp[0]; FFV1_0(w[3], w[11], w[24], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[12] += -amp[0]; jamp[14] += +amp[0]; jamp[16] += +amp[0]; jamp[17] += -amp[0]; // Amplitude(s) for diagram number 119 FFV1_0(w[15], w[2], w[21], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[1] += +amp[0]; jamp[4] += -amp[0]; jamp[10] += -amp[0]; jamp[20] += +amp[0]; FFV1_0(w[15], w[2], w[13], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[4] += -amp[0]; jamp[7] += +amp[0]; jamp[10] += -amp[0]; jamp[18] += +amp[0]; FFV1_0(w[15], w[2], w[24], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[1] += -amp[0]; jamp[7] += +amp[0]; jamp[18] += +amp[0]; jamp[20] += -amp[0]; VVVV1P0_1(w[0], w[4], w[5], cxtype(cIPC[4], cIPC[5]), 0., 0., w[24]); VVVV3P0_1(w[0], w[4], w[5], cxtype(cIPC[4], cIPC[5]), 0., 0., w[15]); VVVV4P0_1(w[0], w[4], w[5], cxtype(cIPC[4], cIPC[5]), 0., 0., w[13]); // Amplitude(s) for diagram number 120 FFV1_0(w[3], w[9], w[24], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[6] += +amp[0]; jamp[7] += -amp[0]; jamp[9] += -amp[0]; jamp[11] += +amp[0]; FFV1_0(w[3], w[9], w[15], 
cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[7] += -amp[0]; jamp[8] += +amp[0]; jamp[9] += -amp[0]; jamp[10] += +amp[0]; FFV1_0(w[3], w[9], w[13], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[6] += -amp[0]; jamp[8] += +amp[0]; jamp[10] += +amp[0]; jamp[11] += -amp[0]; // Amplitude(s) for diagram number 121 FFV1_0(w[16], w[2], w[24], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[3] += +amp[0]; jamp[5] += -amp[0]; jamp[16] += -amp[0]; jamp[22] += +amp[0]; FFV1_0(w[16], w[2], w[15], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[5] += -amp[0]; jamp[13] += +amp[0]; jamp[16] += -amp[0]; jamp[19] += +amp[0]; FFV1_0(w[16], w[2], w[13], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[3] += -amp[0]; jamp[13] += +amp[0]; jamp[19] += +amp[0]; jamp[22] += -amp[0]; // Amplitude(s) for diagram number 122 VVV1_0(w[24], w[1], w[7], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[3] += -cxtype(0, 1) * amp[0]; jamp[5] += +cxtype(0, 1) * amp[0]; jamp[6] += +cxtype(0, 1) * amp[0]; jamp[7] += -cxtype(0, 1) * amp[0]; jamp[9] += -cxtype(0, 1) * amp[0]; jamp[11] += +cxtype(0, 1) * amp[0]; jamp[16] += +cxtype(0, 1) * amp[0]; jamp[22] += -cxtype(0, 1) * amp[0]; VVV1_0(w[15], w[1], w[7], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[5] += +cxtype(0, 1) * amp[0]; jamp[7] += -cxtype(0, 1) * amp[0]; jamp[8] += +cxtype(0, 1) * amp[0]; jamp[9] += -cxtype(0, 1) * amp[0]; jamp[10] += +cxtype(0, 1) * amp[0]; jamp[13] += -cxtype(0, 1) * amp[0]; jamp[16] += +cxtype(0, 1) * amp[0]; jamp[19] += -cxtype(0, 1) * amp[0]; VVV1_0(w[13], w[1], w[7], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[3] += +cxtype(0, 1) * amp[0]; jamp[6] += -cxtype(0, 1) * amp[0]; jamp[8] += +cxtype(0, 1) * amp[0]; jamp[10] += +cxtype(0, 1) * amp[0]; jamp[11] += -cxtype(0, 1) * amp[0]; jamp[13] += -cxtype(0, 1) * amp[0]; jamp[19] += -cxtype(0, 1) * amp[0]; jamp[22] += +cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 123 VVV1_0(w[0], w[17], w[7], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[0] += -cxtype(0, 1) * amp[0]; jamp[1] += +cxtype(0, 1) * amp[0]; jamp[3] += +cxtype(0, 1) * amp[0]; jamp[5] += -cxtype(0, 1) * amp[0]; jamp[9] += +cxtype(0, 1) * amp[0]; jamp[11] += -cxtype(0, 1) * amp[0]; jamp[17] += -cxtype(0, 1) * amp[0]; jamp[23] += +cxtype(0, 1) * amp[0]; VVV1_0(w[0], w[19], w[7], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[1] += +cxtype(0, 1) * amp[0]; jamp[2] += -cxtype(0, 1) * amp[0]; jamp[3] += +cxtype(0, 1) * amp[0]; jamp[4] += -cxtype(0, 1) * amp[0]; jamp[11] += -cxtype(0, 1) * amp[0]; jamp[15] += +cxtype(0, 1) * amp[0]; jamp[17] += -cxtype(0, 1) * amp[0]; jamp[21] += +cxtype(0, 1) * amp[0]; VVV1_0(w[0], w[8], w[7], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[0] += +cxtype(0, 1) * amp[0]; jamp[2] += -cxtype(0, 1) * amp[0]; jamp[4] += -cxtype(0, 1) * amp[0]; jamp[5] += +cxtype(0, 1) * amp[0]; jamp[9] += -cxtype(0, 1) * amp[0]; jamp[15] += +cxtype(0, 1) * amp[0]; jamp[21] += +cxtype(0, 1) * amp[0]; jamp[23] += -cxtype(0, 1) * amp[0]; // double CPPProcess::matrix_1_gg_ttxgg() { // Local variables // The color matrix; static const fptype denom[ncolor] = {54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54}; static const fptype cf[ncolor][ncolor] = {{512, -64, -64, 8, 8, 80, -64, 8, 8, -1, -1, -10, 8, -1, 80, -10, 71, 62, -1, -10, -10, 62, 62, -28}, {-64, 512, 8, 80, -64, 8, 8, -64, -1, -10, 8, -1, -1, -10, -10, 62, 62, -28, 8, -1, 80, -10, 71, 62}, {-64, 8, 512, -64, 80, 8, 8, -1, 80, -10, 71, 62, -64, 8, 8, -1, -1, -10, -10, -1, 62, -28, -10, 62}, {8, 80, -64, 512, 8, -64, -1, -10, -10, 62, 62, -28, 8, -64, -1, -10, 8, -1, -1, 8, 71, 62, 80, 
-10}, {8, -64, 80, 8, 512, -64, -1, 8, 71, 62, 80, -10, -10, -1, 62, -28, -10, 62, -64, 8, 8, -1, -1, -10}, {80, 8, 8, -64, -64, 512, -10, -1, 62, -28, -10, 62, -1, 8, 71, 62, 80, -10, 8, -64, -1, -10, 8, -1}, {-64, 8, 8, -1, -1, -10, 512, -64, -64, 8, 8, 80, 80, -10, 8, -1, 62, 71, -10, 62, -1, -10, -28, 62}, {8, -64, -1, -10, 8, -1, -64, 512, 8, 80, -64, 8, -10, 62, -1, -10, -28, 62, 80, -10, 8, -1, 62, 71}, {8, -1, 80, -10, 71, 62, -64, 8, 512, -64, 80, 8, 8, -1, -64, 8, -10, -1, 62, -28, -10, -1, 62, -10}, {-1, -10, -10, 62, 62, -28, 8, 80, -64, 512, 8, -64, -1, -10, 8, -64, -1, 8, 71, 62, -1, 8, -10, 80}, {-1, 8, 71, 62, 80, -10, 8, -64, 80, 8, 512, -64, 62, -28, -10, -1, 62, -10, 8, -1, -64, 8, -10, -1}, {-10, -1, 62, -28, -10, 62, 80, 8, 8, -64, -64, 512, 71, 62, -1, 8, -10, 80, -1, -10, 8, -64, -1, 8}, {8, -1, -64, 8, -10, -1, 80, -10, 8, -1, 62, 71, 512, -64, -64, 8, 8, 80, 62, -10, -28, 62, -1, -10}, {-1, -10, 8, -64, -1, 8, -10, 62, -1, -10, -28, 62, -64, 512, 8, 80, -64, 8, -10, 80, 62, 71, 8, -1}, {80, -10, 8, -1, 62, 71, 8, -1, -64, 8, -10, -1, -64, 8, 512, -64, 80, 8, -28, 62, 62, -10, -10, -1}, {-10, 62, -1, -10, -28, 62, -1, -10, 8, -64, -1, 8, 8, 80, -64, 512, 8, -64, 62, 71, -10, 80, -1, 8}, {71, 62, -1, 8, -10, 80, 62, -28, -10, -1, 62, -10, 8, -64, 80, 8, 512, -64, -1, 8, -10, -1, -64, 8}, {62, -28, -10, -1, 62, -10, 71, 62, -1, 8, -10, 80, 80, 8, 8, -64, -64, 512, -10, -1, -1, 8, 8, -64}, {-1, 8, -10, -1, -64, 8, -10, 80, 62, 71, 8, -1, 62, -10, -28, 62, -1, -10, 512, -64, -64, 8, 8, 80}, {-10, -1, -1, 8, 8, -64, 62, -10, -28, 62, -1, -10, -10, 80, 62, 71, 8, -1, -64, 512, 8, 80, -64, 8}, {-10, 80, 62, 71, 8, -1, -1, 8, -10, -1, -64, 8, -28, 62, 62, -10, -10, -1, -64, 8, 512, -64, 80, 8}, {62, -10, -28, 62, -1, -10, -10, -1, -1, 8, 8, -64, 62, 71, -10, 80, -1, 8, 8, 80, -64, 512, 8, -64}, {62, 71, -10, 80, -1, 8, -28, 62, 62, -10, -10, -1, -1, 8, -10, -1, -64, 8, 8, -64, 80, 8, 512, -64}, {-28, 62, 62, -10, -10, -1, 62, 71, -10, 80, -1, 8, -10, -1, -1, 8, 8, -64, 80, 8, 8, -64, -64, 512}}; // Sum and square the color flows to get the matrix element for(int icol = 0; icol < ncolor; icol++ ) { cxtype ztemp = cxmake(0, 0); for(int jcol = 0; jcol < ncolor; jcol++ ) ztemp = ztemp + cf[icol][jcol] * jamp[jcol]; meHelSum = meHelSum + cxreal(ztemp * conj(jamp[icol]))/denom[icol]; } // Store the leading color flows for choice of color // for(i=0;i < ncolor; i++) // jamp2[0][i] += real(jamp[i]*conj(jamp[i])); mgDebug(1, __FUNCTION__); return; } CPPProcess::CPPProcess(int numiterations, int gpublocks, int gputhreads, bool verbose, bool debug) : m_numiterations(numiterations), gpu_nblocks(gpublocks), gpu_nthreads(gputhreads), m_verbose(verbose), dim(gpu_nblocks * gpu_nthreads) { // Helicities for the process - nodim static const int tHel[ncomb][nexternal] = {{-1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, 1}, {-1, -1, -1, -1, 1, -1}, {-1, -1, -1, -1, 1, 1}, {-1, -1, -1, 1, -1, -1}, {-1, -1, -1, 1, -1, 1}, {-1, -1, -1, 1, 1, -1}, {-1, -1, -1, 1, 1, 1}, {-1, -1, 1, -1, -1, -1}, {-1, -1, 1, -1, -1, 1}, {-1, -1, 1, -1, 1, -1}, {-1, -1, 1, -1, 1, 1}, {-1, -1, 1, 1, -1, -1}, {-1, -1, 1, 1, -1, 1}, {-1, -1, 1, 1, 1, -1}, {-1, -1, 1, 1, 1, 1}, {-1, 1, -1, -1, -1, -1}, {-1, 1, -1, -1, -1, 1}, {-1, 1, -1, -1, 1, -1}, {-1, 1, -1, -1, 1, 1}, {-1, 1, -1, 1, -1, -1}, {-1, 1, -1, 1, -1, 1}, {-1, 1, -1, 1, 1, -1}, {-1, 1, -1, 1, 1, 1}, {-1, 1, 1, -1, -1, -1}, {-1, 1, 1, -1, -1, 1}, {-1, 1, 1, -1, 1, -1}, {-1, 1, 1, -1, 1, 1}, {-1, 1, 1, 1, -1, -1}, {-1, 1, 1, 1, -1, 1}, {-1, 1, 1, 1, 1, -1}, {-1, 
1, 1, 1, 1, 1}, {1, -1, -1, -1, -1, -1}, {1, -1, -1, -1, -1, 1}, {1, -1, -1, -1, 1, -1}, {1, -1, -1, -1, 1, 1}, {1, -1, -1, 1, -1, -1}, {1, -1, -1, 1, -1, 1}, {1, -1, -1, 1, 1, -1}, {1, -1, -1, 1, 1, 1}, {1, -1, 1, -1, -1, -1}, {1, -1, 1, -1, -1, 1}, {1, -1, 1, -1, 1, -1}, {1, -1, 1, -1, 1, 1}, {1, -1, 1, 1, -1, -1}, {1, -1, 1, 1, -1, 1}, {1, -1, 1, 1, 1, -1}, {1, -1, 1, 1, 1, 1}, {1, 1, -1, -1, -1, -1}, {1, 1, -1, -1, -1, 1}, {1, 1, -1, -1, 1, -1}, {1, 1, -1, -1, 1, 1}, {1, 1, -1, 1, -1, -1}, {1, 1, -1, 1, -1, 1}, {1, 1, -1, 1, 1, -1}, {1, 1, -1, 1, 1, 1}, {1, 1, 1, -1, -1, -1}, {1, 1, 1, -1, -1, 1}, {1, 1, 1, -1, 1, -1}, {1, 1, 1, -1, 1, 1}, {1, 1, 1, 1, -1, -1}, {1, 1, 1, 1, -1, 1}, {1, 1, 1, 1, 1, -1}, {1, 1, 1, 1, 1, 1}}; #ifdef __HIPCC__ checkCuda(hipMemcpyToSymbol(cHel, tHel, ncomb * nexternal * sizeof(int))); #else memcpy(cHel, tHel, ncomb * nexternal * sizeof(int)); #endif // SANITY CHECK: GPU memory usage may be based on casts of fptype[2] to cxtype assert(sizeof(cxtype) == 2 * sizeof(fptype)); } CPPProcess::~CPPProcess() {} const std::vector<fptype> &CPPProcess::getMasses() const {return mME;} //-------------------------------------------------------------------------- // Initialize process. void CPPProcess::initProc(string param_card_name) { // Instantiate the model class and set parameters that stay fixed during run pars = Parameters_sm::getInstance(); SLHAReader slha(param_card_name, m_verbose); pars->setIndependentParameters(slha); pars->setIndependentCouplings(); if (m_verbose) { pars->printIndependentParameters(); pars->printIndependentCouplings(); } pars->setDependentParameters(); pars->setDependentCouplings(); // Set external particle masses for this matrix element mME.push_back(pars->ZERO); mME.push_back(pars->ZERO); mME.push_back(pars->mdl_MT); mME.push_back(pars->mdl_MT); mME.push_back(pars->ZERO); mME.push_back(pars->ZERO); static cxtype tIPC[3] = {cxmake(pars->GC_10), cxmake(pars->GC_11), cxmake(pars->GC_12)}; static fptype tIPD[2] = {(fptype)pars->mdl_MT, (fptype)pars->mdl_WT}; #ifdef __HIPCC__ checkCuda(hipMemcpyToSymbol(cIPC, tIPC, 3 * sizeof(cxtype))); checkCuda(hipMemcpyToSymbol(cIPD, tIPD, 2 * sizeof(fptype))); #else memcpy(cIPC, tIPC, 3 * sizeof(cxtype)); memcpy(cIPD, tIPD, 2 * sizeof(fptype)); #endif } //-------------------------------------------------------------------------- #ifdef __HIPCC__ __global__ void sigmaKin_getGoodHel(const fptype * allmomenta, // input: momenta as AOSOA[npagM][npar][4][neppM] with nevt=npagM*neppM bool * isGoodHel) // output: isGoodHel[ncomb] - device array { const int nprocesses = 1; // FIXME: assume process.nprocesses == 1 fptype meHelSum[nprocesses] = {0}; // all zeros fptype meHelSumLast = 0; for (int ihel = 0; ihel < ncomb; ihel++ ) { // NB: calculate_wavefunctions ADDS |M|^2 for a given ihel to the running // sum of |M|^2 over helicities for the given event calculate_wavefunctions(ihel, allmomenta, meHelSum[0]); if (meHelSum[0] != meHelSumLast) { isGoodHel[ihel] = true; meHelSumLast = meHelSum[0]; } } } #endif //-------------------------------------------------------------------------- #ifdef __HIPCC__ void sigmaKin_setGoodHel(const bool * isGoodHel) // input: isGoodHel[ncomb] - host array { int nGoodHel[1] = {0}; int goodHel[ncomb] = {0}; for (int ihel = 0; ihel < ncomb; ihel++ ) { // std::cout << "sigmaKin_setGoodHel ihel=" << ihel << ( isGoodHel[ihel] ? 
// " true" : " false" ) << std::endl; if (isGoodHel[ihel]) { goodHel[nGoodHel[0]] = ihel; nGoodHel[0]++; } } checkCuda(hipMemcpyToSymbol(cNGoodHel, nGoodHel, sizeof(int))); checkCuda(hipMemcpyToSymbol(cGoodHel, goodHel, ncomb * sizeof(int))); } #endif //-------------------------------------------------------------------------- // Evaluate |M|^2, part independent of incoming flavour. __global__ void sigmaKin(const fptype * allmomenta, fptype * allMEs #ifndef __HIPCC__ , const int nevt // input: #events (for cuda: nevt == ndim == gpublocks*gputhreads) #endif ) { // Set the parameters which change event by event // Need to discuss this with Stefan // pars->setDependentParameters(); // pars->setDependentCouplings(); #ifndef __HIPCC__ const int maxtry = 10; static unsigned long long sigmakin_itry = 0; // first iteration over nevt events static bool sigmakin_goodhel[ncomb] = {false}; #endif // Reset color flows // start sigmakin_lines mgDebugInitialise(); // Set the parameters which change event by event // Need to discuss this with Stefan // pars->setDependentParameters(); // pars->setDependentCouplings(); // Reset color flows #ifndef __HIPCC__ //** START LOOP ON IEVT ** for (int ievt = 0; ievt < nevt; ++ ievt) #endif { #ifdef __HIPCC__ const int idim = blockDim.x * blockIdx.x + threadIdx.x; // event# == threadid (previously was: tid) const int ievt = idim; // printf( "sigmakin: ievt %d\n", ievt ); #endif // Denominators: spins, colors and identical particles const int nprocesses = 1; // FIXME: assume process.nprocesses == 1 const int denominators[1] = {512}; // Reset the "matrix elements" - running sums of |M|^2 over helicities for // the given event fptype meHelSum[nprocesses] = {0}; // all zeros #ifdef __HIPCC__ // CUDA - using precomputed good helicities for (int ighel = 0; ighel < cNGoodHel[0]; ighel++ ) { const int ihel = cGoodHel[ighel]; calculate_wavefunctions(ihel, allmomenta, meHelSum[0]); } #else // C++ - compute good helicities within this loop fptype meHelSumLast = 0; // check for good helicities for (int ihel = 0; ihel < ncomb; ihel++ ) { if (sigmakin_itry > maxtry && !sigmakin_goodhel[ihel]) continue; // NB: calculate_wavefunctions ADDS |M|^2 for a given ihel to the running // sum of |M|^2 over helicities for the given event calculate_wavefunctions(ihel, allmomenta, meHelSum[0], ievt); if (sigmakin_itry <= maxtry) { if ( !sigmakin_goodhel[ihel] && meHelSum[0] > meHelSumLast) sigmakin_goodhel[ihel] = true; meHelSumLast = meHelSum[0]; } } #endif // Get the final |M|^2 as an average over helicities/colors of the running // sum of |M|^2 over helicities for the given event // [NB 'sum over final spins, average over initial spins', eg see // https://www.uzh.ch/cmsssl/physik/dam/jcr:2e24b7b1-f4d7-4160-817e-47b13dbf // 1d7c/Handout_4_2016-UZH.pdf] for (int iproc = 0; iproc < nprocesses; ++ iproc) { meHelSum[iproc] /= denominators[iproc]; } // Set the final average |M|^2 for this event in the output array for all // events for (int iproc = 0; iproc < nprocesses; ++ iproc) { allMEs[iproc * nprocesses + ievt] = meHelSum[iproc]; } #ifndef __HIPCC__ if (sigmakin_itry <= maxtry) sigmakin_itry++; // if ( sigmakin_itry == maxtry ) // for (int ihel = 0; ihel < ncomb; ihel++ ) // printf( "sigmakin: ihelgood %2d %d\n", ihel, sigmakin_goodhel[ihel] ); #endif //** END LOOP ON IEVT ** mgDebugFinalise(); } //-------------------------------------------------------------------------- } } //========================================================================== // Private class member functions 
//--------------------------------------------------------------------------
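For reference, the colour-matrix contraction that closes calculate_wavefunctions above computes the helicity contribution to |M|^2 as jamp^dagger * (cf/denom) * jamp. The standalone sketch below restates that reduction with std::complex<double> in place of cxtype; the function name colourSum and its parameters are illustrative only and are not part of the generated code.

// Sketch of the colour sum used at the end of calculate_wavefunctions:
// meHelSum += sum_i Re( (sum_j cf[i][j] * jamp[j]) * conj(jamp[i]) ) / denom[i]
#include <complex>
#include <vector>

double colourSum(const std::vector<std::complex<double>>& jamp,
                 const std::vector<std::vector<double>>& cf,
                 const std::vector<double>& denom)
{
  double meHelSum = 0.;
  const int ncolor = (int)jamp.size();
  for (int icol = 0; icol < ncolor; ++icol)
  {
    std::complex<double> ztemp(0., 0.);
    for (int jcol = 0; jcol < ncolor; ++jcol)
      ztemp += cf[icol][jcol] * jamp[jcol];  // one row of the colour matrix times the colour amplitudes
    meHelSum += (ztemp * std::conj(jamp[icol])).real() / denom[icol];  // keep the real part, as in the generated code
  }
  return meHelSum;  // this helicity's contribution to |M|^2, before spin/colour averaging
}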
1682bf98115f42c6aea9a5868d3c110d094254c2.cu
//========================================================================== // This file has been automatically generated for C++ Standalone by // MadGraph5_aMC@NLO v. 2.8.2, 2020-10-30 // By the MadGraph5_aMC@NLO Development Team // Visit launchpad.net/madgraph5 and amcatnlo.web.cern.ch //========================================================================== #include "../../src/HelAmps_sm.cu" #include <algorithm> #include <iostream> #include "mgOnGpuTypes.h" #include "mgOnGpuConfig.h" #include "gCPPProcess.h" //========================================================================== // Class member functions for calculating the matrix elements for // Process: g g > t t~ g g WEIGHTED<=4 @1 #ifdef __CUDACC__ namespace gProc #else namespace Proc #endif { using mgOnGpu::np4; // 4: the dimension of 4-momenta (E,px,py,pz) using mgOnGpu::npar; // number of particles in total (initial + final) using mgOnGpu::ncomb; // number of helicity combinations #ifdef __CUDACC__ __device__ __constant__ int cHel[ncomb][npar]; __device__ __constant__ fptype cIPC[6]; __device__ __constant__ fptype cIPD[2]; __device__ __constant__ int cNGoodHel[1]; __device__ __constant__ int cGoodHel[ncomb]; #else static int cHel[ncomb][npar]; static fptype cIPC[6]; static fptype cIPD[2]; #endif //-------------------------------------------------------------------------- using mgOnGpu::nwf; using mgOnGpu::nw6; //-------------------------------------------------------------------------- // Evaluate |M|^2 for each subprocess // NB: calculate_wavefunctions ADDS |M|^2 for a given ihel to the running sum // of |M|^2 over helicities for the given event __device__ void calculate_wavefunctions(int ihel, const fptype * allmomenta, fptype &meHelSum #ifndef __CUDACC__ , const int ievt #endif ) { using namespace MG5_sm; mgDebug(0, __FUNCTION__); cxtype amp[1]; // was 159 const int ncolor = 24; cxtype jamp[ncolor]; // Calculate wavefunctions for all processes using namespace MG5_sm; cxtype w[nwf][nw6]; for(int i = 0; i < 24; i++ ) { jamp[i] = cxtype(0., 0.); } #ifdef __CUDACC__ vxxxxx(allmomenta, 0., cHel[ihel][0], -1, w[0], 0); #else vxxxxx(allmomenta, 0., cHel[ihel][0], -1, w[0], ievt, 0); #endif #ifdef __CUDACC__ vxxxxx(allmomenta, 0., cHel[ihel][1], -1, w[1], 1); #else vxxxxx(allmomenta, 0., cHel[ihel][1], -1, w[1], ievt, 1); #endif #ifdef __CUDACC__ oxxxxx(allmomenta, cIPD[0], cHel[ihel][2], +1, w[2], 2); #else oxxxxx(allmomenta, cIPD[0], cHel[ihel][2], +1, w[2], ievt, 2); #endif #ifdef __CUDACC__ ixxxxx(allmomenta, cIPD[0], cHel[ihel][3], -1, w[3], 3); #else ixxxxx(allmomenta, cIPD[0], cHel[ihel][3], -1, w[3], ievt, 3); #endif #ifdef __CUDACC__ vxxxxx(allmomenta, 0., cHel[ihel][4], +1, w[4], 4); #else vxxxxx(allmomenta, 0., cHel[ihel][4], +1, w[4], ievt, 4); #endif #ifdef __CUDACC__ vxxxxx(allmomenta, 0., cHel[ihel][5], +1, w[5], 5); #else vxxxxx(allmomenta, 0., cHel[ihel][5], +1, w[5], ievt, 5); #endif VVV1P0_1(w[0], w[1], cxtype(cIPC[0], cIPC[1]), 0., 0., w[6]); FFV1P0_3(w[3], w[2], cxtype(cIPC[2], cIPC[3]), 0., 0., w[7]); // Amplitude(s) for diagram number 1 VVVV1_0(w[6], w[7], w[4], w[5], cxtype(cIPC[4], cIPC[5]), &amp[0]); jamp[0] += +cxtype(0, 1) * amp[0]; jamp[1] += -cxtype(0, 1) * amp[0]; jamp[6] += -cxtype(0, 1) * amp[0]; jamp[7] += +cxtype(0, 1) * amp[0]; jamp[16] += -cxtype(0, 1) * amp[0]; jamp[17] += +cxtype(0, 1) * amp[0]; jamp[22] += +cxtype(0, 1) * amp[0]; jamp[23] += -cxtype(0, 1) * amp[0]; VVVV3_0(w[6], w[7], w[4], w[5], cxtype(cIPC[4], cIPC[5]), &amp[0]); jamp[0] += +cxtype(0, 1) * amp[0]; jamp[6] += 
-cxtype(0, 1) * amp[0]; jamp[12] += -cxtype(0, 1) * amp[0]; jamp[14] += +cxtype(0, 1) * amp[0]; jamp[18] += -cxtype(0, 1) * amp[0]; jamp[20] += +cxtype(0, 1) * amp[0]; jamp[22] += +cxtype(0, 1) * amp[0]; jamp[23] += -cxtype(0, 1) * amp[0]; VVVV4_0(w[6], w[7], w[4], w[5], cxtype(cIPC[4], cIPC[5]), &amp[0]); jamp[1] += +cxtype(0, 1) * amp[0]; jamp[7] += -cxtype(0, 1) * amp[0]; jamp[12] += -cxtype(0, 1) * amp[0]; jamp[14] += +cxtype(0, 1) * amp[0]; jamp[16] += +cxtype(0, 1) * amp[0]; jamp[17] += -cxtype(0, 1) * amp[0]; jamp[18] += -cxtype(0, 1) * amp[0]; jamp[20] += +cxtype(0, 1) * amp[0]; VVV1P0_1(w[6], w[4], cxtype(cIPC[0], cIPC[1]), 0., 0., w[8]); // Amplitude(s) for diagram number 2 VVV1_0(w[7], w[5], w[8], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[0] += +cxtype(0, 1) * amp[0]; jamp[6] += -cxtype(0, 1) * amp[0]; jamp[12] += -cxtype(0, 1) * amp[0]; jamp[14] += +cxtype(0, 1) * amp[0]; jamp[18] += -cxtype(0, 1) * amp[0]; jamp[20] += +cxtype(0, 1) * amp[0]; jamp[22] += +cxtype(0, 1) * amp[0]; jamp[23] += -cxtype(0, 1) * amp[0]; VVV1P0_1(w[6], w[5], cxtype(cIPC[0], cIPC[1]), 0., 0., w[9]); // Amplitude(s) for diagram number 3 VVV1_0(w[7], w[4], w[9], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[1] += +cxtype(0, 1) * amp[0]; jamp[7] += -cxtype(0, 1) * amp[0]; jamp[12] += -cxtype(0, 1) * amp[0]; jamp[14] += +cxtype(0, 1) * amp[0]; jamp[16] += +cxtype(0, 1) * amp[0]; jamp[17] += -cxtype(0, 1) * amp[0]; jamp[18] += -cxtype(0, 1) * amp[0]; jamp[20] += +cxtype(0, 1) * amp[0]; VVV1P0_1(w[4], w[5], cxtype(cIPC[0], cIPC[1]), 0., 0., w[10]); // Amplitude(s) for diagram number 4 VVV1_0(w[6], w[7], w[10], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[0] += +cxtype(0, 1) * amp[0]; jamp[1] += -cxtype(0, 1) * amp[0]; jamp[6] += -cxtype(0, 1) * amp[0]; jamp[7] += +cxtype(0, 1) * amp[0]; jamp[16] += -cxtype(0, 1) * amp[0]; jamp[17] += +cxtype(0, 1) * amp[0]; jamp[22] += +cxtype(0, 1) * amp[0]; jamp[23] += -cxtype(0, 1) * amp[0]; FFV1_1(w[2], w[4], cxtype(cIPC[2], cIPC[3]), cIPD[0], cIPD[1], w[11]); FFV1_2(w[3], w[6], cxtype(cIPC[2], cIPC[3]), cIPD[0], cIPD[1], w[12]); // Amplitude(s) for diagram number 5 FFV1_0(w[12], w[11], w[5], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[16] += +cxtype(0, 1) * amp[0]; jamp[17] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 6 FFV1_0(w[3], w[11], w[9], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[12] += +amp[0]; jamp[14] += -amp[0]; jamp[16] += -amp[0]; jamp[17] += +amp[0]; FFV1_2(w[3], w[5], cxtype(cIPC[2], cIPC[3]), cIPD[0], cIPD[1], w[13]); // Amplitude(s) for diagram number 7 FFV1_0(w[13], w[11], w[6], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[12] += +cxtype(0, 1) * amp[0]; jamp[14] += -cxtype(0, 1) * amp[0]; FFV1_1(w[2], w[5], cxtype(cIPC[2], cIPC[3]), cIPD[0], cIPD[1], w[14]); // Amplitude(s) for diagram number 8 FFV1_0(w[12], w[14], w[4], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[22] += +cxtype(0, 1) * amp[0]; jamp[23] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 9 FFV1_0(w[3], w[14], w[8], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[18] += +amp[0]; jamp[20] += -amp[0]; jamp[22] += -amp[0]; jamp[23] += +amp[0]; FFV1_2(w[3], w[4], cxtype(cIPC[2], cIPC[3]), cIPD[0], cIPD[1], w[15]); // Amplitude(s) for diagram number 10 FFV1_0(w[15], w[14], w[6], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[18] += +cxtype(0, 1) * amp[0]; jamp[20] += -cxtype(0, 1) * amp[0]; FFV1_1(w[2], w[6], cxtype(cIPC[2], cIPC[3]), cIPD[0], cIPD[1], w[16]); // Amplitude(s) for diagram number 11 FFV1_0(w[15], w[16], w[5], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[1] += +cxtype(0, 1) * amp[0]; 
jamp[7] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 12 FFV1_0(w[15], w[2], w[9], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[1] += +amp[0]; jamp[7] += -amp[0]; jamp[18] += -amp[0]; jamp[20] += +amp[0]; // Amplitude(s) for diagram number 13 FFV1_0(w[13], w[16], w[4], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[0] += +cxtype(0, 1) * amp[0]; jamp[6] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 14 FFV1_0(w[13], w[2], w[8], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[0] += +amp[0]; jamp[6] += -amp[0]; jamp[12] += -amp[0]; jamp[14] += +amp[0]; // Amplitude(s) for diagram number 15 FFV1_0(w[3], w[16], w[10], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[0] += +amp[0]; jamp[1] += -amp[0]; jamp[6] += -amp[0]; jamp[7] += +amp[0]; // Amplitude(s) for diagram number 16 FFV1_0(w[12], w[2], w[10], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[16] += +amp[0]; jamp[17] += -amp[0]; jamp[22] += -amp[0]; jamp[23] += +amp[0]; FFV1_1(w[2], w[0], cxtype(cIPC[2], cIPC[3]), cIPD[0], cIPD[1], w[12]); FFV1_2(w[3], w[1], cxtype(cIPC[2], cIPC[3]), cIPD[0], cIPD[1], w[16]); FFV1_1(w[12], w[4], cxtype(cIPC[2], cIPC[3]), cIPD[0], cIPD[1], w[8]); // Amplitude(s) for diagram number 17 FFV1_0(w[16], w[8], w[5], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[3] += -amp[0]; FFV1_1(w[12], w[5], cxtype(cIPC[2], cIPC[3]), cIPD[0], cIPD[1], w[9]); // Amplitude(s) for diagram number 18 FFV1_0(w[16], w[9], w[4], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[5] += -amp[0]; // Amplitude(s) for diagram number 19 FFV1_0(w[16], w[12], w[10], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[3] += +cxtype(0, 1) * amp[0]; jamp[5] += -cxtype(0, 1) * amp[0]; VVV1P0_1(w[1], w[4], cxtype(cIPC[0], cIPC[1]), 0., 0., w[6]); FFV1P0_3(w[3], w[12], cxtype(cIPC[2], cIPC[3]), 0., 0., w[17]); // Amplitude(s) for diagram number 20 VVV1_0(w[6], w[5], w[17], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[0] += +amp[0]; jamp[2] += -amp[0]; jamp[4] += -amp[0]; jamp[5] += +amp[0]; // Amplitude(s) for diagram number 21 FFV1_0(w[3], w[9], w[6], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[4] += +cxtype(0, 1) * amp[0]; jamp[5] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 22 FFV1_0(w[13], w[12], w[6], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[0] += +cxtype(0, 1) * amp[0]; jamp[2] += -cxtype(0, 1) * amp[0]; VVV1P0_1(w[1], w[5], cxtype(cIPC[0], cIPC[1]), 0., 0., w[18]); // Amplitude(s) for diagram number 23 VVV1_0(w[18], w[4], w[17], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[1] += +amp[0]; jamp[2] += -amp[0]; jamp[3] += +amp[0]; jamp[4] += -amp[0]; // Amplitude(s) for diagram number 24 FFV1_0(w[3], w[8], w[18], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[2] += +cxtype(0, 1) * amp[0]; jamp[3] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 25 FFV1_0(w[15], w[12], w[18], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[1] += +cxtype(0, 1) * amp[0]; jamp[4] += -cxtype(0, 1) * amp[0]; FFV1_1(w[12], w[1], cxtype(cIPC[2], cIPC[3]), cIPD[0], cIPD[1], w[19]); // Amplitude(s) for diagram number 26 FFV1_0(w[15], w[19], w[5], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[1] += -amp[0]; // Amplitude(s) for diagram number 27 FFV1_0(w[15], w[9], w[1], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[4] += -amp[0]; // Amplitude(s) for diagram number 28 FFV1_0(w[13], w[19], w[4], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[0] += -amp[0]; // Amplitude(s) for diagram number 29 FFV1_0(w[13], w[8], w[1], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[2] += -amp[0]; // Amplitude(s) for diagram number 30 FFV1_0(w[3], w[19], w[10], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[0] += +cxtype(0, 1) * amp[0]; 
jamp[1] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 31 VVV1_0(w[1], w[10], w[17], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[0] += +amp[0]; jamp[1] += -amp[0]; jamp[3] += -amp[0]; jamp[5] += +amp[0]; VVVV1P0_1(w[1], w[4], w[5], cxtype(cIPC[4], cIPC[5]), 0., 0., w[17]); VVVV3P0_1(w[1], w[4], w[5], cxtype(cIPC[4], cIPC[5]), 0., 0., w[19]); VVVV4P0_1(w[1], w[4], w[5], cxtype(cIPC[4], cIPC[5]), 0., 0., w[8]); // Amplitude(s) for diagram number 32 FFV1_0(w[3], w[12], w[17], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[0] += +amp[0]; jamp[1] += -amp[0]; jamp[3] += -amp[0]; jamp[5] += +amp[0]; FFV1_0(w[3], w[12], w[19], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[1] += -amp[0]; jamp[2] += +amp[0]; jamp[3] += -amp[0]; jamp[4] += +amp[0]; FFV1_0(w[3], w[12], w[8], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[0] += -amp[0]; jamp[2] += +amp[0]; jamp[4] += +amp[0]; jamp[5] += -amp[0]; FFV1_2(w[3], w[0], cxtype(cIPC[2], cIPC[3]), cIPD[0], cIPD[1], w[12]); FFV1_1(w[2], w[1], cxtype(cIPC[2], cIPC[3]), cIPD[0], cIPD[1], w[9]); FFV1_2(w[12], w[4], cxtype(cIPC[2], cIPC[3]), cIPD[0], cIPD[1], w[20]); // Amplitude(s) for diagram number 33 FFV1_0(w[20], w[9], w[5], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[11] += -amp[0]; FFV1_2(w[12], w[5], cxtype(cIPC[2], cIPC[3]), cIPD[0], cIPD[1], w[21]); // Amplitude(s) for diagram number 34 FFV1_0(w[21], w[9], w[4], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[9] += -amp[0]; // Amplitude(s) for diagram number 35 FFV1_0(w[12], w[9], w[10], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[9] += +cxtype(0, 1) * amp[0]; jamp[11] += -cxtype(0, 1) * amp[0]; FFV1P0_3(w[12], w[2], cxtype(cIPC[2], cIPC[3]), 0., 0., w[22]); // Amplitude(s) for diagram number 36 VVV1_0(w[6], w[5], w[22], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[9] += +amp[0]; jamp[15] += -amp[0]; jamp[21] += -amp[0]; jamp[23] += +amp[0]; // Amplitude(s) for diagram number 37 FFV1_0(w[21], w[2], w[6], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[9] += +cxtype(0, 1) * amp[0]; jamp[15] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 38 FFV1_0(w[12], w[14], w[6], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[21] += +cxtype(0, 1) * amp[0]; jamp[23] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 39 VVV1_0(w[18], w[4], w[22], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[11] += +amp[0]; jamp[15] += -amp[0]; jamp[17] += +amp[0]; jamp[21] += -amp[0]; // Amplitude(s) for diagram number 40 FFV1_0(w[20], w[2], w[18], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[11] += +cxtype(0, 1) * amp[0]; jamp[21] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 41 FFV1_0(w[12], w[11], w[18], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[15] += +cxtype(0, 1) * amp[0]; jamp[17] += -cxtype(0, 1) * amp[0]; FFV1_2(w[12], w[1], cxtype(cIPC[2], cIPC[3]), cIPD[0], cIPD[1], w[23]); // Amplitude(s) for diagram number 42 FFV1_0(w[23], w[11], w[5], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[17] += -amp[0]; // Amplitude(s) for diagram number 43 FFV1_0(w[21], w[11], w[1], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[15] += -amp[0]; // Amplitude(s) for diagram number 44 FFV1_0(w[23], w[14], w[4], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[23] += -amp[0]; // Amplitude(s) for diagram number 45 FFV1_0(w[20], w[14], w[1], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[21] += -amp[0]; // Amplitude(s) for diagram number 46 FFV1_0(w[23], w[2], w[10], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[17] += +cxtype(0, 1) * amp[0]; jamp[23] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 47 VVV1_0(w[1], w[10], w[22], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[9] 
+= +amp[0]; jamp[11] += -amp[0]; jamp[17] += -amp[0]; jamp[23] += +amp[0]; // Amplitude(s) for diagram number 48 FFV1_0(w[12], w[2], w[17], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[9] += +amp[0]; jamp[11] += -amp[0]; jamp[17] += -amp[0]; jamp[23] += +amp[0]; FFV1_0(w[12], w[2], w[19], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[11] += -amp[0]; jamp[15] += +amp[0]; jamp[17] += -amp[0]; jamp[21] += +amp[0]; FFV1_0(w[12], w[2], w[8], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[9] += -amp[0]; jamp[15] += +amp[0]; jamp[21] += +amp[0]; jamp[23] += -amp[0]; VVV1P0_1(w[0], w[4], cxtype(cIPC[0], cIPC[1]), 0., 0., w[12]); FFV1_2(w[3], w[12], cxtype(cIPC[2], cIPC[3]), cIPD[0], cIPD[1], w[22]); // Amplitude(s) for diagram number 49 FFV1_0(w[22], w[9], w[5], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[10] += +cxtype(0, 1) * amp[0]; jamp[11] += -cxtype(0, 1) * amp[0]; VVV1P0_1(w[12], w[5], cxtype(cIPC[0], cIPC[1]), 0., 0., w[23]); // Amplitude(s) for diagram number 50 FFV1_0(w[3], w[9], w[23], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[6] += +amp[0]; jamp[8] += -amp[0]; jamp[10] += -amp[0]; jamp[11] += +amp[0]; // Amplitude(s) for diagram number 51 FFV1_0(w[13], w[9], w[12], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[6] += +cxtype(0, 1) * amp[0]; jamp[8] += -cxtype(0, 1) * amp[0]; FFV1_1(w[2], w[12], cxtype(cIPC[2], cIPC[3]), cIPD[0], cIPD[1], w[20]); // Amplitude(s) for diagram number 52 FFV1_0(w[16], w[20], w[5], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[3] += +cxtype(0, 1) * amp[0]; jamp[13] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 53 FFV1_0(w[16], w[2], w[23], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[3] += +amp[0]; jamp[13] += -amp[0]; jamp[19] += -amp[0]; jamp[22] += +amp[0]; // Amplitude(s) for diagram number 54 FFV1_0(w[16], w[14], w[12], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[19] += +cxtype(0, 1) * amp[0]; jamp[22] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 55 FFV1_0(w[3], w[20], w[18], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[2] += +amp[0]; jamp[3] += -amp[0]; jamp[12] += -amp[0]; jamp[13] += +amp[0]; // Amplitude(s) for diagram number 56 FFV1_0(w[22], w[2], w[18], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[10] += +amp[0]; jamp[11] += -amp[0]; jamp[20] += -amp[0]; jamp[21] += +amp[0]; // Amplitude(s) for diagram number 57 VVV1_0(w[12], w[18], w[7], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[2] += -cxtype(0, 1) * amp[0]; jamp[3] += +cxtype(0, 1) * amp[0]; jamp[10] += +cxtype(0, 1) * amp[0]; jamp[11] += -cxtype(0, 1) * amp[0]; jamp[12] += +cxtype(0, 1) * amp[0]; jamp[13] += -cxtype(0, 1) * amp[0]; jamp[20] += -cxtype(0, 1) * amp[0]; jamp[21] += +cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 58 VVVV1_0(w[12], w[1], w[7], w[5], cxtype(cIPC[4], cIPC[5]), &amp[0]); jamp[2] += +cxtype(0, 1) * amp[0]; jamp[6] += -cxtype(0, 1) * amp[0]; jamp[8] += +cxtype(0, 1) * amp[0]; jamp[12] += -cxtype(0, 1) * amp[0]; jamp[19] += -cxtype(0, 1) * amp[0]; jamp[20] += +cxtype(0, 1) * amp[0]; jamp[21] += -cxtype(0, 1) * amp[0]; jamp[22] += +cxtype(0, 1) * amp[0]; VVVV3_0(w[12], w[1], w[7], w[5], cxtype(cIPC[4], cIPC[5]), &amp[0]); jamp[2] += +cxtype(0, 1) * amp[0]; jamp[3] += -cxtype(0, 1) * amp[0]; jamp[10] += -cxtype(0, 1) * amp[0]; jamp[11] += +cxtype(0, 1) * amp[0]; jamp[12] += -cxtype(0, 1) * amp[0]; jamp[13] += +cxtype(0, 1) * amp[0]; jamp[20] += +cxtype(0, 1) * amp[0]; jamp[21] += -cxtype(0, 1) * amp[0]; VVVV4_0(w[12], w[1], w[7], w[5], cxtype(cIPC[4], cIPC[5]), &amp[0]); jamp[3] += -cxtype(0, 1) * amp[0]; jamp[6] += +cxtype(0, 1) * amp[0]; jamp[8] += -cxtype(0, 1) * amp[0]; 
jamp[10] += -cxtype(0, 1) * amp[0]; jamp[11] += +cxtype(0, 1) * amp[0]; jamp[13] += +cxtype(0, 1) * amp[0]; jamp[19] += +cxtype(0, 1) * amp[0]; jamp[22] += -cxtype(0, 1) * amp[0]; VVV1P0_1(w[12], w[1], cxtype(cIPC[0], cIPC[1]), 0., 0., w[21]); // Amplitude(s) for diagram number 59 VVV1_0(w[7], w[5], w[21], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[2] += +cxtype(0, 1) * amp[0]; jamp[6] += -cxtype(0, 1) * amp[0]; jamp[8] += +cxtype(0, 1) * amp[0]; jamp[12] += -cxtype(0, 1) * amp[0]; jamp[19] += -cxtype(0, 1) * amp[0]; jamp[20] += +cxtype(0, 1) * amp[0]; jamp[21] += -cxtype(0, 1) * amp[0]; jamp[22] += +cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 60 VVV1_0(w[1], w[7], w[23], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[3] += -cxtype(0, 1) * amp[0]; jamp[6] += +cxtype(0, 1) * amp[0]; jamp[8] += -cxtype(0, 1) * amp[0]; jamp[10] += -cxtype(0, 1) * amp[0]; jamp[11] += +cxtype(0, 1) * amp[0]; jamp[13] += +cxtype(0, 1) * amp[0]; jamp[19] += +cxtype(0, 1) * amp[0]; jamp[22] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 61 FFV1_0(w[3], w[14], w[21], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[19] += +amp[0]; jamp[20] += -amp[0]; jamp[21] += +amp[0]; jamp[22] += -amp[0]; // Amplitude(s) for diagram number 62 FFV1_0(w[22], w[14], w[1], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[20] += +cxtype(0, 1) * amp[0]; jamp[21] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 63 FFV1_0(w[13], w[2], w[21], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[2] += +amp[0]; jamp[6] += -amp[0]; jamp[8] += +amp[0]; jamp[12] += -amp[0]; // Amplitude(s) for diagram number 64 FFV1_0(w[13], w[20], w[1], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[2] += +cxtype(0, 1) * amp[0]; jamp[12] += -cxtype(0, 1) * amp[0]; VVV1P0_1(w[0], w[5], cxtype(cIPC[0], cIPC[1]), 0., 0., w[20]); FFV1_2(w[3], w[20], cxtype(cIPC[2], cIPC[3]), cIPD[0], cIPD[1], w[21]); // Amplitude(s) for diagram number 65 FFV1_0(w[21], w[9], w[4], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[8] += +cxtype(0, 1) * amp[0]; jamp[9] += -cxtype(0, 1) * amp[0]; VVV1P0_1(w[20], w[4], cxtype(cIPC[0], cIPC[1]), 0., 0., w[22]); // Amplitude(s) for diagram number 66 FFV1_0(w[3], w[9], w[22], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[7] += +amp[0]; jamp[8] += -amp[0]; jamp[9] += +amp[0]; jamp[10] += -amp[0]; // Amplitude(s) for diagram number 67 FFV1_0(w[15], w[9], w[20], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[7] += +cxtype(0, 1) * amp[0]; jamp[10] += -cxtype(0, 1) * amp[0]; FFV1_1(w[2], w[20], cxtype(cIPC[2], cIPC[3]), cIPD[0], cIPD[1], w[23]); // Amplitude(s) for diagram number 68 FFV1_0(w[16], w[23], w[4], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[5] += +cxtype(0, 1) * amp[0]; jamp[19] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 69 FFV1_0(w[16], w[2], w[22], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[5] += +amp[0]; jamp[13] += -amp[0]; jamp[16] += +amp[0]; jamp[19] += -amp[0]; // Amplitude(s) for diagram number 70 FFV1_0(w[16], w[11], w[20], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[13] += +cxtype(0, 1) * amp[0]; jamp[16] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 71 FFV1_0(w[3], w[23], w[6], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[4] += +amp[0]; jamp[5] += -amp[0]; jamp[18] += -amp[0]; jamp[19] += +amp[0]; // Amplitude(s) for diagram number 72 FFV1_0(w[21], w[2], w[6], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[8] += +amp[0]; jamp[9] += -amp[0]; jamp[14] += -amp[0]; jamp[15] += +amp[0]; // Amplitude(s) for diagram number 73 VVV1_0(w[20], w[6], w[7], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[4] += -cxtype(0, 1) * amp[0]; 
jamp[5] += +cxtype(0, 1) * amp[0]; jamp[8] += +cxtype(0, 1) * amp[0]; jamp[9] += -cxtype(0, 1) * amp[0]; jamp[14] += -cxtype(0, 1) * amp[0]; jamp[15] += +cxtype(0, 1) * amp[0]; jamp[18] += +cxtype(0, 1) * amp[0]; jamp[19] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 74 VVVV1_0(w[20], w[1], w[7], w[4], cxtype(cIPC[4], cIPC[5]), &amp[0]); jamp[4] += +cxtype(0, 1) * amp[0]; jamp[7] += -cxtype(0, 1) * amp[0]; jamp[10] += +cxtype(0, 1) * amp[0]; jamp[13] += -cxtype(0, 1) * amp[0]; jamp[14] += +cxtype(0, 1) * amp[0]; jamp[15] += -cxtype(0, 1) * amp[0]; jamp[16] += +cxtype(0, 1) * amp[0]; jamp[18] += -cxtype(0, 1) * amp[0]; VVVV3_0(w[20], w[1], w[7], w[4], cxtype(cIPC[4], cIPC[5]), &amp[0]); jamp[4] += +cxtype(0, 1) * amp[0]; jamp[5] += -cxtype(0, 1) * amp[0]; jamp[8] += -cxtype(0, 1) * amp[0]; jamp[9] += +cxtype(0, 1) * amp[0]; jamp[14] += +cxtype(0, 1) * amp[0]; jamp[15] += -cxtype(0, 1) * amp[0]; jamp[18] += -cxtype(0, 1) * amp[0]; jamp[19] += +cxtype(0, 1) * amp[0]; VVVV4_0(w[20], w[1], w[7], w[4], cxtype(cIPC[4], cIPC[5]), &amp[0]); jamp[5] += -cxtype(0, 1) * amp[0]; jamp[7] += +cxtype(0, 1) * amp[0]; jamp[8] += -cxtype(0, 1) * amp[0]; jamp[9] += +cxtype(0, 1) * amp[0]; jamp[10] += -cxtype(0, 1) * amp[0]; jamp[13] += +cxtype(0, 1) * amp[0]; jamp[16] += -cxtype(0, 1) * amp[0]; jamp[19] += +cxtype(0, 1) * amp[0]; VVV1P0_1(w[20], w[1], cxtype(cIPC[0], cIPC[1]), 0., 0., w[12]); // Amplitude(s) for diagram number 75 VVV1_0(w[7], w[4], w[12], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[4] += +cxtype(0, 1) * amp[0]; jamp[7] += -cxtype(0, 1) * amp[0]; jamp[10] += +cxtype(0, 1) * amp[0]; jamp[13] += -cxtype(0, 1) * amp[0]; jamp[14] += +cxtype(0, 1) * amp[0]; jamp[15] += -cxtype(0, 1) * amp[0]; jamp[16] += +cxtype(0, 1) * amp[0]; jamp[18] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 76 VVV1_0(w[1], w[7], w[22], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[5] += -cxtype(0, 1) * amp[0]; jamp[7] += +cxtype(0, 1) * amp[0]; jamp[8] += -cxtype(0, 1) * amp[0]; jamp[9] += +cxtype(0, 1) * amp[0]; jamp[10] += -cxtype(0, 1) * amp[0]; jamp[13] += +cxtype(0, 1) * amp[0]; jamp[16] += -cxtype(0, 1) * amp[0]; jamp[19] += +cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 77 FFV1_0(w[3], w[11], w[12], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[13] += +amp[0]; jamp[14] += -amp[0]; jamp[15] += +amp[0]; jamp[16] += -amp[0]; // Amplitude(s) for diagram number 78 FFV1_0(w[21], w[11], w[1], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[14] += +cxtype(0, 1) * amp[0]; jamp[15] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 79 FFV1_0(w[15], w[2], w[12], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[4] += +amp[0]; jamp[7] += -amp[0]; jamp[10] += +amp[0]; jamp[18] += -amp[0]; // Amplitude(s) for diagram number 80 FFV1_0(w[15], w[23], w[1], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[4] += +cxtype(0, 1) * amp[0]; jamp[18] += -cxtype(0, 1) * amp[0]; FFV1_1(w[9], w[0], cxtype(cIPC[2], cIPC[3]), cIPD[0], cIPD[1], w[23]); // Amplitude(s) for diagram number 81 FFV1_0(w[15], w[23], w[5], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[7] += -amp[0]; FFV1_2(w[15], w[0], cxtype(cIPC[2], cIPC[3]), cIPD[0], cIPD[1], w[12]); // Amplitude(s) for diagram number 82 FFV1_0(w[12], w[9], w[5], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[10] += -amp[0]; // Amplitude(s) for diagram number 83 FFV1_0(w[13], w[23], w[4], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[6] += -amp[0]; FFV1_2(w[13], w[0], cxtype(cIPC[2], cIPC[3]), cIPD[0], cIPD[1], w[21]); // Amplitude(s) for diagram number 84 FFV1_0(w[21], w[9], w[4], cxtype(cIPC[2], 
cIPC[3]), &amp[0]); jamp[8] += -amp[0]; // Amplitude(s) for diagram number 85 FFV1_0(w[3], w[23], w[10], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[6] += +cxtype(0, 1) * amp[0]; jamp[7] += -cxtype(0, 1) * amp[0]; VVV1P0_1(w[0], w[10], cxtype(cIPC[0], cIPC[1]), 0., 0., w[23]); // Amplitude(s) for diagram number 86 FFV1_0(w[3], w[9], w[23], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[6] += +amp[0]; jamp[7] += -amp[0]; jamp[9] += -amp[0]; jamp[11] += +amp[0]; FFV1_2(w[16], w[0], cxtype(cIPC[2], cIPC[3]), cIPD[0], cIPD[1], w[22]); // Amplitude(s) for diagram number 87 FFV1_0(w[22], w[11], w[5], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[16] += -amp[0]; FFV1_1(w[11], w[0], cxtype(cIPC[2], cIPC[3]), cIPD[0], cIPD[1], w[20]); // Amplitude(s) for diagram number 88 FFV1_0(w[16], w[20], w[5], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[13] += -amp[0]; // Amplitude(s) for diagram number 89 FFV1_0(w[22], w[14], w[4], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[22] += -amp[0]; FFV1_1(w[14], w[0], cxtype(cIPC[2], cIPC[3]), cIPD[0], cIPD[1], w[24]); // Amplitude(s) for diagram number 90 FFV1_0(w[16], w[24], w[4], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[19] += -amp[0]; // Amplitude(s) for diagram number 91 FFV1_0(w[22], w[2], w[10], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[16] += +cxtype(0, 1) * amp[0]; jamp[22] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 92 FFV1_0(w[16], w[2], w[23], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[3] += +amp[0]; jamp[5] += -amp[0]; jamp[16] += -amp[0]; jamp[22] += +amp[0]; // Amplitude(s) for diagram number 93 VVVV1_0(w[0], w[6], w[7], w[5], cxtype(cIPC[4], cIPC[5]), &amp[0]); jamp[0] += +cxtype(0, 1) * amp[0]; jamp[2] += -cxtype(0, 1) * amp[0]; jamp[8] += -cxtype(0, 1) * amp[0]; jamp[14] += +cxtype(0, 1) * amp[0]; jamp[18] += -cxtype(0, 1) * amp[0]; jamp[19] += +cxtype(0, 1) * amp[0]; jamp[21] += +cxtype(0, 1) * amp[0]; jamp[23] += -cxtype(0, 1) * amp[0]; VVVV3_0(w[0], w[6], w[7], w[5], cxtype(cIPC[4], cIPC[5]), &amp[0]); jamp[0] += +cxtype(0, 1) * amp[0]; jamp[2] += -cxtype(0, 1) * amp[0]; jamp[4] += -cxtype(0, 1) * amp[0]; jamp[5] += +cxtype(0, 1) * amp[0]; jamp[9] += -cxtype(0, 1) * amp[0]; jamp[15] += +cxtype(0, 1) * amp[0]; jamp[21] += +cxtype(0, 1) * amp[0]; jamp[23] += -cxtype(0, 1) * amp[0]; VVVV4_0(w[0], w[6], w[7], w[5], cxtype(cIPC[4], cIPC[5]), &amp[0]); jamp[4] += -cxtype(0, 1) * amp[0]; jamp[5] += +cxtype(0, 1) * amp[0]; jamp[8] += +cxtype(0, 1) * amp[0]; jamp[9] += -cxtype(0, 1) * amp[0]; jamp[14] += -cxtype(0, 1) * amp[0]; jamp[15] += +cxtype(0, 1) * amp[0]; jamp[18] += +cxtype(0, 1) * amp[0]; jamp[19] += -cxtype(0, 1) * amp[0]; VVV1P0_1(w[0], w[6], cxtype(cIPC[0], cIPC[1]), 0., 0., w[22]); // Amplitude(s) for diagram number 94 VVV1_0(w[7], w[5], w[22], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[0] += +cxtype(0, 1) * amp[0]; jamp[2] += -cxtype(0, 1) * amp[0]; jamp[8] += -cxtype(0, 1) * amp[0]; jamp[14] += +cxtype(0, 1) * amp[0]; jamp[18] += -cxtype(0, 1) * amp[0]; jamp[19] += +cxtype(0, 1) * amp[0]; jamp[21] += +cxtype(0, 1) * amp[0]; jamp[23] += -cxtype(0, 1) * amp[0]; VVV1P0_1(w[0], w[7], cxtype(cIPC[0], cIPC[1]), 0., 0., w[25]); // Amplitude(s) for diagram number 95 VVV1_0(w[6], w[5], w[25], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[0] += +cxtype(0, 1) * amp[0]; jamp[2] += -cxtype(0, 1) * amp[0]; jamp[4] += -cxtype(0, 1) * amp[0]; jamp[5] += +cxtype(0, 1) * amp[0]; jamp[9] += -cxtype(0, 1) * amp[0]; jamp[15] += +cxtype(0, 1) * amp[0]; jamp[21] += +cxtype(0, 1) * amp[0]; jamp[23] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 96 FFV1_0(w[3], 
w[14], w[22], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[18] += +amp[0]; jamp[19] += -amp[0]; jamp[21] += -amp[0]; jamp[23] += +amp[0]; // Amplitude(s) for diagram number 97 FFV1_0(w[3], w[24], w[6], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[18] += +cxtype(0, 1) * amp[0]; jamp[19] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 98 FFV1_0(w[13], w[2], w[22], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[0] += +amp[0]; jamp[2] += -amp[0]; jamp[8] += -amp[0]; jamp[14] += +amp[0]; // Amplitude(s) for diagram number 99 FFV1_0(w[21], w[2], w[6], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[8] += +cxtype(0, 1) * amp[0]; jamp[14] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 100 VVVV1_0(w[0], w[18], w[7], w[4], cxtype(cIPC[4], cIPC[5]), &amp[0]); jamp[1] += +cxtype(0, 1) * amp[0]; jamp[4] += -cxtype(0, 1) * amp[0]; jamp[10] += -cxtype(0, 1) * amp[0]; jamp[12] += -cxtype(0, 1) * amp[0]; jamp[13] += +cxtype(0, 1) * amp[0]; jamp[15] += +cxtype(0, 1) * amp[0]; jamp[17] += -cxtype(0, 1) * amp[0]; jamp[20] += +cxtype(0, 1) * amp[0]; VVVV3_0(w[0], w[18], w[7], w[4], cxtype(cIPC[4], cIPC[5]), &amp[0]); jamp[1] += +cxtype(0, 1) * amp[0]; jamp[2] += -cxtype(0, 1) * amp[0]; jamp[3] += +cxtype(0, 1) * amp[0]; jamp[4] += -cxtype(0, 1) * amp[0]; jamp[11] += -cxtype(0, 1) * amp[0]; jamp[15] += +cxtype(0, 1) * amp[0]; jamp[17] += -cxtype(0, 1) * amp[0]; jamp[21] += +cxtype(0, 1) * amp[0]; VVVV4_0(w[0], w[18], w[7], w[4], cxtype(cIPC[4], cIPC[5]), &amp[0]); jamp[2] += -cxtype(0, 1) * amp[0]; jamp[3] += +cxtype(0, 1) * amp[0]; jamp[10] += +cxtype(0, 1) * amp[0]; jamp[11] += -cxtype(0, 1) * amp[0]; jamp[12] += +cxtype(0, 1) * amp[0]; jamp[13] += -cxtype(0, 1) * amp[0]; jamp[20] += -cxtype(0, 1) * amp[0]; jamp[21] += +cxtype(0, 1) * amp[0]; VVV1P0_1(w[0], w[18], cxtype(cIPC[0], cIPC[1]), 0., 0., w[6]); // Amplitude(s) for diagram number 101 VVV1_0(w[7], w[4], w[6], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[1] += +cxtype(0, 1) * amp[0]; jamp[4] += -cxtype(0, 1) * amp[0]; jamp[10] += -cxtype(0, 1) * amp[0]; jamp[12] += -cxtype(0, 1) * amp[0]; jamp[13] += +cxtype(0, 1) * amp[0]; jamp[15] += +cxtype(0, 1) * amp[0]; jamp[17] += -cxtype(0, 1) * amp[0]; jamp[20] += +cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 102 VVV1_0(w[18], w[4], w[25], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[1] += +cxtype(0, 1) * amp[0]; jamp[2] += -cxtype(0, 1) * amp[0]; jamp[3] += +cxtype(0, 1) * amp[0]; jamp[4] += -cxtype(0, 1) * amp[0]; jamp[11] += -cxtype(0, 1) * amp[0]; jamp[15] += +cxtype(0, 1) * amp[0]; jamp[17] += -cxtype(0, 1) * amp[0]; jamp[21] += +cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 103 FFV1_0(w[3], w[11], w[6], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[12] += +amp[0]; jamp[13] += -amp[0]; jamp[15] += -amp[0]; jamp[17] += +amp[0]; // Amplitude(s) for diagram number 104 FFV1_0(w[3], w[20], w[18], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[12] += +cxtype(0, 1) * amp[0]; jamp[13] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 105 FFV1_0(w[15], w[2], w[6], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[1] += +amp[0]; jamp[4] += -amp[0]; jamp[10] += -amp[0]; jamp[20] += +amp[0]; // Amplitude(s) for diagram number 106 FFV1_0(w[12], w[2], w[18], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[10] += +cxtype(0, 1) * amp[0]; jamp[20] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 107 VVVV1_0(w[0], w[1], w[7], w[10], cxtype(cIPC[4], cIPC[5]), &amp[0]); jamp[0] += +cxtype(0, 1) * amp[0]; jamp[1] += -cxtype(0, 1) * amp[0]; jamp[6] += -cxtype(0, 1) * amp[0]; jamp[7] += +cxtype(0, 1) * 
amp[0]; jamp[16] += -cxtype(0, 1) * amp[0]; jamp[17] += +cxtype(0, 1) * amp[0]; jamp[22] += +cxtype(0, 1) * amp[0]; jamp[23] += -cxtype(0, 1) * amp[0]; VVVV3_0(w[0], w[1], w[7], w[10], cxtype(cIPC[4], cIPC[5]), &amp[0]); jamp[0] += +cxtype(0, 1) * amp[0]; jamp[1] += -cxtype(0, 1) * amp[0]; jamp[3] += -cxtype(0, 1) * amp[0]; jamp[5] += +cxtype(0, 1) * amp[0]; jamp[9] += -cxtype(0, 1) * amp[0]; jamp[11] += +cxtype(0, 1) * amp[0]; jamp[17] += +cxtype(0, 1) * amp[0]; jamp[23] += -cxtype(0, 1) * amp[0]; VVVV4_0(w[0], w[1], w[7], w[10], cxtype(cIPC[4], cIPC[5]), &amp[0]); jamp[3] += -cxtype(0, 1) * amp[0]; jamp[5] += +cxtype(0, 1) * amp[0]; jamp[6] += +cxtype(0, 1) * amp[0]; jamp[7] += -cxtype(0, 1) * amp[0]; jamp[9] += -cxtype(0, 1) * amp[0]; jamp[11] += +cxtype(0, 1) * amp[0]; jamp[16] += +cxtype(0, 1) * amp[0]; jamp[22] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 108 VVV1_0(w[1], w[10], w[25], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[0] += +cxtype(0, 1) * amp[0]; jamp[1] += -cxtype(0, 1) * amp[0]; jamp[3] += -cxtype(0, 1) * amp[0]; jamp[5] += +cxtype(0, 1) * amp[0]; jamp[9] += -cxtype(0, 1) * amp[0]; jamp[11] += +cxtype(0, 1) * amp[0]; jamp[17] += +cxtype(0, 1) * amp[0]; jamp[23] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 109 VVV1_0(w[1], w[7], w[23], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[3] += -cxtype(0, 1) * amp[0]; jamp[5] += +cxtype(0, 1) * amp[0]; jamp[6] += +cxtype(0, 1) * amp[0]; jamp[7] += -cxtype(0, 1) * amp[0]; jamp[9] += -cxtype(0, 1) * amp[0]; jamp[11] += +cxtype(0, 1) * amp[0]; jamp[16] += +cxtype(0, 1) * amp[0]; jamp[22] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 110 FFV1_0(w[13], w[20], w[1], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[12] += -amp[0]; // Amplitude(s) for diagram number 111 FFV1_0(w[21], w[11], w[1], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[14] += -amp[0]; // Amplitude(s) for diagram number 112 FFV1_0(w[15], w[24], w[1], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[18] += -amp[0]; // Amplitude(s) for diagram number 113 FFV1_0(w[12], w[14], w[1], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[20] += -amp[0]; VVVV1P0_1(w[0], w[1], w[4], cxtype(cIPC[4], cIPC[5]), 0., 0., w[12]); VVVV3P0_1(w[0], w[1], w[4], cxtype(cIPC[4], cIPC[5]), 0., 0., w[24]); VVVV4P0_1(w[0], w[1], w[4], cxtype(cIPC[4], cIPC[5]), 0., 0., w[21]); // Amplitude(s) for diagram number 114 VVV1_0(w[12], w[7], w[5], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[0] += +cxtype(0, 1) * amp[0]; jamp[2] += -cxtype(0, 1) * amp[0]; jamp[8] += -cxtype(0, 1) * amp[0]; jamp[14] += +cxtype(0, 1) * amp[0]; jamp[18] += -cxtype(0, 1) * amp[0]; jamp[19] += +cxtype(0, 1) * amp[0]; jamp[21] += +cxtype(0, 1) * amp[0]; jamp[23] += -cxtype(0, 1) * amp[0]; VVV1_0(w[24], w[7], w[5], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[2] += -cxtype(0, 1) * amp[0]; jamp[6] += +cxtype(0, 1) * amp[0]; jamp[8] += -cxtype(0, 1) * amp[0]; jamp[12] += +cxtype(0, 1) * amp[0]; jamp[19] += +cxtype(0, 1) * amp[0]; jamp[20] += -cxtype(0, 1) * amp[0]; jamp[21] += +cxtype(0, 1) * amp[0]; jamp[22] += -cxtype(0, 1) * amp[0]; VVV1_0(w[21], w[7], w[5], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[0] += -cxtype(0, 1) * amp[0]; jamp[6] += +cxtype(0, 1) * amp[0]; jamp[12] += +cxtype(0, 1) * amp[0]; jamp[14] += -cxtype(0, 1) * amp[0]; jamp[18] += +cxtype(0, 1) * amp[0]; jamp[20] += -cxtype(0, 1) * amp[0]; jamp[22] += -cxtype(0, 1) * amp[0]; jamp[23] += +cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 115 FFV1_0(w[3], w[14], w[12], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[18] += +amp[0]; jamp[19] += -amp[0]; 
jamp[21] += -amp[0]; jamp[23] += +amp[0]; FFV1_0(w[3], w[14], w[24], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[19] += -amp[0]; jamp[20] += +amp[0]; jamp[21] += -amp[0]; jamp[22] += +amp[0]; FFV1_0(w[3], w[14], w[21], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[18] += -amp[0]; jamp[20] += +amp[0]; jamp[22] += +amp[0]; jamp[23] += -amp[0]; // Amplitude(s) for diagram number 116 FFV1_0(w[13], w[2], w[12], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[0] += +amp[0]; jamp[2] += -amp[0]; jamp[8] += -amp[0]; jamp[14] += +amp[0]; FFV1_0(w[13], w[2], w[24], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[2] += -amp[0]; jamp[6] += +amp[0]; jamp[8] += -amp[0]; jamp[12] += +amp[0]; FFV1_0(w[13], w[2], w[21], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[0] += -amp[0]; jamp[6] += +amp[0]; jamp[12] += +amp[0]; jamp[14] += -amp[0]; VVVV1P0_1(w[0], w[1], w[5], cxtype(cIPC[4], cIPC[5]), 0., 0., w[21]); VVVV3P0_1(w[0], w[1], w[5], cxtype(cIPC[4], cIPC[5]), 0., 0., w[13]); VVVV4P0_1(w[0], w[1], w[5], cxtype(cIPC[4], cIPC[5]), 0., 0., w[24]); // Amplitude(s) for diagram number 117 VVV1_0(w[21], w[7], w[4], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[1] += +cxtype(0, 1) * amp[0]; jamp[4] += -cxtype(0, 1) * amp[0]; jamp[10] += -cxtype(0, 1) * amp[0]; jamp[12] += -cxtype(0, 1) * amp[0]; jamp[13] += +cxtype(0, 1) * amp[0]; jamp[15] += +cxtype(0, 1) * amp[0]; jamp[17] += -cxtype(0, 1) * amp[0]; jamp[20] += +cxtype(0, 1) * amp[0]; VVV1_0(w[13], w[7], w[4], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[4] += -cxtype(0, 1) * amp[0]; jamp[7] += +cxtype(0, 1) * amp[0]; jamp[10] += -cxtype(0, 1) * amp[0]; jamp[13] += +cxtype(0, 1) * amp[0]; jamp[14] += -cxtype(0, 1) * amp[0]; jamp[15] += +cxtype(0, 1) * amp[0]; jamp[16] += -cxtype(0, 1) * amp[0]; jamp[18] += +cxtype(0, 1) * amp[0]; VVV1_0(w[24], w[7], w[4], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[1] += -cxtype(0, 1) * amp[0]; jamp[7] += +cxtype(0, 1) * amp[0]; jamp[12] += +cxtype(0, 1) * amp[0]; jamp[14] += -cxtype(0, 1) * amp[0]; jamp[16] += -cxtype(0, 1) * amp[0]; jamp[17] += +cxtype(0, 1) * amp[0]; jamp[18] += +cxtype(0, 1) * amp[0]; jamp[20] += -cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 118 FFV1_0(w[3], w[11], w[21], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[12] += +amp[0]; jamp[13] += -amp[0]; jamp[15] += -amp[0]; jamp[17] += +amp[0]; FFV1_0(w[3], w[11], w[13], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[13] += -amp[0]; jamp[14] += +amp[0]; jamp[15] += -amp[0]; jamp[16] += +amp[0]; FFV1_0(w[3], w[11], w[24], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[12] += -amp[0]; jamp[14] += +amp[0]; jamp[16] += +amp[0]; jamp[17] += -amp[0]; // Amplitude(s) for diagram number 119 FFV1_0(w[15], w[2], w[21], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[1] += +amp[0]; jamp[4] += -amp[0]; jamp[10] += -amp[0]; jamp[20] += +amp[0]; FFV1_0(w[15], w[2], w[13], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[4] += -amp[0]; jamp[7] += +amp[0]; jamp[10] += -amp[0]; jamp[18] += +amp[0]; FFV1_0(w[15], w[2], w[24], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[1] += -amp[0]; jamp[7] += +amp[0]; jamp[18] += +amp[0]; jamp[20] += -amp[0]; VVVV1P0_1(w[0], w[4], w[5], cxtype(cIPC[4], cIPC[5]), 0., 0., w[24]); VVVV3P0_1(w[0], w[4], w[5], cxtype(cIPC[4], cIPC[5]), 0., 0., w[15]); VVVV4P0_1(w[0], w[4], w[5], cxtype(cIPC[4], cIPC[5]), 0., 0., w[13]); // Amplitude(s) for diagram number 120 FFV1_0(w[3], w[9], w[24], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[6] += +amp[0]; jamp[7] += -amp[0]; jamp[9] += -amp[0]; jamp[11] += +amp[0]; FFV1_0(w[3], w[9], w[15], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[7] += -amp[0]; jamp[8] += +amp[0]; 
jamp[9] += -amp[0]; jamp[10] += +amp[0]; FFV1_0(w[3], w[9], w[13], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[6] += -amp[0]; jamp[8] += +amp[0]; jamp[10] += +amp[0]; jamp[11] += -amp[0]; // Amplitude(s) for diagram number 121 FFV1_0(w[16], w[2], w[24], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[3] += +amp[0]; jamp[5] += -amp[0]; jamp[16] += -amp[0]; jamp[22] += +amp[0]; FFV1_0(w[16], w[2], w[15], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[5] += -amp[0]; jamp[13] += +amp[0]; jamp[16] += -amp[0]; jamp[19] += +amp[0]; FFV1_0(w[16], w[2], w[13], cxtype(cIPC[2], cIPC[3]), &amp[0]); jamp[3] += -amp[0]; jamp[13] += +amp[0]; jamp[19] += +amp[0]; jamp[22] += -amp[0]; // Amplitude(s) for diagram number 122 VVV1_0(w[24], w[1], w[7], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[3] += -cxtype(0, 1) * amp[0]; jamp[5] += +cxtype(0, 1) * amp[0]; jamp[6] += +cxtype(0, 1) * amp[0]; jamp[7] += -cxtype(0, 1) * amp[0]; jamp[9] += -cxtype(0, 1) * amp[0]; jamp[11] += +cxtype(0, 1) * amp[0]; jamp[16] += +cxtype(0, 1) * amp[0]; jamp[22] += -cxtype(0, 1) * amp[0]; VVV1_0(w[15], w[1], w[7], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[5] += +cxtype(0, 1) * amp[0]; jamp[7] += -cxtype(0, 1) * amp[0]; jamp[8] += +cxtype(0, 1) * amp[0]; jamp[9] += -cxtype(0, 1) * amp[0]; jamp[10] += +cxtype(0, 1) * amp[0]; jamp[13] += -cxtype(0, 1) * amp[0]; jamp[16] += +cxtype(0, 1) * amp[0]; jamp[19] += -cxtype(0, 1) * amp[0]; VVV1_0(w[13], w[1], w[7], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[3] += +cxtype(0, 1) * amp[0]; jamp[6] += -cxtype(0, 1) * amp[0]; jamp[8] += +cxtype(0, 1) * amp[0]; jamp[10] += +cxtype(0, 1) * amp[0]; jamp[11] += -cxtype(0, 1) * amp[0]; jamp[13] += -cxtype(0, 1) * amp[0]; jamp[19] += -cxtype(0, 1) * amp[0]; jamp[22] += +cxtype(0, 1) * amp[0]; // Amplitude(s) for diagram number 123 VVV1_0(w[0], w[17], w[7], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[0] += -cxtype(0, 1) * amp[0]; jamp[1] += +cxtype(0, 1) * amp[0]; jamp[3] += +cxtype(0, 1) * amp[0]; jamp[5] += -cxtype(0, 1) * amp[0]; jamp[9] += +cxtype(0, 1) * amp[0]; jamp[11] += -cxtype(0, 1) * amp[0]; jamp[17] += -cxtype(0, 1) * amp[0]; jamp[23] += +cxtype(0, 1) * amp[0]; VVV1_0(w[0], w[19], w[7], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[1] += +cxtype(0, 1) * amp[0]; jamp[2] += -cxtype(0, 1) * amp[0]; jamp[3] += +cxtype(0, 1) * amp[0]; jamp[4] += -cxtype(0, 1) * amp[0]; jamp[11] += -cxtype(0, 1) * amp[0]; jamp[15] += +cxtype(0, 1) * amp[0]; jamp[17] += -cxtype(0, 1) * amp[0]; jamp[21] += +cxtype(0, 1) * amp[0]; VVV1_0(w[0], w[8], w[7], cxtype(cIPC[0], cIPC[1]), &amp[0]); jamp[0] += +cxtype(0, 1) * amp[0]; jamp[2] += -cxtype(0, 1) * amp[0]; jamp[4] += -cxtype(0, 1) * amp[0]; jamp[5] += +cxtype(0, 1) * amp[0]; jamp[9] += -cxtype(0, 1) * amp[0]; jamp[15] += +cxtype(0, 1) * amp[0]; jamp[21] += +cxtype(0, 1) * amp[0]; jamp[23] += -cxtype(0, 1) * amp[0]; // double CPPProcess::matrix_1_gg_ttxgg() { // Local variables // The color matrix; static const fptype denom[ncolor] = {54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54}; static const fptype cf[ncolor][ncolor] = {{512, -64, -64, 8, 8, 80, -64, 8, 8, -1, -1, -10, 8, -1, 80, -10, 71, 62, -1, -10, -10, 62, 62, -28}, {-64, 512, 8, 80, -64, 8, 8, -64, -1, -10, 8, -1, -1, -10, -10, 62, 62, -28, 8, -1, 80, -10, 71, 62}, {-64, 8, 512, -64, 80, 8, 8, -1, 80, -10, 71, 62, -64, 8, 8, -1, -1, -10, -10, -1, 62, -28, -10, 62}, {8, 80, -64, 512, 8, -64, -1, -10, -10, 62, 62, -28, 8, -64, -1, -10, 8, -1, -1, 8, 71, 62, 80, -10}, {8, -64, 80, 8, 512, -64, -1, 8, 71, 62, 80, -10, -10, -1, 62, -28, -10, 
62, -64, 8, 8, -1, -1, -10}, {80, 8, 8, -64, -64, 512, -10, -1, 62, -28, -10, 62, -1, 8, 71, 62, 80, -10, 8, -64, -1, -10, 8, -1}, {-64, 8, 8, -1, -1, -10, 512, -64, -64, 8, 8, 80, 80, -10, 8, -1, 62, 71, -10, 62, -1, -10, -28, 62}, {8, -64, -1, -10, 8, -1, -64, 512, 8, 80, -64, 8, -10, 62, -1, -10, -28, 62, 80, -10, 8, -1, 62, 71}, {8, -1, 80, -10, 71, 62, -64, 8, 512, -64, 80, 8, 8, -1, -64, 8, -10, -1, 62, -28, -10, -1, 62, -10}, {-1, -10, -10, 62, 62, -28, 8, 80, -64, 512, 8, -64, -1, -10, 8, -64, -1, 8, 71, 62, -1, 8, -10, 80}, {-1, 8, 71, 62, 80, -10, 8, -64, 80, 8, 512, -64, 62, -28, -10, -1, 62, -10, 8, -1, -64, 8, -10, -1}, {-10, -1, 62, -28, -10, 62, 80, 8, 8, -64, -64, 512, 71, 62, -1, 8, -10, 80, -1, -10, 8, -64, -1, 8}, {8, -1, -64, 8, -10, -1, 80, -10, 8, -1, 62, 71, 512, -64, -64, 8, 8, 80, 62, -10, -28, 62, -1, -10}, {-1, -10, 8, -64, -1, 8, -10, 62, -1, -10, -28, 62, -64, 512, 8, 80, -64, 8, -10, 80, 62, 71, 8, -1}, {80, -10, 8, -1, 62, 71, 8, -1, -64, 8, -10, -1, -64, 8, 512, -64, 80, 8, -28, 62, 62, -10, -10, -1}, {-10, 62, -1, -10, -28, 62, -1, -10, 8, -64, -1, 8, 8, 80, -64, 512, 8, -64, 62, 71, -10, 80, -1, 8}, {71, 62, -1, 8, -10, 80, 62, -28, -10, -1, 62, -10, 8, -64, 80, 8, 512, -64, -1, 8, -10, -1, -64, 8}, {62, -28, -10, -1, 62, -10, 71, 62, -1, 8, -10, 80, 80, 8, 8, -64, -64, 512, -10, -1, -1, 8, 8, -64}, {-1, 8, -10, -1, -64, 8, -10, 80, 62, 71, 8, -1, 62, -10, -28, 62, -1, -10, 512, -64, -64, 8, 8, 80}, {-10, -1, -1, 8, 8, -64, 62, -10, -28, 62, -1, -10, -10, 80, 62, 71, 8, -1, -64, 512, 8, 80, -64, 8}, {-10, 80, 62, 71, 8, -1, -1, 8, -10, -1, -64, 8, -28, 62, 62, -10, -10, -1, -64, 8, 512, -64, 80, 8}, {62, -10, -28, 62, -1, -10, -10, -1, -1, 8, 8, -64, 62, 71, -10, 80, -1, 8, 8, 80, -64, 512, 8, -64}, {62, 71, -10, 80, -1, 8, -28, 62, 62, -10, -10, -1, -1, 8, -10, -1, -64, 8, 8, -64, 80, 8, 512, -64}, {-28, 62, 62, -10, -10, -1, 62, 71, -10, 80, -1, 8, -10, -1, -1, 8, 8, -64, 80, 8, 8, -64, -64, 512}}; // Sum and square the color flows to get the matrix element for(int icol = 0; icol < ncolor; icol++ ) { cxtype ztemp = cxmake(0, 0); for(int jcol = 0; jcol < ncolor; jcol++ ) ztemp = ztemp + cf[icol][jcol] * jamp[jcol]; meHelSum = meHelSum + cxreal(ztemp * conj(jamp[icol]))/denom[icol]; } // Store the leading color flows for choice of color // for(i=0;i < ncolor; i++) // jamp2[0][i] += real(jamp[i]*conj(jamp[i])); mgDebug(1, __FUNCTION__); return; } CPPProcess::CPPProcess(int numiterations, int gpublocks, int gputhreads, bool verbose, bool debug) : m_numiterations(numiterations), gpu_nblocks(gpublocks), gpu_nthreads(gputhreads), m_verbose(verbose), dim(gpu_nblocks * gpu_nthreads) { // Helicities for the process - nodim static const int tHel[ncomb][nexternal] = {{-1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, 1}, {-1, -1, -1, -1, 1, -1}, {-1, -1, -1, -1, 1, 1}, {-1, -1, -1, 1, -1, -1}, {-1, -1, -1, 1, -1, 1}, {-1, -1, -1, 1, 1, -1}, {-1, -1, -1, 1, 1, 1}, {-1, -1, 1, -1, -1, -1}, {-1, -1, 1, -1, -1, 1}, {-1, -1, 1, -1, 1, -1}, {-1, -1, 1, -1, 1, 1}, {-1, -1, 1, 1, -1, -1}, {-1, -1, 1, 1, -1, 1}, {-1, -1, 1, 1, 1, -1}, {-1, -1, 1, 1, 1, 1}, {-1, 1, -1, -1, -1, -1}, {-1, 1, -1, -1, -1, 1}, {-1, 1, -1, -1, 1, -1}, {-1, 1, -1, -1, 1, 1}, {-1, 1, -1, 1, -1, -1}, {-1, 1, -1, 1, -1, 1}, {-1, 1, -1, 1, 1, -1}, {-1, 1, -1, 1, 1, 1}, {-1, 1, 1, -1, -1, -1}, {-1, 1, 1, -1, -1, 1}, {-1, 1, 1, -1, 1, -1}, {-1, 1, 1, -1, 1, 1}, {-1, 1, 1, 1, -1, -1}, {-1, 1, 1, 1, -1, 1}, {-1, 1, 1, 1, 1, -1}, {-1, 1, 1, 1, 1, 1}, {1, -1, -1, -1, -1, -1}, {1, -1, -1, -1, -1, 1}, {1, -1, -1, 
-1, 1, -1}, {1, -1, -1, -1, 1, 1}, {1, -1, -1, 1, -1, -1}, {1, -1, -1, 1, -1, 1}, {1, -1, -1, 1, 1, -1}, {1, -1, -1, 1, 1, 1}, {1, -1, 1, -1, -1, -1}, {1, -1, 1, -1, -1, 1}, {1, -1, 1, -1, 1, -1}, {1, -1, 1, -1, 1, 1}, {1, -1, 1, 1, -1, -1}, {1, -1, 1, 1, -1, 1}, {1, -1, 1, 1, 1, -1}, {1, -1, 1, 1, 1, 1}, {1, 1, -1, -1, -1, -1}, {1, 1, -1, -1, -1, 1}, {1, 1, -1, -1, 1, -1}, {1, 1, -1, -1, 1, 1}, {1, 1, -1, 1, -1, -1}, {1, 1, -1, 1, -1, 1}, {1, 1, -1, 1, 1, -1}, {1, 1, -1, 1, 1, 1}, {1, 1, 1, -1, -1, -1}, {1, 1, 1, -1, -1, 1}, {1, 1, 1, -1, 1, -1}, {1, 1, 1, -1, 1, 1}, {1, 1, 1, 1, -1, -1}, {1, 1, 1, 1, -1, 1}, {1, 1, 1, 1, 1, -1}, {1, 1, 1, 1, 1, 1}}; #ifdef __CUDACC__ checkCuda(cudaMemcpyToSymbol(cHel, tHel, ncomb * nexternal * sizeof(int))); #else memcpy(cHel, tHel, ncomb * nexternal * sizeof(int)); #endif // SANITY CHECK: GPU memory usage may be based on casts of fptype[2] to cxtype assert(sizeof(cxtype) == 2 * sizeof(fptype)); } CPPProcess::~CPPProcess() {} const std::vector<fptype> &CPPProcess::getMasses() const {return mME;} //-------------------------------------------------------------------------- // Initialize process. void CPPProcess::initProc(string param_card_name) { // Instantiate the model class and set parameters that stay fixed during run pars = Parameters_sm::getInstance(); SLHAReader slha(param_card_name, m_verbose); pars->setIndependentParameters(slha); pars->setIndependentCouplings(); if (m_verbose) { pars->printIndependentParameters(); pars->printIndependentCouplings(); } pars->setDependentParameters(); pars->setDependentCouplings(); // Set external particle masses for this matrix element mME.push_back(pars->ZERO); mME.push_back(pars->ZERO); mME.push_back(pars->mdl_MT); mME.push_back(pars->mdl_MT); mME.push_back(pars->ZERO); mME.push_back(pars->ZERO); static cxtype tIPC[3] = {cxmake(pars->GC_10), cxmake(pars->GC_11), cxmake(pars->GC_12)}; static fptype tIPD[2] = {(fptype)pars->mdl_MT, (fptype)pars->mdl_WT}; #ifdef __CUDACC__ checkCuda(cudaMemcpyToSymbol(cIPC, tIPC, 3 * sizeof(cxtype))); checkCuda(cudaMemcpyToSymbol(cIPD, tIPD, 2 * sizeof(fptype))); #else memcpy(cIPC, tIPC, 3 * sizeof(cxtype)); memcpy(cIPD, tIPD, 2 * sizeof(fptype)); #endif } //-------------------------------------------------------------------------- #ifdef __CUDACC__ __global__ void sigmaKin_getGoodHel(const fptype * allmomenta, // input: momenta as AOSOA[npagM][npar][4][neppM] with nevt=npagM*neppM bool * isGoodHel) // output: isGoodHel[ncomb] - device array { const int nprocesses = 1; // FIXME: assume process.nprocesses == 1 fptype meHelSum[nprocesses] = {0}; // all zeros fptype meHelSumLast = 0; for (int ihel = 0; ihel < ncomb; ihel++ ) { // NB: calculate_wavefunctions ADDS |M|^2 for a given ihel to the running // sum of |M|^2 over helicities for the given event calculate_wavefunctions(ihel, allmomenta, meHelSum[0]); if (meHelSum[0] != meHelSumLast) { isGoodHel[ihel] = true; meHelSumLast = meHelSum[0]; } } } #endif //-------------------------------------------------------------------------- #ifdef __CUDACC__ void sigmaKin_setGoodHel(const bool * isGoodHel) // input: isGoodHel[ncomb] - host array { int nGoodHel[1] = {0}; int goodHel[ncomb] = {0}; for (int ihel = 0; ihel < ncomb; ihel++ ) { // std::cout << "sigmaKin_setGoodHel ihel=" << ihel << ( isGoodHel[ihel] ? 
// " true" : " false" ) << std::endl; if (isGoodHel[ihel]) { goodHel[nGoodHel[0]] = ihel; nGoodHel[0]++; } } checkCuda(cudaMemcpyToSymbol(cNGoodHel, nGoodHel, sizeof(int))); checkCuda(cudaMemcpyToSymbol(cGoodHel, goodHel, ncomb * sizeof(int))); } #endif //-------------------------------------------------------------------------- // Evaluate |M|^2, part independent of incoming flavour. __global__ void sigmaKin(const fptype * allmomenta, fptype * allMEs #ifndef __CUDACC__ , const int nevt // input: #events (for cuda: nevt == ndim == gpublocks*gputhreads) #endif ) { // Set the parameters which change event by event // Need to discuss this with Stefan // pars->setDependentParameters(); // pars->setDependentCouplings(); #ifndef __CUDACC__ const int maxtry = 10; static unsigned long long sigmakin_itry = 0; // first iteration over nevt events static bool sigmakin_goodhel[ncomb] = {false}; #endif // Reset color flows // start sigmakin_lines mgDebugInitialise(); // Set the parameters which change event by event // Need to discuss this with Stefan // pars->setDependentParameters(); // pars->setDependentCouplings(); // Reset color flows #ifndef __CUDACC__ //** START LOOP ON IEVT ** for (int ievt = 0; ievt < nevt; ++ ievt) #endif { #ifdef __CUDACC__ const int idim = blockDim.x * blockIdx.x + threadIdx.x; // event# == threadid (previously was: tid) const int ievt = idim; // printf( "sigmakin: ievt %d\n", ievt ); #endif // Denominators: spins, colors and identical particles const int nprocesses = 1; // FIXME: assume process.nprocesses == 1 const int denominators[1] = {512}; // Reset the "matrix elements" - running sums of |M|^2 over helicities for // the given event fptype meHelSum[nprocesses] = {0}; // all zeros #ifdef __CUDACC__ // CUDA - using precomputed good helicities for (int ighel = 0; ighel < cNGoodHel[0]; ighel++ ) { const int ihel = cGoodHel[ighel]; calculate_wavefunctions(ihel, allmomenta, meHelSum[0]); } #else // C++ - compute good helicities within this loop fptype meHelSumLast = 0; // check for good helicities for (int ihel = 0; ihel < ncomb; ihel++ ) { if (sigmakin_itry > maxtry && !sigmakin_goodhel[ihel]) continue; // NB: calculate_wavefunctions ADDS |M|^2 for a given ihel to the running // sum of |M|^2 over helicities for the given event calculate_wavefunctions(ihel, allmomenta, meHelSum[0], ievt); if (sigmakin_itry <= maxtry) { if ( !sigmakin_goodhel[ihel] && meHelSum[0] > meHelSumLast) sigmakin_goodhel[ihel] = true; meHelSumLast = meHelSum[0]; } } #endif // Get the final |M|^2 as an average over helicities/colors of the running // sum of |M|^2 over helicities for the given event // [NB 'sum over final spins, average over initial spins', eg see // https://www.uzh.ch/cmsssl/physik/dam/jcr:2e24b7b1-f4d7-4160-817e-47b13dbf // 1d7c/Handout_4_2016-UZH.pdf] for (int iproc = 0; iproc < nprocesses; ++ iproc) { meHelSum[iproc] /= denominators[iproc]; } // Set the final average |M|^2 for this event in the output array for all // events for (int iproc = 0; iproc < nprocesses; ++ iproc) { allMEs[iproc * nprocesses + ievt] = meHelSum[iproc]; } #ifndef __CUDACC__ if (sigmakin_itry <= maxtry) sigmakin_itry++; // if ( sigmakin_itry == maxtry ) // for (int ihel = 0; ihel < ncomb; ihel++ ) // printf( "sigmakin: ihelgood %2d %d\n", ihel, sigmakin_goodhel[ihel] ); #endif //** END LOOP ON IEVT ** mgDebugFinalise(); } //-------------------------------------------------------------------------- } } //========================================================================== // Private class member functions 
//--------------------------------------------------------------------------
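The helicity-filtering machinery above (sigmaKin_getGoodHel, sigmaKin_setGoodHel, sigmaKin) is meant to be driven from the host: probe once which helicity combinations contribute, upload the compacted list to constant memory, then evaluate all events looping only over the good helicities. A minimal driver sketch follows; the kernel and helper names, fptype, ncomb and checkCuda come from the file above, while runSigmaKin itself, the buffer handling and the launch configuration are illustrative assumptions, not part of the generated code.

// Hypothetical host-side driver (not part of the generated code).
// d_momenta and d_MEs are assumed to be device buffers already allocated and,
// for the momenta, filled in the AOSOA layout expected by sigmaKin.
void runSigmaKin(const fptype* d_momenta, fptype* d_MEs, int gpublocks, int gputhreads)
{
  using namespace gProc;

  // 1) probe which helicity combinations give a non-vanishing |M|^2
  //    (each thread probes its own event; concurrent writes of 'true' are benign)
  bool* d_isGoodHel = nullptr;  // one flag per helicity combination
  checkCuda(cudaMalloc(&d_isGoodHel, ncomb * sizeof(bool)));
  checkCuda(cudaMemset(d_isGoodHel, 0, ncomb * sizeof(bool)));
  sigmaKin_getGoodHel<<<gpublocks, gputhreads>>>(d_momenta, d_isGoodHel);
  checkCuda(cudaDeviceSynchronize());

  // 2) copy the flags back and upload the compacted good-helicity list
  //    into the cNGoodHel/cGoodHel constant-memory arrays
  bool isGoodHel[ncomb] = {false};
  checkCuda(cudaMemcpy(isGoodHel, d_isGoodHel, ncomb * sizeof(bool), cudaMemcpyDeviceToHost));
  sigmaKin_setGoodHel(isGoodHel);

  // 3) evaluate the matrix elements for all events, looping only over good helicities
  sigmaKin<<<gpublocks, gputhreads>>>(d_momenta, d_MEs);
  checkCuda(cudaDeviceSynchronize());

  checkCuda(cudaFree(d_isGoodHel));
}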
76cf57c99a50e4aa2ca981ddafa5fa6c884977db.hip
// !!! This is a file automatically generated by hipify!!!
#include "cuda_structs.h"

//This module contains functions required for finite field arithmetic

//----------------------------------------------------------------------------------------------------------------------------------------------------
//----------------------------------------------------------------------------------------------------------------------------------------------------
//----------------------------------------------------------------------------------------------------------------------------------------------------

DEVICE_FUNC uint256_g FIELD_ADD_INV(const uint256_g& elem)
{
    if (!is_zero(elem))
        return SUB(BASE_FIELD_P, elem);
    else
        return elem;
}

DEVICE_FUNC uint256_g FIELD_ADD(const uint256_g& a, const uint256_g& b)
{
    uint256_g w = ADD(a, b);
    if (CMP(w, BASE_FIELD_P) >= 0)
        return SUB(w, BASE_FIELD_P);
    return w;
}

DEVICE_FUNC uint256_g FIELD_SUB(const uint256_g& a, const uint256_g& b)
{
    if (CMP(a, b) >= 0)
        return SUB(a, b);
    else
    {
        uint256_g t = ADD(a, BASE_FIELD_P);
        return SUB(t, b);
    }
}

//We are using https://www.researchgate.net/publication/3387259_Improved_Montgomery_modular_inverse_algorithm (algorithm 5)
//the description of The Almost Montgomery Inverse (so-called phase 1) is taken from
//http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.75.8377&rep=rep1&type=pdf

struct stage_one_data
{
    uint256_g almost_mont_inverse;
    uint32_t k;
};

#include <stdio.h>

__device__ void print2_uint256(const uint256_g& val)
{
    printf("%x %x %x %x %x %x %x %x\n", val.n[7], val.n[6], val.n[5], val.n[4], val.n[3], val.n[2], val.n[1], val.n[0]);
}

//phase 1 of the Montgomery inversion: binary almost-inverse, also returning the shift count k
static DEVICE_FUNC inline stage_one_data stage_one_mul_inv(const uint256_g& elem)
{
    uint256_g U = BASE_FIELD_P;
    uint256_g V = elem;
    uint256_g R = uint256_g{0, 0, 0, 0, 0, 0, 0, 0};
    uint256_g S = uint256_g{1, 0, 0, 0, 0, 0, 0, 0};
    uint32_t k = 0;

    while (!is_zero(V))
    {
        if (is_even(U))
        {
            U = SHIFT_RIGHT(U, 1);
            S = SHIFT_LEFT(S, 1);
        }
        else if (is_even(V))
        {
            V = SHIFT_RIGHT(V, 1);
            R = SHIFT_LEFT(R, 1);
        }
        else if (CMP(U, V) > 0)
        {
            U = SHIFT_RIGHT(SUB(U, V), 1);
            R = ADD(R, S);
            S = SHIFT_LEFT(S, 1);
        }
        else
        {
            V = SHIFT_RIGHT(SUB(V, U), 1);
            S = ADD(R, S);
            R = SHIFT_LEFT(R, 1);
        }
        k++;
    }

    if (CMP(R, BASE_FIELD_P) >= 0)
        R = SUB(R, BASE_FIELD_P);
    R = SUB(BASE_FIELD_P, R);

    return stage_one_data{R, k};
}

//phase 2: correct the almost inverse by the appropriate power of 2 via Montgomery multiplications
DEVICE_FUNC uint256_g FIELD_MUL_INV(const uint256_g& elem)
{
    auto data = stage_one_mul_inv(elem);
    if (data.k == R_LOG)
    {
        return MONT_MUL(data.almost_mont_inverse, BASE_FIELD_R2);
    }
    else
    {
        uint32_t n = 2 * R_LOG - data.k;
        auto res = uint256_g{0, 0, 0, 0, 0, 0, 0, 0};
        if (n < R_LOG)
        {
            set_bit(res, n);
            res = MONT_MUL(data.almost_mont_inverse, res);
        }
        else if (n == R_LOG + 1)
        {
            res = MONT_MUL(data.almost_mont_inverse, BASE_FIELD_R2);
        }
        else
        {
            //here n == R_LOG_2 + 2
            res = MONT_MUL(data.almost_mont_inverse, BASE_FIELD_R4);
        }
        return MONT_MUL(res, BASE_FIELD_R_SQUARED);
    }
}

//batch inversion - simultaneously (in place) invert all non-zero elements in the array.
//NB: we assume that all elements in the array are non-zero
DEVICE_FUNC void BATCH_FIELD_MUL_INV(uint256_g* vec, size_t vec_size)
{

}

//this is a field embedded into a group of points on elliptic curve

DEVICE_FUNC embedded_field::embedded_field(const uint256_g rep): rep_(rep) {}
DEVICE_FUNC embedded_field::embedded_field() {}

DEVICE_FUNC bool embedded_field::operator==(const embedded_field& other) const
{
    return EQUAL(rep_, other.rep_);
}

DEVICE_FUNC embedded_field embedded_field::zero()
{
    uint256_g x;
    #pragma unroll
    for(uint32_t i = 0; i < N; i++)
        x.n[i] = 0;
    return embedded_field(x);
}

DEVICE_FUNC embedded_field embedded_field::one()
{
    return embedded_field(EMBEDDED_FIELD_R);
}

DEVICE_FUNC bool embedded_field::operator!=(const embedded_field& other) const
{
    return !EQUAL(rep_, other.rep_);
}

DEVICE_FUNC embedded_field::operator uint256_g() const
{
    return rep_;
}

DEVICE_FUNC embedded_field embedded_field::operator-() const
{
    if (!is_zero(rep_))
        return embedded_field(SUB(EMBEDDED_FIELD_P, rep_));
    else
        return *this;
}

//NB: for now we assume that highest possible limb bit is zero for the field modulus
DEVICE_FUNC embedded_field& embedded_field::operator+=(const embedded_field& other)
{
    rep_ = ADD(rep_, other.rep_);
    if (CMP(rep_, EMBEDDED_FIELD_P) >= 0)
        rep_ = SUB(rep_, EMBEDDED_FIELD_P);
    return *this;
}

DEVICE_FUNC embedded_field& embedded_field::operator-=(const embedded_field& other)
{
    if (CMP(rep_, other.rep_) >= 0)
        rep_ = SUB(rep_, other.rep_);
    else
    {
        uint256_g t = ADD(rep_, EMBEDDED_FIELD_P);
        rep_ = SUB(t, other.rep_);
    }
    return *this;
}

//here we mean montgomery multiplication (limb-by-limb multiplication interleaved with reduction)
DEVICE_FUNC embedded_field& embedded_field::operator*=(const embedded_field& other)
{
    uint256_g T;
    uint256_g u = rep_;
    uint256_g v = other.rep_;

    #pragma unroll
    for (uint32_t j = 0; j < N; j++)
        T.n[j] = 0;

    uint32_t prefix_low = 0, prefix_high = 0, m;
    uint32_t high_word, low_word;

    #pragma unroll
    for (uint32_t i = 0; i < N; i++)
    {
        uint32_t carry = 0;
        #pragma unroll
        for (uint32_t j = 0; j < N; j++)
        {
            low_word = device_long_mul(u.n[j], v.n[i], &high_word);
            low_word = device_fused_add(low_word, T.n[j], &high_word);
            low_word = device_fused_add(low_word, carry, &high_word);
            carry = high_word;
            T.n[j] = low_word;
        }

        //TODO: maybe we actually require less space? (only one additional limb instead of two)
        prefix_high = 0;
        prefix_low = device_fused_add(prefix_low, carry, &prefix_high);

        m = T.n[0] * EMBEDDED_FIELD_N;
        low_word = device_long_mul(EMBEDDED_FIELD_P.n[0], m, &high_word);
        low_word = device_fused_add(low_word, T.n[0], &high_word);
        carry = high_word;

        #pragma unroll
        for (uint32_t j = 1; j < N; j++)
        {
            low_word = device_long_mul(EMBEDDED_FIELD_P.n[j], m, &high_word);
            low_word = device_fused_add(low_word, T.n[j], &high_word);
            low_word = device_fused_add(low_word, carry, &high_word);
            T.n[j-1] = low_word;
            carry = high_word;
        }

        T.n[N-1] = device_fused_add(prefix_low, carry, &prefix_high);
        prefix_low = prefix_high;
    }

    if (CMP(T, EMBEDDED_FIELD_P) >= 0)
    {
        //TODO: maybe better to change to an in-place version of sub?
        T = SUB(T, EMBEDDED_FIELD_P);
    }

    rep_ = T;
    return *this;
}

DEVICE_FUNC embedded_field operator+(const embedded_field& left, const embedded_field& right)
{
    embedded_field result(left);
    result += right;
    return result;
}

DEVICE_FUNC embedded_field operator-(const embedded_field& left, const embedded_field& right)
{
    embedded_field result(left);
    result -= right;
    return result;
}

DEVICE_FUNC embedded_field operator*(const embedded_field& left, const embedded_field& right)
{
    embedded_field result(left);
    result *= right;
    return result;
}

DEVICE_FUNC void gen_random_elem(embedded_field& x, hiprandState_t& state)
{
    for (int i = 0; i < N; i++)
    {
        x.rep_.n[i] = hiprand(&state);
    }
    //discard the top bits of the most significant limb
    x.rep_.n[N - 1] >>= 3;
}
76cf57c99a50e4aa2ca981ddafa5fa6c884977db.cu
#include "cuda_structs.h" //This module comtains functions required for finite field arithmetic //---------------------------------------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------------------------------------- DEVICE_FUNC uint256_g FIELD_ADD_INV(const uint256_g& elem) { if (!is_zero(elem)) return SUB(BASE_FIELD_P, elem); else return elem; } DEVICE_FUNC uint256_g FIELD_ADD(const uint256_g& a, const uint256_g& b ) { uint256_g w = ADD(a, b); if (CMP(w, BASE_FIELD_P) >= 0) return SUB(w, BASE_FIELD_P); return w; } DEVICE_FUNC uint256_g FIELD_SUB(const uint256_g& a, const uint256_g& b) { if (CMP(a, b) >= 0) return SUB(a, b); else { uint256_g t = ADD(a, BASE_FIELD_P); return SUB(t, b); } } //We are using https://www.researchgate.net/publication/3387259_Improved_Montgomery_modular_inverse_algorithm (algorithm 5) //the description of The Almost Montgomery Inverse (so-called phase 1) is taken from //http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.75.8377&rep=rep1&type=pdf struct stage_one_data { uint256_g almost_mont_inverse; uint32_t k; }; #include <stdio.h> __device__ void print2_uint256(const uint256_g& val) { printf("%x %x %x %x %x %x %x %x\n", val.n[7], val.n[6], val.n[5], val.n[4], val.n[3], val.n[2], val.n[1], val.n[0]); } static DEVICE_FUNC inline stage_one_data stage_one_mul_inv(const uint256_g& elem) { uint256_g U = BASE_FIELD_P; uint256_g V = elem; uint256_g R = uint256_g{0, 0, 0, 0, 0, 0, 0, 0}; uint256_g S = uint256_g{1, 0, 0, 0, 0, 0, 0, 0}; uint32_t k = 0; while (!is_zero(V)) { if (is_even(U)) { U = SHIFT_RIGHT(U, 1); S = SHIFT_LEFT(S, 1); } else if (is_even(V)) { V = SHIFT_RIGHT(V, 1); R = SHIFT_LEFT(R, 1); } else if (CMP(U, V) > 0) { U = SHIFT_RIGHT(SUB(U, V), 1); R = ADD(R, S); S = SHIFT_LEFT(S, 1); } else { V = SHIFT_RIGHT(SUB(V, U), 1); S = ADD(R, S); R = SHIFT_LEFT(R, 1); } k++; } if (CMP(R, BASE_FIELD_P) >= 0) R = SUB(R, BASE_FIELD_P); R = SUB(BASE_FIELD_P, R); return stage_one_data{R, k}; } DEVICE_FUNC uint256_g FIELD_MUL_INV(const uint256_g& elem) { auto data = stage_one_mul_inv(elem); if (data.k == R_LOG) { return MONT_MUL(data.almost_mont_inverse, BASE_FIELD_R2); } else { uint32_t n = 2 * R_LOG - data.k; auto res = uint256_g{0, 0, 0, 0, 0, 0, 0, 0}; if (n < R_LOG) { set_bit(res, n); res = MONT_MUL(data.almost_mont_inverse, res); } else if (n == R_LOG + 1) { res = MONT_MUL(data.almost_mont_inverse, BASE_FIELD_R2); } else { //here n == R_LOG_2 + 2 res = MONT_MUL(data.almost_mont_inverse, BASE_FIELD_R4); } return MONT_MUL(res, BASE_FIELD_R_SQUARED); } } //batch inversion - simulaneously (in place) invert all-non zero elements in the array. 
//NB: we assume that all elements in the array are non-zero DEVICE_FUNC void BATCH_FIELD_MUL_INV(uint256_g* vec, size_t vec_size) { } //this is a field embedded into a group of points on elliptic curve DEVICE_FUNC embedded_field::embedded_field(const uint256_g rep): rep_(rep) {} DEVICE_FUNC embedded_field::embedded_field() {} DEVICE_FUNC bool embedded_field::operator==(const embedded_field& other) const { return EQUAL(rep_, other.rep_); } DEVICE_FUNC embedded_field embedded_field::zero() { uint256_g x; #pragma unroll for(uint32_t i = 0; i < N; i++) x.n[i] = 0; return embedded_field(x); } DEVICE_FUNC embedded_field embedded_field::one() { return embedded_field(EMBEDDED_FIELD_R); } DEVICE_FUNC bool embedded_field::operator!=(const embedded_field& other) const { return !EQUAL(rep_, other.rep_); } DEVICE_FUNC embedded_field::operator uint256_g() const { return rep_; } DEVICE_FUNC embedded_field embedded_field::operator-() const { if (!is_zero(rep_)) return embedded_field(SUB(EMBEDDED_FIELD_P, rep_)); else return *this; } //NB: for now we assume that highest possible limb bit is zero for the field modulus DEVICE_FUNC embedded_field& embedded_field::operator+=(const embedded_field& other) { rep_ = ADD(rep_, other.rep_); if (CMP(rep_, EMBEDDED_FIELD_P) >= 0) rep_ = SUB(rep_, EMBEDDED_FIELD_P); return *this; } DEVICE_FUNC embedded_field& embedded_field::operator-=(const embedded_field& other) { if (CMP(rep_, other.rep_) >= 0) rep_ = SUB(rep_, other.rep_); else { uint256_g t = ADD(rep_, EMBEDDED_FIELD_P); rep_ = SUB(t, other.rep_); } return *this; } //here we mean montgomery multiplication DEVICE_FUNC embedded_field& embedded_field::operator*=(const embedded_field& other) { uint256_g T; uint256_g u = rep_; uint256_g v = other.rep_; #pragma unroll for (uint32_t j = 0; j < N; j++) T.n[j] = 0; uint32_t prefix_low = 0, prefix_high = 0, m; uint32_t high_word, low_word; #pragma unroll for (uint32_t i = 0; i < N; i++) { uint32_t carry = 0; #pragma unroll for (uint32_t j = 0; j < N; j++) { low_word = device_long_mul(u.n[j], v.n[i], &high_word); low_word = device_fused_add(low_word, T.n[j], &high_word); low_word = device_fused_add(low_word, carry, &high_word); carry = high_word; T.n[j] = low_word; } //TODO: may be we actually require less space? (only one additional limb instead of two) prefix_high = 0; prefix_low = device_fused_add(prefix_low, carry, &prefix_high); m = T.n[0] * EMBEDDED_FIELD_N; low_word = device_long_mul(EMBEDDED_FIELD_P.n[0], m, &high_word); low_word = device_fused_add(low_word, T.n[0], &high_word); carry = high_word; #pragma unroll for (uint32_t j = 1; j < N; j++) { low_word = device_long_mul(EMBEDDED_FIELD_P.n[j], m, &high_word); low_word = device_fused_add(low_word, T.n[j], &high_word); low_word = device_fused_add(low_word, carry, &high_word); T.n[j-1] = low_word; carry = high_word; } T.n[N-1] = device_fused_add(prefix_low, carry, &prefix_high); prefix_low = prefix_high; } if (CMP(T, EMBEDDED_FIELD_P) >= 0) { //TODO: may be better change to inary version of sub? 
T = SUB(T, EMBEDDED_FIELD_P); } rep_ = T; return *this; } DEVICE_FUNC embedded_field operator+(const embedded_field& left, const embedded_field& right) { embedded_field result(left); result += right; return result; } DEVICE_FUNC embedded_field operator-(const embedded_field& left, const embedded_field& right) { embedded_field result(left); result -= right; return result; } DEVICE_FUNC embedded_field operator*(const embedded_field& left, const embedded_field& right) { embedded_field result(left); result *= right; return result; } DEVICE_FUNC void gen_random_elem(embedded_field& x, curandState& state) { for (int i = 0; i < N; i++) { x.rep_.n[i] = curand(&state); } x.rep_.n[N - 1] >>= 3; }
51cee015142091f3b017a810a186f9266c2add31.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2017 Madhavan Seshadri
//               2018 Patrick Diehl
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)

extern "C" {

__global__ void kernel(char *out, int *width, int *height, int *yStart, int* n){

  unsigned int xDim = blockIdx.x * blockDim.x + threadIdx.x;
  unsigned int yDim = blockIdx.y * blockDim.y + threadIdx.y;

  //index of the output array, multiplied by 3 for R,G,B values
  int arrayIndex = 3 * (*width) * yDim + xDim*3;

  float xPoint = ((float) (xDim)/(*width)) * 3.25f - 2.0f;
  float yPoint = ((float) (yDim+*yStart)/(*height)) * 2.5f - 1.25f;

  //for calculation of complex number
  float x = 0.0;
  float y = 0.0;

  int iterationCount = 0;
  int numIterations = 256;

  //terminating condition x^2+y^2 < 4 or iterations >= numIterations
  while(y*y+x*x<=4 && iterationCount<(numIterations)){
    float xTemp = x*x-y*y + xPoint;
    y = 2*x*y + yPoint;
    x = xTemp;
    iterationCount++;
  }

  if (arrayIndex < *n) {
    if(iterationCount == (numIterations)){
      out[arrayIndex] = iterationCount;
      out[arrayIndex+1]=1;
      out[arrayIndex+2]=iterationCount;
    }else{
      out[arrayIndex] = 0;
      out[arrayIndex+1]=iterationCount;
      out[arrayIndex+2]=0;
    }
  }
}
};
51cee015142091f3b017a810a186f9266c2add31.cu
// Copyright (c) 2017 Madhavan Seshadri
//               2018 Patrick Diehl
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)

extern "C" {

__global__ void kernel(char *out, int *width, int *height, int *yStart, int* n){

  unsigned int xDim = blockIdx.x * blockDim.x + threadIdx.x;
  unsigned int yDim = blockIdx.y * blockDim.y + threadIdx.y;

  //index of the output array, multiplied by 3 for R,G,B values
  int arrayIndex = 3 * (*width) * yDim + xDim*3;

  float xPoint = ((float) (xDim)/(*width)) * 3.25f - 2.0f;
  float yPoint = ((float) (yDim+*yStart)/(*height)) * 2.5f - 1.25f;

  //for calculation of complex number
  float x = 0.0;
  float y = 0.0;

  int iterationCount = 0;
  int numIterations = 256;

  //terminating condition x^2+y^2 < 4 or iterations >= numIterations
  while(y*y+x*x<=4 && iterationCount<(numIterations)){
    float xTemp = x*x-y*y + xPoint;
    y = 2*x*y + yPoint;
    x = xTemp;
    iterationCount++;
  }

  if (arrayIndex < *n) {
    if(iterationCount == (numIterations)){
      out[arrayIndex] = iterationCount;
      out[arrayIndex+1]=1;
      out[arrayIndex+2]=iterationCount;
    }else{
      out[arrayIndex] = 0;
      out[arrayIndex+1]=iterationCount;
      out[arrayIndex+2]=0;
    }
  }
}
};
4c3e43bbc9fc8480979f8027d29a2ccd7a9aadd9.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* CUDA code using shared memory */
#include "ApplySmooth_CUDA.h"

__global__ void smooth_cuda(unsigned short* cuda_image, unsigned short* new_cuda_image, int rows, int cols){
  __shared__ short localcopy[12][8];

  int coordX = (blockIdx.x*blockDim.x)+threadIdx.x;
  int coordY = (blockIdx.y*blockDim.y)+threadIdx.y;

  int baseX = blockIdx.x*blockDim.x-2;
  int baseY = blockIdx.y*blockDim.y-2;

  int localX = threadIdx.x+2;
  int localY = threadIdx.y+2;

  /* copy from global memory to shared memory */
  if (threadIdx.x < 6){
    for(int i=threadIdx.x*2; i<=threadIdx.x*2+1; i++){
      for(int j=threadIdx.y*2; j<=threadIdx.y*2+1; j++){
        int x = baseX+i;
        int y = baseY+j;
        if (x >= 0 && y >= 0 && x < rows*cols && y < rows*cols){
          localcopy[i][j] = cuda_image[coord(x,y)];
        }else{
          localcopy[i][j] = -1;
        }
      }
    }
  }

  __syncthreads();

  unsigned int sum = 0;
  int count = 0;
  for(int i=localX-2; i<=localX+2; i++){
    for(int j=localY-2; j<=localY+2; j++){
      if (localcopy[i][j] != -1){
        sum += localcopy[i][j];
        count++;
      }
    }
  }

  if (count > 0)
    new_cuda_image[coord(coordX, coordY)] = sum/count;

  //printf("%d\t%d\n", cuda_image[coord(coordX, coordY)], new_cuda_image[coord(coordX, coordY)]);
}

void smooth(unsigned short *image, int rows, int cols){
  unsigned short* cuda_image;
  unsigned short* new_cuda_image;

  hipMalloc(&cuda_image, rows*cols*sizeof(unsigned short));
  hipMalloc(&new_cuda_image, rows*cols*sizeof(unsigned short));

  hipMemcpy(cuda_image, image, rows*cols*sizeof(unsigned short), hipMemcpyHostToDevice);

  dim3 threadsPerBlock(8,4);
  dim3 numBlocks(rows/threadsPerBlock.x, cols/threadsPerBlock.y);
  //dim3 numBlocks(1,1);

  hipLaunchKernelGGL(( smooth_cuda), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, cuda_image, new_cuda_image, rows, cols);
  hipDeviceSynchronize();

  hipError_t cuda_error = hipGetLastError();
  if (cuda_error != hipSuccess)
    printf("Cuda Error: %s\n", hipGetErrorString(cuda_error));

  hipMemcpy(image, new_cuda_image, rows*cols*sizeof(unsigned short), hipMemcpyDeviceToHost);

  hipFree(cuda_image);
  hipFree(new_cuda_image);

  return;
}
4c3e43bbc9fc8480979f8027d29a2ccd7a9aadd9.cu
/* CUDA code using shared memory */
#include "ApplySmooth_CUDA.h"

__global__ void smooth_cuda(unsigned short* cuda_image, unsigned short* new_cuda_image, int rows, int cols){
  __shared__ short localcopy[12][8];

  int coordX = (blockIdx.x*blockDim.x)+threadIdx.x;
  int coordY = (blockIdx.y*blockDim.y)+threadIdx.y;

  int baseX = blockIdx.x*blockDim.x-2;
  int baseY = blockIdx.y*blockDim.y-2;

  int localX = threadIdx.x+2;
  int localY = threadIdx.y+2;

  /* copy from global memory to shared memory */
  if (threadIdx.x < 6){
    for(int i=threadIdx.x*2; i<=threadIdx.x*2+1; i++){
      for(int j=threadIdx.y*2; j<=threadIdx.y*2+1; j++){
        int x = baseX+i;
        int y = baseY+j;
        if (x >= 0 && y >= 0 && x < rows*cols && y < rows*cols){
          localcopy[i][j] = cuda_image[coord(x,y)];
        }else{
          localcopy[i][j] = -1;
        }
      }
    }
  }

  __syncthreads();

  unsigned int sum = 0;
  int count = 0;
  for(int i=localX-2; i<=localX+2; i++){
    for(int j=localY-2; j<=localY+2; j++){
      if (localcopy[i][j] != -1){
        sum += localcopy[i][j];
        count++;
      }
    }
  }

  if (count > 0)
    new_cuda_image[coord(coordX, coordY)] = sum/count;

  //printf("%d\t%d\n", cuda_image[coord(coordX, coordY)], new_cuda_image[coord(coordX, coordY)]);
}

void smooth(unsigned short *image, int rows, int cols){
  unsigned short* cuda_image;
  unsigned short* new_cuda_image;

  cudaMalloc(&cuda_image, rows*cols*sizeof(unsigned short));
  cudaMalloc(&new_cuda_image, rows*cols*sizeof(unsigned short));

  cudaMemcpy(cuda_image, image, rows*cols*sizeof(unsigned short), cudaMemcpyHostToDevice);

  dim3 threadsPerBlock(8,4);
  dim3 numBlocks(rows/threadsPerBlock.x, cols/threadsPerBlock.y);
  //dim3 numBlocks(1,1);

  smooth_cuda<<<numBlocks, threadsPerBlock>>>(cuda_image, new_cuda_image, rows, cols);
  cudaDeviceSynchronize();

  cudaError_t cuda_error = cudaGetLastError();
  if (cuda_error != cudaSuccess)
    printf("Cuda Error: %s\n", cudaGetErrorString(cuda_error));

  cudaMemcpy(image, new_cuda_image, rows*cols*sizeof(unsigned short), cudaMemcpyDeviceToHost);

  cudaFree(cuda_image);
  cudaFree(new_cuda_image);

  return;
}
514c40c2c4002a6121cf0dbc3ae7a8999c972205.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/TensorUtils.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h> #include <ATen/hip/HIPApplyUtils.cuh> #include "cuda_helpers.h" template <typename T> __global__ void PSROIPoolForward( const int nthreads, const T* input, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const T* rois, const int channels_out, T* output, int* channel_mapping) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c_out, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c_out = (index / pooled_width / pooled_height) % channels_out; int n = index / pooled_width / pooled_height / channels_out; // (n, c_in, ph, pw) is the associated element in the input int c_in = (c_out * pooled_height + ph) * pooled_width + pw; // [start, end) interval for spatial sampling const T* offset_rois = rois + n * 5; int roi_batch_ind = offset_rois[0]; int roi_start_w = roundf(offset_rois[1] * spatial_scale); int roi_start_h = roundf(offset_rois[2] * spatial_scale); int roi_end_w = roundf(offset_rois[3] * spatial_scale); int roi_end_h = roundf(offset_rois[4] * spatial_scale); // Force too small ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w, 1); int roi_height = max(roi_end_h - roi_start_h, 1); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); int hstart = static_cast<int>(floor(static_cast<T>(ph) * bin_size_h)); int wstart = static_cast<int>(floor(static_cast<T>(pw) * bin_size_w)); int hend = static_cast<int>(ceil(static_cast<T>(ph + 1) * bin_size_h)); int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) * bin_size_w)); // Add roi offsets and clip to input boundaries hstart = min(max(hstart + roi_start_h, 0), height - 1); hend = min(max(hend + roi_start_h, 0), height - 1); wstart = min(max(wstart + roi_start_w, 0), width - 1); wend = min(max(wend + roi_start_w, 0), width - 1); bool is_empty = (hend <= hstart) || (wend <= wstart); const T* offset_input = input + (roi_batch_ind * channels + c_in) * height * width; T out_sum = 0; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int input_index = h * width + w; out_sum += offset_input[input_index]; } } T bin_area = (hend - hstart) * (wend - wstart); output[index] = is_empty ? 
static_cast<T>(0) : out_sum / bin_area; channel_mapping[index] = c_in; } } template <typename T> __global__ void PSROIPoolBackward( const int nthreads, const T* grad_output, const int* channel_mapping, const int num_rois, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int channels_out, T* grad_input, const T* rois) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, *, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int n = index / pooled_width / pooled_height / channels_out; const T* offset_rois = rois + n * 5; int roi_batch_ind = offset_rois[0]; int roi_start_w = roundf(offset_rois[1] * spatial_scale); int roi_start_h = roundf(offset_rois[2] * spatial_scale); int roi_end_w = roundf(offset_rois[3] * spatial_scale); int roi_end_h = roundf(offset_rois[4] * spatial_scale); // Force too small ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w, 1); int roi_height = max(roi_end_h - roi_start_h, 1); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); int hstart = static_cast<int>(floor(static_cast<T>(ph) * bin_size_h)); int wstart = static_cast<int>(floor(static_cast<T>(pw) * bin_size_w)); int hend = static_cast<int>(ceil(static_cast<T>(ph + 1) * bin_size_h)); int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) * bin_size_w)); // Add roi offsets and clip to input boundaries hstart = min(max(hstart + roi_start_h, 0), height); hend = min(max(hend + roi_start_h, 0), height); wstart = min(max(wstart + roi_start_w, 0), width); wend = min(max(wend + roi_start_w, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); int c_in = channel_mapping[index]; T* grad_input_offset = grad_input + (roi_batch_ind * channels + c_in) * height * width; T bin_area = (hend - hstart) * (wend - wstart); T diff_val = is_empty ? 
static_cast<T>(0) : grad_output[index] / bin_area; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int grad_input_index = h * width + w; atomicAdd(grad_input_offset + grad_input_index, diff_val); } } } } std::tuple<at::Tensor, at::Tensor> PSROIPool_forward_cuda( const at::Tensor& input, const at::Tensor& rois, const float spatial_scale, const int pooled_height, const int pooled_width) { // Check if input tensors are CUDA tensors AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor"); at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2}; at::CheckedFrom c = "PSROIPool_forward_cuda"; at::checkAllSameGPU(c, {input_t, rois_t}); at::checkAllSameType(c, {input_t, rois_t}); at::hip::HIPGuardMasqueradingAsCUDA device_guard(input.device()); auto num_rois = rois.size(0); auto channels = input.size(1); auto height = input.size(2); auto width = input.size(3); AT_ASSERTM( channels % (pooled_height * pooled_width) == 0, "input channels must be a multiple of pooling height * pooling width"); int channels_out = channels / (pooled_height * pooled_width); auto output = at::zeros( {num_rois, channels_out, pooled_height, pooled_width}, input.options()); auto channel_mapping = at::zeros(output.sizes(), input.options().dtype(at::kInt)); auto output_size = output.numel(); if (output_size == 0) { AT_CUDA_CHECK(hipGetLastError()); return std::make_tuple(output, channel_mapping); } hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 grid(::min( at::cuda::ATenCeilDiv( static_cast<int64_t>(output_size), static_cast<int64_t>(512)), static_cast<int64_t>(4096))); dim3 block(512); auto input_ = input.contiguous(), rois_ = rois.contiguous(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "PSROIPool_forward", [&] { hipLaunchKernelGGL(( PSROIPoolForward<scalar_t>), dim3(grid), dim3(block), 0, stream, output_size, input_.data_ptr<scalar_t>(), spatial_scale, channels, height, width, pooled_height, pooled_width, rois_.data_ptr<scalar_t>(), channels_out, output.data_ptr<scalar_t>(), channel_mapping.data_ptr<int>()); }); AT_CUDA_CHECK(hipGetLastError()); return std::make_tuple(output, channel_mapping); } at::Tensor PSROIPool_backward_cuda( const at::Tensor& grad, const at::Tensor& rois, const at::Tensor& channel_mapping, const float spatial_scale, const int pooled_height, const int pooled_width, const int batch_size, const int channels, const int height, const int width) { // Check if input tensors are CUDA tensors AT_ASSERTM(grad.is_cuda(), "grad must be a CUDA tensor"); AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor"); AT_ASSERTM( channel_mapping.is_cuda(), "channel_mapping must be a CUDA tensor"); at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2}, channel_mapping_t{channel_mapping, "channel_mapping", 3}; at::CheckedFrom c = "PSROIPool_backward_cuda"; at::checkAllSameGPU(c, {grad_t, rois_t, channel_mapping_t}); at::checkAllSameType(c, {grad_t, rois_t}); at::hip::HIPGuardMasqueradingAsCUDA device_guard(grad.device()); auto num_rois = rois.size(0); auto grad_input = at::zeros({batch_size, channels, height, width}, grad.options()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 grid(::min( at::cuda::ATenCeilDiv( static_cast<int64_t>(grad.numel()), static_cast<int64_t>(512)), static_cast<int64_t>(4096))); dim3 block(512); // handle possibly empty gradients if (grad.numel() == 0) { AT_CUDA_CHECK(hipGetLastError()); return grad_input; } int 
channels_out = channels / (pooled_height * pooled_width); auto grad_ = grad.contiguous(), rois_ = rois.contiguous(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad.scalar_type(), "PSROIPool_backward", [&] { hipLaunchKernelGGL(( PSROIPoolBackward<scalar_t>), dim3(grid), dim3(block), 0, stream, grad.numel(), grad_.data_ptr<scalar_t>(), channel_mapping.data_ptr<int>(), num_rois, spatial_scale, channels, height, width, pooled_height, pooled_width, channels_out, grad_input.data_ptr<scalar_t>(), rois_.data_ptr<scalar_t>()); }); AT_CUDA_CHECK(hipGetLastError()); return grad_input; }
514c40c2c4002a6121cf0dbc3ae7a8999c972205.cu
#include <ATen/ATen.h> #include <ATen/TensorUtils.h> #include <ATen/cuda/CUDAContext.h> #include <c10/cuda/CUDAGuard.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include "cuda_helpers.h" template <typename T> __global__ void PSROIPoolForward( const int nthreads, const T* input, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const T* rois, const int channels_out, T* output, int* channel_mapping) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c_out, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c_out = (index / pooled_width / pooled_height) % channels_out; int n = index / pooled_width / pooled_height / channels_out; // (n, c_in, ph, pw) is the associated element in the input int c_in = (c_out * pooled_height + ph) * pooled_width + pw; // [start, end) interval for spatial sampling const T* offset_rois = rois + n * 5; int roi_batch_ind = offset_rois[0]; int roi_start_w = roundf(offset_rois[1] * spatial_scale); int roi_start_h = roundf(offset_rois[2] * spatial_scale); int roi_end_w = roundf(offset_rois[3] * spatial_scale); int roi_end_h = roundf(offset_rois[4] * spatial_scale); // Force too small ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w, 1); int roi_height = max(roi_end_h - roi_start_h, 1); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); int hstart = static_cast<int>(floor(static_cast<T>(ph) * bin_size_h)); int wstart = static_cast<int>(floor(static_cast<T>(pw) * bin_size_w)); int hend = static_cast<int>(ceil(static_cast<T>(ph + 1) * bin_size_h)); int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) * bin_size_w)); // Add roi offsets and clip to input boundaries hstart = min(max(hstart + roi_start_h, 0), height - 1); hend = min(max(hend + roi_start_h, 0), height - 1); wstart = min(max(wstart + roi_start_w, 0), width - 1); wend = min(max(wend + roi_start_w, 0), width - 1); bool is_empty = (hend <= hstart) || (wend <= wstart); const T* offset_input = input + (roi_batch_ind * channels + c_in) * height * width; T out_sum = 0; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int input_index = h * width + w; out_sum += offset_input[input_index]; } } T bin_area = (hend - hstart) * (wend - wstart); output[index] = is_empty ? 
static_cast<T>(0) : out_sum / bin_area; channel_mapping[index] = c_in; } } template <typename T> __global__ void PSROIPoolBackward( const int nthreads, const T* grad_output, const int* channel_mapping, const int num_rois, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int channels_out, T* grad_input, const T* rois) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, *, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int n = index / pooled_width / pooled_height / channels_out; const T* offset_rois = rois + n * 5; int roi_batch_ind = offset_rois[0]; int roi_start_w = roundf(offset_rois[1] * spatial_scale); int roi_start_h = roundf(offset_rois[2] * spatial_scale); int roi_end_w = roundf(offset_rois[3] * spatial_scale); int roi_end_h = roundf(offset_rois[4] * spatial_scale); // Force too small ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w, 1); int roi_height = max(roi_end_h - roi_start_h, 1); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); int hstart = static_cast<int>(floor(static_cast<T>(ph) * bin_size_h)); int wstart = static_cast<int>(floor(static_cast<T>(pw) * bin_size_w)); int hend = static_cast<int>(ceil(static_cast<T>(ph + 1) * bin_size_h)); int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) * bin_size_w)); // Add roi offsets and clip to input boundaries hstart = min(max(hstart + roi_start_h, 0), height); hend = min(max(hend + roi_start_h, 0), height); wstart = min(max(wstart + roi_start_w, 0), width); wend = min(max(wend + roi_start_w, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); int c_in = channel_mapping[index]; T* grad_input_offset = grad_input + (roi_batch_ind * channels + c_in) * height * width; T bin_area = (hend - hstart) * (wend - wstart); T diff_val = is_empty ? 
static_cast<T>(0) : grad_output[index] / bin_area; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int grad_input_index = h * width + w; atomicAdd(grad_input_offset + grad_input_index, diff_val); } } } } std::tuple<at::Tensor, at::Tensor> PSROIPool_forward_cuda( const at::Tensor& input, const at::Tensor& rois, const float spatial_scale, const int pooled_height, const int pooled_width) { // Check if input tensors are CUDA tensors AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor"); at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2}; at::CheckedFrom c = "PSROIPool_forward_cuda"; at::checkAllSameGPU(c, {input_t, rois_t}); at::checkAllSameType(c, {input_t, rois_t}); at::cuda::CUDAGuard device_guard(input.device()); auto num_rois = rois.size(0); auto channels = input.size(1); auto height = input.size(2); auto width = input.size(3); AT_ASSERTM( channels % (pooled_height * pooled_width) == 0, "input channels must be a multiple of pooling height * pooling width"); int channels_out = channels / (pooled_height * pooled_width); auto output = at::zeros( {num_rois, channels_out, pooled_height, pooled_width}, input.options()); auto channel_mapping = at::zeros(output.sizes(), input.options().dtype(at::kInt)); auto output_size = output.numel(); if (output_size == 0) { AT_CUDA_CHECK(cudaGetLastError()); return std::make_tuple(output, channel_mapping); } cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 grid(std::min( at::cuda::ATenCeilDiv( static_cast<int64_t>(output_size), static_cast<int64_t>(512)), static_cast<int64_t>(4096))); dim3 block(512); auto input_ = input.contiguous(), rois_ = rois.contiguous(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "PSROIPool_forward", [&] { PSROIPoolForward<scalar_t><<<grid, block, 0, stream>>>( output_size, input_.data_ptr<scalar_t>(), spatial_scale, channels, height, width, pooled_height, pooled_width, rois_.data_ptr<scalar_t>(), channels_out, output.data_ptr<scalar_t>(), channel_mapping.data_ptr<int>()); }); AT_CUDA_CHECK(cudaGetLastError()); return std::make_tuple(output, channel_mapping); } at::Tensor PSROIPool_backward_cuda( const at::Tensor& grad, const at::Tensor& rois, const at::Tensor& channel_mapping, const float spatial_scale, const int pooled_height, const int pooled_width, const int batch_size, const int channels, const int height, const int width) { // Check if input tensors are CUDA tensors AT_ASSERTM(grad.is_cuda(), "grad must be a CUDA tensor"); AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor"); AT_ASSERTM( channel_mapping.is_cuda(), "channel_mapping must be a CUDA tensor"); at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2}, channel_mapping_t{channel_mapping, "channel_mapping", 3}; at::CheckedFrom c = "PSROIPool_backward_cuda"; at::checkAllSameGPU(c, {grad_t, rois_t, channel_mapping_t}); at::checkAllSameType(c, {grad_t, rois_t}); at::cuda::CUDAGuard device_guard(grad.device()); auto num_rois = rois.size(0); auto grad_input = at::zeros({batch_size, channels, height, width}, grad.options()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 grid(std::min( at::cuda::ATenCeilDiv( static_cast<int64_t>(grad.numel()), static_cast<int64_t>(512)), static_cast<int64_t>(4096))); dim3 block(512); // handle possibly empty gradients if (grad.numel() == 0) { AT_CUDA_CHECK(cudaGetLastError()); return grad_input; } int channels_out = channels / (pooled_height * pooled_width); auto grad_ = grad.contiguous(), 
rois_ = rois.contiguous(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad.scalar_type(), "PSROIPool_backward", [&] { PSROIPoolBackward<scalar_t><<<grid, block, 0, stream>>>( grad.numel(), grad_.data_ptr<scalar_t>(), channel_mapping.data_ptr<int>(), num_rois, spatial_scale, channels, height, width, pooled_height, pooled_width, channels_out, grad_input.data_ptr<scalar_t>(), rois_.data_ptr<scalar_t>()); }); AT_CUDA_CHECK(cudaGetLastError()); return grad_input; }
9e7ef32885f6c3afbd49a8d8783b1ef8b686901b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef __cplusplus extern "C" { #endif #include <stdio.h> #include <math.h> #include <float.h> #include "fps_kernel.h" __global__ void farthestpointsamplingKernel(int b, int n, int m, const float * __restrict__ dataset,float * __restrict__ temp, long * __restrict__ idxs){ if (m<=0) return; const int BlockSize=512; __shared__ float dists[BlockSize]; __shared__ int dists_i[BlockSize]; const int BufferSize=3072; __shared__ float buf[BufferSize*3]; for (int i=blockIdx.x;i<b;i+=gridDim.x){ int old=0; if (threadIdx.x==0) idxs[i*m+0]=old; for (int j=threadIdx.x;j<n;j+=blockDim.x){ temp[blockIdx.x*n+j]=1e38; } for (int j=threadIdx.x;j<min(BufferSize,n)*3;j+=blockDim.x){ buf[j]=dataset[i*n*3+j]; } __syncthreads(); for (int j=1;j<m;j++){ int besti=0; float best=-1; float x1=dataset[i*n*3+old*3+0]; float y1=dataset[i*n*3+old*3+1]; float z1=dataset[i*n*3+old*3+2]; for (int k=threadIdx.x;k<n;k+=blockDim.x){ float td=temp[blockIdx.x*n+k]; float x2,y2,z2; if (k<BufferSize){ x2=buf[k*3+0]; y2=buf[k*3+1]; z2=buf[k*3+2]; }else{ x2=dataset[i*n*3+k*3+0]; y2=dataset[i*n*3+k*3+1]; z2=dataset[i*n*3+k*3+2]; } float d=(x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1); float d2=min(d,td); if (d2!=td) temp[blockIdx.x*n+k]=d2; if (d2>best){ best=d2; besti=k; } } dists[threadIdx.x]=best; dists_i[threadIdx.x]=besti; for (int u=0;(1<<u)<blockDim.x;u++){ __syncthreads(); if (threadIdx.x<(blockDim.x>>(u+1))){ int i1=(threadIdx.x*2)<<u; int i2=(threadIdx.x*2+1)<<u; if (dists[i1]<dists[i2]){ dists[i1]=dists[i2]; dists_i[i1]=dists_i[i2]; } } } __syncthreads(); old=dists_i[0]; if (threadIdx.x==0) idxs[i*m+j]=old; } } } //require 32*n working space int farthestpointsamplingLauncher(int b, int n, int m, const float * inp, float * temp, long * out, hipStream_t stream){ // m number of sample points hipError_t err; hipLaunchKernelGGL(( farthestpointsamplingKernel), dim3(32), dim3(512), 0, stream, b, n, m, inp, temp, out); err = hipGetLastError(); if(hipSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", hipGetErrorString(err)); exit(-1); } return 1; } #ifdef __cplusplus } #endif
9e7ef32885f6c3afbd49a8d8783b1ef8b686901b.cu
#ifdef __cplusplus extern "C" { #endif #include <stdio.h> #include <math.h> #include <float.h> #include "fps_kernel.h" __global__ void farthestpointsamplingKernel(int b, int n, int m, const float * __restrict__ dataset,float * __restrict__ temp, long * __restrict__ idxs){ if (m<=0) return; const int BlockSize=512; __shared__ float dists[BlockSize]; __shared__ int dists_i[BlockSize]; const int BufferSize=3072; __shared__ float buf[BufferSize*3]; for (int i=blockIdx.x;i<b;i+=gridDim.x){ int old=0; if (threadIdx.x==0) idxs[i*m+0]=old; for (int j=threadIdx.x;j<n;j+=blockDim.x){ temp[blockIdx.x*n+j]=1e38; } for (int j=threadIdx.x;j<min(BufferSize,n)*3;j+=blockDim.x){ buf[j]=dataset[i*n*3+j]; } __syncthreads(); for (int j=1;j<m;j++){ int besti=0; float best=-1; float x1=dataset[i*n*3+old*3+0]; float y1=dataset[i*n*3+old*3+1]; float z1=dataset[i*n*3+old*3+2]; for (int k=threadIdx.x;k<n;k+=blockDim.x){ float td=temp[blockIdx.x*n+k]; float x2,y2,z2; if (k<BufferSize){ x2=buf[k*3+0]; y2=buf[k*3+1]; z2=buf[k*3+2]; }else{ x2=dataset[i*n*3+k*3+0]; y2=dataset[i*n*3+k*3+1]; z2=dataset[i*n*3+k*3+2]; } float d=(x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1); float d2=min(d,td); if (d2!=td) temp[blockIdx.x*n+k]=d2; if (d2>best){ best=d2; besti=k; } } dists[threadIdx.x]=best; dists_i[threadIdx.x]=besti; for (int u=0;(1<<u)<blockDim.x;u++){ __syncthreads(); if (threadIdx.x<(blockDim.x>>(u+1))){ int i1=(threadIdx.x*2)<<u; int i2=(threadIdx.x*2+1)<<u; if (dists[i1]<dists[i2]){ dists[i1]=dists[i2]; dists_i[i1]=dists_i[i2]; } } } __syncthreads(); old=dists_i[0]; if (threadIdx.x==0) idxs[i*m+j]=old; } } } //require 32*n working space int farthestpointsamplingLauncher(int b, int n, int m, const float * inp, float * temp, long * out, cudaStream_t stream){ // m number of sample points cudaError_t err; farthestpointsamplingKernel<<<32, 512, 0, stream>>>(b, n, m, inp, temp, out); err = cudaGetLastError(); if(cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString(err)); exit(-1); } return 1; } #ifdef __cplusplus } #endif
4669c8c0fd8c23c538107d2492483d1dfa6597bc.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "saber/saber_funcs_param.h"
#include "saber/funcs/impl/cuda/saber_reverse_sequence.h"
#include "saber/funcs/saber_util.h"

namespace anakin {
namespace saber {

template<DataType OpDtype>
SaberStatus SaberReverseSequence<NV, OpDtype>::init(const std::vector<OpTensor*>& inputs,
        std::vector<OpTensor*>& outputs,
        EmptyParam<NV>& param, Context<NV>& ctx) {
    this->_ctx = &ctx;
    return create(inputs, outputs, param, ctx);
};

template<DataType OpDtype>
SaberStatus SaberReverseSequence<NV, OpDtype>::create(const std::vector<OpTensor*>& inputs,
        std::vector<OpTensor*>& outputs,
        EmptyParam<NV>& param, Context<NV>& ctx) {
    // note: the original used `if (this->_ctx = &ctx)` (assignment in the condition);
    // the comparison below preserves the evident intent of rebinding only on change
    if (this->_ctx != &ctx) {
        this->_ctx = &ctx;
    }

    int input_size = inputs.size();
    CHECK_EQ(input_size, 1) << "only support one input now";
    return SaberSuccess;
};

template <typename Dtype>
__global__ static void ker_reverse_sequence(const Dtype* in, Dtype* out, int length, int word_size,
        int* offset) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;

    if (tid < length) {
        int word_id = tid / word_size;
        int word_inner_id = tid % word_size;
        out[offset[word_id]*word_size + word_inner_id] = in[tid];
    }
}

template<DataType OpDtype>
SaberStatus SaberReverseSequence<NV, OpDtype>::dispatch(const std::vector<OpTensor*>& inputs,
        std::vector<OpTensor*>& outputs,
        EmptyParam<NV>& param) {
    int input_size = inputs.size();
    CHECK_EQ(input_size, 1) << "only support one input now";
    hipStream_t stream = this->_ctx->get_compute_stream();

    std::vector<std::vector<int>> offset_vec = inputs[0]->get_seq_offset();
    std::vector<int> offset = offset_vec[offset_vec.size() - 1];

    int batch_size = offset.size() - 1;
    int word_size = inputs[0]->valid_shape()[1];
    int word_sum = offset[batch_size];

    utils::try_expand_tensor(_offset_map, word_sum);
    utils::try_expand_tensor(_offset_map_cu, word_sum);

    int* offset_map_ptr = static_cast<int*>(_offset_map.mutable_data());
    int* offset_map_cu_ptr = static_cast<int*>(_offset_map_cu.mutable_data());

    for (int i = 0; i < batch_size; i++) {
        int seq_len = offset[i + 1] - offset[i];
        int start_word_id = offset[i];

        for (int j = 0; j < seq_len; j++) {
            offset_map_ptr[start_word_id + seq_len - 1 - j] = start_word_id + j;
        }
    }

    CUDA_CHECK(hipMemcpyAsync(offset_map_cu_ptr, offset_map_ptr, sizeof(int)*word_sum, hipMemcpyHostToDevice, stream));

    int tid_sum = word_sum * word_size;
    int block_dim = 256;

    if (tid_sum < block_dim) {
        block_dim = tid_sum;
    }

    int grid_dim = utils::div_up(tid_sum, block_dim);
    const OpDataType* in = static_cast<const OpDataType*>(inputs[0]->data());
    OpDataType* out = static_cast<OpDataType*>(outputs[0]->mutable_data());
    hipLaunchKernelGGL(( ker_reverse_sequence) , dim3(grid_dim), dim3(block_dim), 0, stream, in, out, tid_sum, word_size, offset_map_cu_ptr);

    return SaberSuccess;
};

template class SaberReverseSequence<NV, AK_INT32>;
template class SaberReverseSequence<NV, AK_FLOAT>;
template class SaberReverseSequence<NV, AK_HALF>;
template class SaberReverseSequence<NV, AK_INT8>;

}
}
4669c8c0fd8c23c538107d2492483d1dfa6597bc.cu
#include "saber/saber_funcs_param.h" #include "saber/funcs/impl/cuda/saber_reverse_sequence.h" #include "saber/funcs/saber_util.h" namespace anakin { namespace saber { template<DataType OpDtype> SaberStatus SaberReverseSequence<NV, OpDtype>::init(const std::vector<OpTensor*>& inputs, std::vector<OpTensor*>& outputs, EmptyParam<NV>& param, Context<NV>& ctx) { this->_ctx = &ctx; return create(inputs, outputs, param, ctx); }; template<DataType OpDtype> SaberStatus SaberReverseSequence<NV, OpDtype>::create(const std::vector<OpTensor*>& inputs, std::vector<OpTensor*>& outputs, EmptyParam<NV>& param, Context<NV>& ctx) { if (this->_ctx = &ctx) { this->_ctx = &ctx; } int input_size = inputs.size(); CHECK_EQ(input_size, 1) << "only support one input now"; return SaberSuccess; }; template <typename Dtype> __global__ static void ker_reverse_sequence(const Dtype* in, Dtype* out, int length, int word_size, int* offset) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < length) { int word_id = tid / word_size; int word_inner_id = tid % word_size; out[offset[word_id]*word_size + word_inner_id] = in[tid]; } } template<DataType OpDtype> SaberStatus SaberReverseSequence<NV, OpDtype>::dispatch(const std::vector<OpTensor*>& inputs, std::vector<OpTensor*>& outputs, EmptyParam<NV>& param) { int input_size = inputs.size(); CHECK_EQ(input_size, 1) << "only support one input now"; cudaStream_t stream = this->_ctx->get_compute_stream(); std::vector<std::vector<int>> offset_vec = inputs[0]->get_seq_offset(); std::vector<int> offset = offset_vec[offset_vec.size() - 1]; int batch_size = offset.size() - 1; int word_size = inputs[0]->valid_shape()[1]; int word_sum = offset[batch_size]; utils::try_expand_tensor(_offset_map, word_sum); utils::try_expand_tensor(_offset_map_cu, word_sum); int* offset_map_ptr = static_cast<int*>(_offset_map.mutable_data()); int* offset_map_cu_ptr = static_cast<int*>(_offset_map_cu.mutable_data()); for (int i = 0; i < batch_size; i++) { int seq_len = offset[i + 1] - offset[i]; int start_word_id = offset[i]; for (int j = 0; j < seq_len; j++) { offset_map_ptr[start_word_id + seq_len - 1 - j] = start_word_id + j; } } CUDA_CHECK(cudaMemcpyAsync(offset_map_cu_ptr, offset_map_ptr, sizeof(int)*word_sum, cudaMemcpyHostToDevice, stream)); int tid_sum = word_sum * word_size; int block_dim = 256; if (tid_sum < block_dim) { block_dim = tid_sum; } int grid_dim = utils::div_up(tid_sum, block_dim); const OpDataType* in = static_cast<const OpDataType*>(inputs[0]->data()); OpDataType* out = static_cast<OpDataType*>(outputs[0]->mutable_data()); ker_reverse_sequence <<< grid_dim, block_dim, 0, stream>>>(in, out, tid_sum, word_size, offset_map_cu_ptr); return SaberSuccess; }; template class SaberReverseSequence<NV, AK_INT32>; template class SaberReverseSequence<NV, AK_FLOAT>; template class SaberReverseSequence<NV, AK_HALF>; template class SaberReverseSequence<NV, AK_INT8>; } }
a5e15d63a44af1aa86d0f04abef9241596314565.hip
// !!! This is a file automatically generated by hipify!!! #include "stdlib.h" #include <fstream> #include <iostream> #include <cstddef> #include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> #include <hip/hip_fp16.h> #define MAX_LOOP 1000 #define MAX_DIFF 0.15f #define NUM_JOINTS 3 #define PI 3.14159265358979f #define NUM_JOINTS_P1 (NUM_JOINTS + 1) using namespace std; #ifdef SLOW_MATH #include "../include/cuda_math.cuh" #else #include "../include/fast_math.cuh" #endif #define SCATTER using namespace std; __global__ void invkin_kernel(float *xTarget_in, float *yTarget_in, float *angles, int size, float err_thresh, int speed) { #ifdef SCATTER if(blockIdx.x %100 < speed) { #else if(blockIdx.x < speed) { #endif if(threadIdx.x<blockDim.x/2){ int blockId = blockIdx.x + blockIdx.y * gridDim.x; int idx = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + 2*threadIdx.x; if(idx < size) { half2 angle_out[NUM_JOINTS]; half zero = 0.0f; half one = 1.f; half minus_one = -1.f; for(int i = 0; i < NUM_JOINTS; i++) { angle_out[i] = __float2half2_rn(0.0); //angle_out[i].x = 0.f; //angle_out[i].y = 0.f; } half max_err = err_thresh * (float)(NUM_JOINTS); half err = max_err + one; // initialize error to something greater than error threshold // Initialize x and y data half2 xData[NUM_JOINTS_P1]; half2 yData[NUM_JOINTS_P1]; for (int i = 0 ; i < NUM_JOINTS_P1; i++) { xData[i] = __float2half2_rn((float)i); yData[i] = __float2half2_rn(0.f); } half2 xTarget_in_temp = __floats2half2_rn(xTarget_in[idx],xTarget_in[idx+1]); half2 yTarget_in_temp = __floats2half2_rn(yTarget_in[idx],yTarget_in[idx+1]); //half minus_one = -1.0f; half2 pe_x = xData[NUM_JOINTS]; half2 pe_y = yData[NUM_JOINTS]; for(int curr_loop = 0; curr_loop < MAX_LOOP; curr_loop++) { for (int iter = NUM_JOINTS; iter > 0; iter--) { half2 pc_x = xData[iter-1]; half2 pc_y = yData[iter-1]; half2 diff_pe_pc_x = pe_x - pc_x; half2 diff_pe_pc_y = pe_y - pc_y; // half2 diff_tgt_pc_x = xTarget_in[idx] - pc_x; // half2 diff_tgt_pc_y = yTarget_in[idx] - pc_y; half2 diff_tgt_pc_x = xTarget_in_temp - pc_x; half2 diff_tgt_pc_y = yTarget_in_temp - pc_y; half2 len_diff_pe_pc = fast_h2sqrt (diff_pe_pc_x * diff_pe_pc_x + diff_pe_pc_y * diff_pe_pc_y); half2 len_diff_tgt_pc = fast_h2sqrt (diff_tgt_pc_x * diff_tgt_pc_x + diff_tgt_pc_y * diff_tgt_pc_y); half2 a_x = diff_pe_pc_x * fast_h2rcp(len_diff_pe_pc); half2 a_y = diff_pe_pc_y * fast_h2rcp(len_diff_pe_pc); half2 b_x = diff_tgt_pc_x * fast_h2rcp(len_diff_tgt_pc); half2 b_y = diff_tgt_pc_y * fast_h2rcp(len_diff_tgt_pc); half2 a_dot_b = a_x * b_x + a_y * b_y; //float2 a_dot_b_float = __half22float2(a_dot_b); if (a_dot_b.x > one) { a_dot_b.x = one ; } if (a_dot_b.x < minus_one) { a_dot_b.x = minus_one ; } if (a_dot_b.y > one) { a_dot_b.y = one ; } if (a_dot_b.y < minus_one) { a_dot_b.y = minus_one ; } /* if (a_dot_b > 1.f) a_dot_b = 1.f; else if (a_dot_b < -1.f) a_dot_b = -1.f; */ //float2 a_dot_b_float = __half22float2(a_dot_b); //half2 angle =__floats2half2_rn (acosf(a_dot_b_float.x) * (180.f / PI), acosf(a_dot_b_float.x) * (180.f / PI)); //angle.x = acosf(a_dot_b_float.x) * (180.f / PI); //angle.y = acosf(a_dot_b_float.y) * (180.f / PI); half2 angle = fast_h2acos(a_dot_b) * 57.29578;//(180.f / PI); // Determine angle direction half2 direction = a_x * b_y - a_y * b_x; if (direction.x < zero) angle.x = -angle.x ; if (direction.y < zero) angle.y = -angle.y; // Make the result look more natural (these checks may be omitted) // if (angle > 30.f) // angle = 30.f; // else if (angle < -30.f) // angle = 
-30.f; // Save angle angle_out[iter - 1] = angle; for (int i = 0; i < NUM_JOINTS; i++) { if(i < NUM_JOINTS - 1) { angle_out[i+1] += angle_out[i]; //angle_out[i+1].y += angle_out[i].y; } } }// loop NUM_JOINTS }// loop 1k float2 angle_0 = __half22float2(angle_out[0]); float2 angle_1 = __half22float2(angle_out[1]); float2 angle_2 = __half22float2(angle_out[2]); angles[idx * NUM_JOINTS + 0] = angle_0.x; angles[idx * NUM_JOINTS + 1] = angle_1.x; angles[idx * NUM_JOINTS + 2] = angle_2.x; angles[(idx+1) * NUM_JOINTS + 0] = angle_0.y; angles[(idx+1) * NUM_JOINTS + 1] = angle_1.y; angles[(idx+1) * NUM_JOINTS + 2] = angle_2.y; } } //end if(threadIdx.x<512/2) else return; } else { int blockId = blockIdx.x + blockIdx.y * gridDim.x; int idx = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x; if(idx < size) { // float parrotInput[2]; // float parrotOutput[3]; float angle_out[NUM_JOINTS]; for(int i = 0; i < NUM_JOINTS; i++) { angle_out[i] = 0.0; } float max_err = err_thresh * (float)(NUM_JOINTS); float err = max_err + 1.f; // initialize error to something greater than error threshold /* parrot not used parrotInput[0] = xTarget_in[idx]; parrotInput[1] = yTarget_in[idx]; #pragma parrot(input, "invkin_kernel", [2]<-1.0; 1.0>parrotInput) */ //float max_err = err_thresh * (float)(NUM_JOINTS); //float err = max_err + 1.f; // Initialize x and y data float xData[NUM_JOINTS_P1]; float yData[NUM_JOINTS_P1]; for (int i = 0 ; i < NUM_JOINTS_P1; i++) { xData[i] = i; yData[i] = 0.f; } for(int curr_loop = 0; curr_loop < MAX_LOOP; curr_loop++) { for (int iter = NUM_JOINTS; iter > 0; iter--) { float pe_x = xData[NUM_JOINTS]; float pe_y = yData[NUM_JOINTS]; float pc_x = xData[iter-1]; float pc_y = yData[iter-1]; float diff_pe_pc_x = pe_x - pc_x; float diff_pe_pc_y = pe_y - pc_y; float diff_tgt_pc_x = xTarget_in[idx] - pc_x; float diff_tgt_pc_y = yTarget_in[idx] - pc_y; float len_diff_pe_pc = sqrt(diff_pe_pc_x * diff_pe_pc_x + diff_pe_pc_y * diff_pe_pc_y); float len_diff_tgt_pc = sqrt(diff_tgt_pc_x * diff_tgt_pc_x + diff_tgt_pc_y * diff_tgt_pc_y); float a_x = diff_pe_pc_x / len_diff_pe_pc; float a_y = diff_pe_pc_y / len_diff_pe_pc; float b_x = diff_tgt_pc_x / len_diff_tgt_pc; float b_y = diff_tgt_pc_y / len_diff_tgt_pc; float a_dot_b = a_x * b_x + a_y * b_y; if (a_dot_b > 1.f) a_dot_b = 1.f; else if (a_dot_b < -1.f) a_dot_b = -1.f; float angle = acos(a_dot_b) * (180.f / PI); // Determine angle direction float direction = a_x * b_y - a_y * b_x; if (direction < 0.f) angle = -angle; // Make the result look more natural (these checks may be omitted) // if (angle > 30.f) // angle = 30.f; // else if (angle < -30.f) // angle = -30.f; // Save angle angle_out[iter - 1] = angle; for (int i = 0; i < NUM_JOINTS; i++) { if(i < NUM_JOINTS - 1) { angle_out[i+1] += angle_out[i]; } } } } /* parrot : not used parrotOutput[0] = angle_out[0] / 30.0; parrotOutput[1] = angle_out[1] / 30.0; parrotOutput[2] = angle_out[2] / 30.0; #pragma parrot(output, "invkin_kernel", [3]<-1.0; 1.0>parrotOutput) angle_out[0] = parrotOutput[0] * 30.0; angle_out[1] = parrotOutput[1] * 30.0; angle_out[2] = parrotOutput[2] * 30.0; */ angles[idx * NUM_JOINTS + 0] = angle_out[0]; angles[idx * NUM_JOINTS + 1] = angle_out[1]; angles[idx * NUM_JOINTS + 2] = angle_out[2]; } } } int main(int argc, char* argv[]) { int speed = 50; std::cout << "# Speed = " << speed << std::endl; if(argc != 4) { std::cerr << "Usage: ./invkin.out <input file coefficients> <output file> <error threshold>" << std::endl; exit(EXIT_FAILURE); } float* xTarget_in_h; 
float* yTarget_in_h; float* angle_out_h; hipError_t cudaStatus; int data_size = 0; // process the files ifstream coordinate_in_file (argv[1]); ofstream angle_out_file (argv[2]); float err_thresh = atof(argv[3]); if(coordinate_in_file.is_open()) { coordinate_in_file >> data_size; std::cout << "# Data Size = " << data_size << std::endl; } // allocate the memory xTarget_in_h = new (nothrow) float[data_size]; if(xTarget_in_h == NULL) { std::cerr << "Memory allocation fails!!!" << std::endl; exit(EXIT_FAILURE); } yTarget_in_h = new (nothrow) float[data_size]; if(yTarget_in_h == NULL) { std::cerr << "Memory allocation fails!!!" << std::endl; exit(EXIT_FAILURE); } angle_out_h = new (nothrow) float[data_size*NUM_JOINTS]; if(angle_out_h == NULL) { std::cerr << "Memory allocation fails!!!" << std::endl; exit(EXIT_FAILURE); } // Prepare hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // add data to the arrays float xTarget_tmp, yTarget_tmp; int coeff_index = 0; while(coeff_index < data_size) { coordinate_in_file >> xTarget_tmp >> yTarget_tmp; for(int i = 0; i < NUM_JOINTS ; i++) { angle_out_h[coeff_index * NUM_JOINTS + i] = 0.0; } xTarget_in_h[coeff_index] = xTarget_tmp; yTarget_in_h[coeff_index++] = yTarget_tmp; } std::cout << "# Coordinates are read from file..." << std::endl; // memory allocations on the host float *xTarget_in_d, *yTarget_in_d; float *angle_out_d; hipMalloc((void**) &xTarget_in_d, data_size * sizeof(float)); hipMalloc((void**) &yTarget_in_d, data_size * sizeof(float)); hipMalloc((void**) &angle_out_d, data_size * NUM_JOINTS * sizeof(float)); std::cout << "# Memory allocation on GPU is done..." << std::endl; hipMemcpy(xTarget_in_d, xTarget_in_h, data_size * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(yTarget_in_d, yTarget_in_h, data_size * sizeof(float), hipMemcpyHostToDevice); std::cout << "# Data are transfered to GPU..." << std::endl; dim3 dimBlock ( 512, 1 ); dim3 dimGrid ( data_size / 512, 1 ); hipEventRecord(start, 0); #pragma parrot.start("invkin_kernel") hipLaunchKernelGGL(( invkin_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, xTarget_in_d, yTarget_in_d, angle_out_d, data_size, err_thresh, speed); #pragma parrot.end("invkin_kernel") cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { std::cout << "Something was wrong! Error code: " << cudaStatus << std::endl; } hipEventRecord(stop, 0); hipEventSynchronize(stop); float elapsedTime; hipEventElapsedTime(&elapsedTime, start, stop); hipEventDestroy(start); hipEventDestroy(stop); std::cout << "# Elapsed Time in `nrpoly3` kernel = " << elapsedTime << std::endl; std::cout << "# GPU computation is done ..." << std::endl; hipMemcpy(angle_out_h, angle_out_d, data_size * NUM_JOINTS * sizeof(float), hipMemcpyDeviceToHost); for(int i = 0; i < data_size; i++) { // angle_out_file << xTarget_in_h[i] << " " << yTarget_in_h[i] << " "; //compare output, not need to store this for(int j = 0 ; j < NUM_JOINTS; j++) { angle_out_file << angle_out_h[i * NUM_JOINTS + j] << " "; } angle_out_file << std::endl; } // close files coordinate_in_file.close(); angle_out_file.close(); // de-allocate the memory delete[] xTarget_in_h; delete[] yTarget_in_h; delete[] angle_out_h; // de-allocate cuda memory hipFree(xTarget_in_d); hipFree(yTarget_in_d); hipFree(angle_out_d); std::cout << "Thank you..." << std::endl; }
a5e15d63a44af1aa86d0f04abef9241596314565.cu
#include "stdlib.h" #include <fstream> #include <iostream> #include <cstddef> #include <cuda_runtime_api.h> #include <cuda.h> #include <cuda_fp16.h> #define MAX_LOOP 1000 #define MAX_DIFF 0.15f #define NUM_JOINTS 3 #define PI 3.14159265358979f #define NUM_JOINTS_P1 (NUM_JOINTS + 1) using namespace std; #ifdef SLOW_MATH #include "../include/cuda_math.cuh" #else #include "../include/fast_math.cuh" #endif #define SCATTER using namespace std; __global__ void invkin_kernel(float *xTarget_in, float *yTarget_in, float *angles, int size, float err_thresh, int speed) { #ifdef SCATTER if(blockIdx.x %100 < speed) { #else if(blockIdx.x < speed) { #endif if(threadIdx.x<blockDim.x/2){ int blockId = blockIdx.x + blockIdx.y * gridDim.x; int idx = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + 2*threadIdx.x; if(idx < size) { half2 angle_out[NUM_JOINTS]; half zero = 0.0f; half one = 1.f; half minus_one = -1.f; for(int i = 0; i < NUM_JOINTS; i++) { angle_out[i] = __float2half2_rn(0.0); //angle_out[i].x = 0.f; //angle_out[i].y = 0.f; } half max_err = err_thresh * (float)(NUM_JOINTS); half err = max_err + one; // initialize error to something greater than error threshold // Initialize x and y data half2 xData[NUM_JOINTS_P1]; half2 yData[NUM_JOINTS_P1]; for (int i = 0 ; i < NUM_JOINTS_P1; i++) { xData[i] = __float2half2_rn((float)i); yData[i] = __float2half2_rn(0.f); } half2 xTarget_in_temp = __floats2half2_rn(xTarget_in[idx],xTarget_in[idx+1]); half2 yTarget_in_temp = __floats2half2_rn(yTarget_in[idx],yTarget_in[idx+1]); //half minus_one = -1.0f; half2 pe_x = xData[NUM_JOINTS]; half2 pe_y = yData[NUM_JOINTS]; for(int curr_loop = 0; curr_loop < MAX_LOOP; curr_loop++) { for (int iter = NUM_JOINTS; iter > 0; iter--) { half2 pc_x = xData[iter-1]; half2 pc_y = yData[iter-1]; half2 diff_pe_pc_x = pe_x - pc_x; half2 diff_pe_pc_y = pe_y - pc_y; // half2 diff_tgt_pc_x = xTarget_in[idx] - pc_x; // half2 diff_tgt_pc_y = yTarget_in[idx] - pc_y; half2 diff_tgt_pc_x = xTarget_in_temp - pc_x; half2 diff_tgt_pc_y = yTarget_in_temp - pc_y; half2 len_diff_pe_pc = fast_h2sqrt (diff_pe_pc_x * diff_pe_pc_x + diff_pe_pc_y * diff_pe_pc_y); half2 len_diff_tgt_pc = fast_h2sqrt (diff_tgt_pc_x * diff_tgt_pc_x + diff_tgt_pc_y * diff_tgt_pc_y); half2 a_x = diff_pe_pc_x * fast_h2rcp(len_diff_pe_pc); half2 a_y = diff_pe_pc_y * fast_h2rcp(len_diff_pe_pc); half2 b_x = diff_tgt_pc_x * fast_h2rcp(len_diff_tgt_pc); half2 b_y = diff_tgt_pc_y * fast_h2rcp(len_diff_tgt_pc); half2 a_dot_b = a_x * b_x + a_y * b_y; //float2 a_dot_b_float = __half22float2(a_dot_b); if (a_dot_b.x > one) { a_dot_b.x = one ; } if (a_dot_b.x < minus_one) { a_dot_b.x = minus_one ; } if (a_dot_b.y > one) { a_dot_b.y = one ; } if (a_dot_b.y < minus_one) { a_dot_b.y = minus_one ; } /* if (a_dot_b > 1.f) a_dot_b = 1.f; else if (a_dot_b < -1.f) a_dot_b = -1.f; */ //float2 a_dot_b_float = __half22float2(a_dot_b); //half2 angle =__floats2half2_rn (acosf(a_dot_b_float.x) * (180.f / PI), acosf(a_dot_b_float.x) * (180.f / PI)); //angle.x = acosf(a_dot_b_float.x) * (180.f / PI); //angle.y = acosf(a_dot_b_float.y) * (180.f / PI); half2 angle = fast_h2acos(a_dot_b) * 57.29578;//(180.f / PI); // Determine angle direction half2 direction = a_x * b_y - a_y * b_x; if (direction.x < zero) angle.x = -angle.x ; if (direction.y < zero) angle.y = -angle.y; // Make the result look more natural (these checks may be omitted) // if (angle > 30.f) // angle = 30.f; // else if (angle < -30.f) // angle = -30.f; // Save angle angle_out[iter - 1] = angle; for (int i = 0; i < 
NUM_JOINTS; i++) { if(i < NUM_JOINTS - 1) { angle_out[i+1] += angle_out[i]; //angle_out[i+1].y += angle_out[i].y; } } }// loop NUM_JOINTS }// loop 1k float2 angle_0 = __half22float2(angle_out[0]); float2 angle_1 = __half22float2(angle_out[1]); float2 angle_2 = __half22float2(angle_out[2]); angles[idx * NUM_JOINTS + 0] = angle_0.x; angles[idx * NUM_JOINTS + 1] = angle_1.x; angles[idx * NUM_JOINTS + 2] = angle_2.x; angles[(idx+1) * NUM_JOINTS + 0] = angle_0.y; angles[(idx+1) * NUM_JOINTS + 1] = angle_1.y; angles[(idx+1) * NUM_JOINTS + 2] = angle_2.y; } } //end if(threadIdx.x<512/2) else return; } else { int blockId = blockIdx.x + blockIdx.y * gridDim.x; int idx = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x; if(idx < size) { // float parrotInput[2]; // float parrotOutput[3]; float angle_out[NUM_JOINTS]; for(int i = 0; i < NUM_JOINTS; i++) { angle_out[i] = 0.0; } float max_err = err_thresh * (float)(NUM_JOINTS); float err = max_err + 1.f; // initialize error to something greater than error threshold /* parrot not used parrotInput[0] = xTarget_in[idx]; parrotInput[1] = yTarget_in[idx]; #pragma parrot(input, "invkin_kernel", [2]<-1.0; 1.0>parrotInput) */ //float max_err = err_thresh * (float)(NUM_JOINTS); //float err = max_err + 1.f; // Initialize x and y data float xData[NUM_JOINTS_P1]; float yData[NUM_JOINTS_P1]; for (int i = 0 ; i < NUM_JOINTS_P1; i++) { xData[i] = i; yData[i] = 0.f; } for(int curr_loop = 0; curr_loop < MAX_LOOP; curr_loop++) { for (int iter = NUM_JOINTS; iter > 0; iter--) { float pe_x = xData[NUM_JOINTS]; float pe_y = yData[NUM_JOINTS]; float pc_x = xData[iter-1]; float pc_y = yData[iter-1]; float diff_pe_pc_x = pe_x - pc_x; float diff_pe_pc_y = pe_y - pc_y; float diff_tgt_pc_x = xTarget_in[idx] - pc_x; float diff_tgt_pc_y = yTarget_in[idx] - pc_y; float len_diff_pe_pc = sqrt(diff_pe_pc_x * diff_pe_pc_x + diff_pe_pc_y * diff_pe_pc_y); float len_diff_tgt_pc = sqrt(diff_tgt_pc_x * diff_tgt_pc_x + diff_tgt_pc_y * diff_tgt_pc_y); float a_x = diff_pe_pc_x / len_diff_pe_pc; float a_y = diff_pe_pc_y / len_diff_pe_pc; float b_x = diff_tgt_pc_x / len_diff_tgt_pc; float b_y = diff_tgt_pc_y / len_diff_tgt_pc; float a_dot_b = a_x * b_x + a_y * b_y; if (a_dot_b > 1.f) a_dot_b = 1.f; else if (a_dot_b < -1.f) a_dot_b = -1.f; float angle = acos(a_dot_b) * (180.f / PI); // Determine angle direction float direction = a_x * b_y - a_y * b_x; if (direction < 0.f) angle = -angle; // Make the result look more natural (these checks may be omitted) // if (angle > 30.f) // angle = 30.f; // else if (angle < -30.f) // angle = -30.f; // Save angle angle_out[iter - 1] = angle; for (int i = 0; i < NUM_JOINTS; i++) { if(i < NUM_JOINTS - 1) { angle_out[i+1] += angle_out[i]; } } } } /* parrot : not used parrotOutput[0] = angle_out[0] / 30.0; parrotOutput[1] = angle_out[1] / 30.0; parrotOutput[2] = angle_out[2] / 30.0; #pragma parrot(output, "invkin_kernel", [3]<-1.0; 1.0>parrotOutput) angle_out[0] = parrotOutput[0] * 30.0; angle_out[1] = parrotOutput[1] * 30.0; angle_out[2] = parrotOutput[2] * 30.0; */ angles[idx * NUM_JOINTS + 0] = angle_out[0]; angles[idx * NUM_JOINTS + 1] = angle_out[1]; angles[idx * NUM_JOINTS + 2] = angle_out[2]; } } } int main(int argc, char* argv[]) { int speed = 50; std::cout << "# Speed = " << speed << std::endl; if(argc != 4) { std::cerr << "Usage: ./invkin.out <input file coefficients> <output file> <error threshold>" << std::endl; exit(EXIT_FAILURE); } float* xTarget_in_h; float* yTarget_in_h; float* angle_out_h; cudaError_t cudaStatus; int 
data_size = 0; // process the files ifstream coordinate_in_file (argv[1]); ofstream angle_out_file (argv[2]); float err_thresh = atof(argv[3]); if(coordinate_in_file.is_open()) { coordinate_in_file >> data_size; std::cout << "# Data Size = " << data_size << std::endl; } // allocate the memory xTarget_in_h = new (nothrow) float[data_size]; if(xTarget_in_h == NULL) { std::cerr << "Memory allocation fails!!!" << std::endl; exit(EXIT_FAILURE); } yTarget_in_h = new (nothrow) float[data_size]; if(yTarget_in_h == NULL) { std::cerr << "Memory allocation fails!!!" << std::endl; exit(EXIT_FAILURE); } angle_out_h = new (nothrow) float[data_size*NUM_JOINTS]; if(angle_out_h == NULL) { std::cerr << "Memory allocation fails!!!" << std::endl; exit(EXIT_FAILURE); } // Prepare cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // add data to the arrays float xTarget_tmp, yTarget_tmp; int coeff_index = 0; while(coeff_index < data_size) { coordinate_in_file >> xTarget_tmp >> yTarget_tmp; for(int i = 0; i < NUM_JOINTS ; i++) { angle_out_h[coeff_index * NUM_JOINTS + i] = 0.0; } xTarget_in_h[coeff_index] = xTarget_tmp; yTarget_in_h[coeff_index++] = yTarget_tmp; } std::cout << "# Coordinates are read from file..." << std::endl; // memory allocations on the host float *xTarget_in_d, *yTarget_in_d; float *angle_out_d; cudaMalloc((void**) &xTarget_in_d, data_size * sizeof(float)); cudaMalloc((void**) &yTarget_in_d, data_size * sizeof(float)); cudaMalloc((void**) &angle_out_d, data_size * NUM_JOINTS * sizeof(float)); std::cout << "# Memory allocation on GPU is done..." << std::endl; cudaMemcpy(xTarget_in_d, xTarget_in_h, data_size * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(yTarget_in_d, yTarget_in_h, data_size * sizeof(float), cudaMemcpyHostToDevice); std::cout << "# Data are transfered to GPU..." << std::endl; dim3 dimBlock ( 512, 1 ); dim3 dimGrid ( data_size / 512, 1 ); cudaEventRecord(start, 0); #pragma parrot.start("invkin_kernel") invkin_kernel<<<dimGrid, dimBlock>>>(xTarget_in_d, yTarget_in_d, angle_out_d, data_size, err_thresh, speed); #pragma parrot.end("invkin_kernel") cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { std::cout << "Something was wrong! Error code: " << cudaStatus << std::endl; } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); float elapsedTime; cudaEventElapsedTime(&elapsedTime, start, stop); cudaEventDestroy(start); cudaEventDestroy(stop); std::cout << "# Elapsed Time in `nrpoly3` kernel = " << elapsedTime << std::endl; std::cout << "# GPU computation is done ..." << std::endl; cudaMemcpy(angle_out_h, angle_out_d, data_size * NUM_JOINTS * sizeof(float), cudaMemcpyDeviceToHost); for(int i = 0; i < data_size; i++) { // angle_out_file << xTarget_in_h[i] << " " << yTarget_in_h[i] << " "; //compare output, not need to store this for(int j = 0 ; j < NUM_JOINTS; j++) { angle_out_file << angle_out_h[i * NUM_JOINTS + j] << " "; } angle_out_file << std::endl; } // close files coordinate_in_file.close(); angle_out_file.close(); // de-allocate the memory delete[] xTarget_in_h; delete[] yTarget_in_h; delete[] angle_out_h; // de-allocate cuda memory cudaFree(xTarget_in_d); cudaFree(yTarget_in_d); cudaFree(angle_out_d); std::cout << "Thank you..." << std::endl; }
adea8e8dc7f292030fbcdc328bab5e2e21a271a1.hip
// !!! This is a file automatically generated by hipify!!!
/*
 * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <iostream>
#include <hip/hip_runtime_api.h>

namespace nvinfer1
{
namespace plugin
{

#define checkCudaErrors(status_)                                       \
    {                                                                  \
        auto const status = status_;                                   \
        if (status != 0)                                               \
        {                                                              \
            std::cout << "Cuda failure: " << hipGetErrorString(status) \
                      << " at line " << __LINE__                       \
                      << " in file " << __FILE__                       \
                      << " error status: " << status                   \
                      << std::endl;                                    \
            abort();                                                   \
        }                                                              \
    }

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

__device__ float sigmoid(const float x)
{
    return 1.0f / (1.0f + expf(-x));
}

__global__ void postprocess_kernal(const float *cls_input, float const* box_input, const float *dir_cls_input,
    float *anchors, float *anchors_bottom_height, float *bndbox_output, int *object_counter,
    const float min_x_range, const float max_x_range, const float min_y_range, const float max_y_range,
    const int feature_x_size, const int feature_y_size, const int num_anchors, const int num_classes,
    const int num_box_values, const float score_thresh, const float dir_offset, const float dir_limit_offset,
    const int num_dir_bins)
{
    int max_box_num = feature_x_size * feature_y_size * num_anchors;
    int loc_index = blockIdx.x;
    int batch_idx = blockIdx.x / (feature_x_size * feature_y_size);
    int loc_index_in_frame = blockIdx.x % (feature_x_size * feature_y_size);
    int ith_anchor = threadIdx.x;
    if (ith_anchor >= num_anchors)
    {
        return;
    }
    int col = loc_index_in_frame % feature_x_size;
    int row = loc_index_in_frame / feature_x_size;
    float x_offset = min_x_range + col * (max_x_range - min_x_range) / (feature_x_size - 1);
    float y_offset = min_y_range + row * (max_y_range - min_y_range) / (feature_y_size - 1);
    int cls_offset = loc_index * num_classes * num_anchors + ith_anchor * num_classes;
    float dev_cls[2] = {-1, 0};

    const float *scores = cls_input + cls_offset;
    float max_score = sigmoid(scores[0]);
    int cls_id = 0;
    for (int i = 1; i < num_classes; i++)
    {
        float cls_score = sigmoid(scores[i]);
        if (cls_score > max_score)
        {
            max_score = cls_score;
            cls_id = i;
        }
    }
    dev_cls[0] = static_cast<float>(cls_id);
    dev_cls[1] = max_score;

    if (dev_cls[1] >= score_thresh)
    {
        int box_offset = loc_index * num_anchors * num_box_values + ith_anchor * num_box_values;
        int dir_cls_offset = loc_index * num_anchors * 2 + ith_anchor * 2;
        float *anchor_ptr = anchors + ith_anchor * 4;
        float z_offset = anchor_ptr[2] / 2 + anchors_bottom_height[ith_anchor / 2];
        float anchor[7] = {x_offset, y_offset, z_offset, anchor_ptr[0], anchor_ptr[1], anchor_ptr[2], anchor_ptr[3]};
        float const* box_encodings = box_input + box_offset;

        float xa = anchor[0];
        float ya = anchor[1];
        float za = anchor[2];
        float dxa = anchor[3];
        float dya = anchor[4];
        float dza = anchor[5];
        float ra = anchor[6];
        float diagonal = sqrtf(dxa * dxa + dya * dya);
        float be0 = box_encodings[0] * diagonal + xa;
        float be1 = box_encodings[1] * diagonal + ya;
        float be2 = box_encodings[2] * dza + za;
        float be3 = expf(box_encodings[3]) * dxa;
        float be4 = expf(box_encodings[4]) * dya;
        float be5 = expf(box_encodings[5]) * dza;
        float be6 = box_encodings[6] + ra;

        float yaw;
        int dir_label = dir_cls_input[dir_cls_offset] > dir_cls_input[dir_cls_offset + 1] ? 0 : 1;
        float period = 2.0f * float(M_PI) / num_dir_bins;
        float val = be6 - dir_offset;
        float dir_rot = val - floor(val / period + dir_limit_offset) * period;
        yaw = dir_rot + dir_offset + period * dir_label;

        int resCount = atomicAdd(object_counter + batch_idx, 1);
        float *data = bndbox_output + (batch_idx * max_box_num + resCount) * 9;
        data[0] = be0;
        data[1] = be1;
        data[2] = be2;
        data[3] = be3;
        data[4] = be4;
        data[5] = be5;
        data[6] = yaw;
        data[7] = dev_cls[0];
        data[8] = dev_cls[1];
    }
}

void decodeBbox3DLaunch(
    const int batch_size,
    const float *cls_input,
    const float *box_input,
    const float *dir_cls_input,
    float *anchors,
    float *anchors_bottom_height,
    float *bndbox_output,
    int *object_counter,
    const float min_x_range,
    const float max_x_range,
    const float min_y_range,
    const float max_y_range,
    const int feature_x_size,
    const int feature_y_size,
    const int num_anchors,
    const int num_classes,
    const int num_box_values,
    const float score_thresh,
    const float dir_offset,
    const float dir_limit_offset,
    const int num_dir_bins,
    hipStream_t stream)
{
    int bev_size = batch_size * feature_x_size * feature_y_size;
    dim3 threads (num_anchors);
    dim3 blocks (bev_size);

    hipLaunchKernelGGL(( postprocess_kernal), dim3(blocks), dim3(threads), 0, stream,
        cls_input, box_input, dir_cls_input, anchors, anchors_bottom_height, bndbox_output, object_counter,
        min_x_range, max_x_range, min_y_range, max_y_range,
        feature_x_size, feature_y_size, num_anchors, num_classes, num_box_values,
        score_thresh, dir_offset, dir_limit_offset, num_dir_bins);
    checkCudaErrors(hipGetLastError());
}

} // namespace plugin
} // namespace nvinfer1
adea8e8dc7f292030fbcdc328bab5e2e21a271a1.cu
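// File overview (editor note): anchor-based 3D bounding-box decoding. One block per BEV cell
// and batch entry, one thread per anchor: the kernel picks the highest-scoring class, and when
// the sigmoid score clears score_thresh it decodes the box regression against the anchor,
// resolves yaw from the direction classifier, and appends a 9-value record
// (x, y, z, dx, dy, dz, yaw, class id, score) to bndbox_output via an atomic per-batch counter.
// decodeBbox3DLaunch is the host-side launcher; the .hip file above is the hipified twin of this source.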
/*
 * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <iostream>
#include <cuda_runtime_api.h>

namespace nvinfer1
{
namespace plugin
{

#define checkCudaErrors(status_)                                        \
    {                                                                   \
        auto const status = status_;                                    \
        if (status != 0)                                                \
        {                                                               \
            std::cout << "Cuda failure: " << cudaGetErrorString(status) \
                      << " at line " << __LINE__                        \
                      << " in file " << __FILE__                        \
                      << " error status: " << status                    \
                      << std::endl;                                     \
            abort();                                                    \
        }                                                               \
    }

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

__device__ float sigmoid(const float x)
{
    return 1.0f / (1.0f + expf(-x));
}

__global__ void postprocess_kernal(const float *cls_input, float const* box_input, const float *dir_cls_input,
    float *anchors, float *anchors_bottom_height, float *bndbox_output, int *object_counter,
    const float min_x_range, const float max_x_range, const float min_y_range, const float max_y_range,
    const int feature_x_size, const int feature_y_size, const int num_anchors, const int num_classes,
    const int num_box_values, const float score_thresh, const float dir_offset, const float dir_limit_offset,
    const int num_dir_bins)
{
    int max_box_num = feature_x_size * feature_y_size * num_anchors;
    int loc_index = blockIdx.x;
    int batch_idx = blockIdx.x / (feature_x_size * feature_y_size);
    int loc_index_in_frame = blockIdx.x % (feature_x_size * feature_y_size);
    int ith_anchor = threadIdx.x;
    if (ith_anchor >= num_anchors)
    {
        return;
    }
    int col = loc_index_in_frame % feature_x_size;
    int row = loc_index_in_frame / feature_x_size;
    float x_offset = min_x_range + col * (max_x_range - min_x_range) / (feature_x_size - 1);
    float y_offset = min_y_range + row * (max_y_range - min_y_range) / (feature_y_size - 1);
    int cls_offset = loc_index * num_classes * num_anchors + ith_anchor * num_classes;
    float dev_cls[2] = {-1, 0};

    const float *scores = cls_input + cls_offset;
    float max_score = sigmoid(scores[0]);
    int cls_id = 0;
    for (int i = 1; i < num_classes; i++)
    {
        float cls_score = sigmoid(scores[i]);
        if (cls_score > max_score)
        {
            max_score = cls_score;
            cls_id = i;
        }
    }
    dev_cls[0] = static_cast<float>(cls_id);
    dev_cls[1] = max_score;

    if (dev_cls[1] >= score_thresh)
    {
        int box_offset = loc_index * num_anchors * num_box_values + ith_anchor * num_box_values;
        int dir_cls_offset = loc_index * num_anchors * 2 + ith_anchor * 2;
        float *anchor_ptr = anchors + ith_anchor * 4;
        float z_offset = anchor_ptr[2] / 2 + anchors_bottom_height[ith_anchor / 2];
        float anchor[7] = {x_offset, y_offset, z_offset, anchor_ptr[0], anchor_ptr[1], anchor_ptr[2], anchor_ptr[3]};
        float const* box_encodings = box_input + box_offset;

        float xa = anchor[0];
        float ya = anchor[1];
        float za = anchor[2];
        float dxa = anchor[3];
        float dya = anchor[4];
        float dza = anchor[5];
        float ra = anchor[6];
        float diagonal = sqrtf(dxa * dxa + dya * dya);
        float be0 = box_encodings[0] * diagonal + xa;
        float be1 = box_encodings[1] * diagonal + ya;
        float be2 = box_encodings[2] * dza + za;
        float be3 = expf(box_encodings[3]) * dxa;
        float be4 = expf(box_encodings[4]) * dya;
        float be5 = expf(box_encodings[5]) * dza;
        float be6 = box_encodings[6] + ra;

        float yaw;
        int dir_label = dir_cls_input[dir_cls_offset] > dir_cls_input[dir_cls_offset + 1] ? 0 : 1;
        float period = 2.0f * float(M_PI) / num_dir_bins;
        float val = be6 - dir_offset;
        float dir_rot = val - floor(val / period + dir_limit_offset) * period;
        yaw = dir_rot + dir_offset + period * dir_label;

        int resCount = atomicAdd(object_counter + batch_idx, 1);
        float *data = bndbox_output + (batch_idx * max_box_num + resCount) * 9;
        data[0] = be0;
        data[1] = be1;
        data[2] = be2;
        data[3] = be3;
        data[4] = be4;
        data[5] = be5;
        data[6] = yaw;
        data[7] = dev_cls[0];
        data[8] = dev_cls[1];
    }
}

void decodeBbox3DLaunch(
    const int batch_size,
    const float *cls_input,
    const float *box_input,
    const float *dir_cls_input,
    float *anchors,
    float *anchors_bottom_height,
    float *bndbox_output,
    int *object_counter,
    const float min_x_range,
    const float max_x_range,
    const float min_y_range,
    const float max_y_range,
    const int feature_x_size,
    const int feature_y_size,
    const int num_anchors,
    const int num_classes,
    const int num_box_values,
    const float score_thresh,
    const float dir_offset,
    const float dir_limit_offset,
    const int num_dir_bins,
    cudaStream_t stream)
{
    int bev_size = batch_size * feature_x_size * feature_y_size;
    dim3 threads (num_anchors);
    dim3 blocks (bev_size);

    postprocess_kernal<<<blocks, threads, 0, stream>>>(
        cls_input, box_input, dir_cls_input, anchors, anchors_bottom_height, bndbox_output, object_counter,
        min_x_range, max_x_range, min_y_range, max_y_range,
        feature_x_size, feature_y_size, num_anchors, num_classes, num_box_values,
        score_thresh, dir_offset, dir_limit_offset, num_dir_bins);
    checkCudaErrors(cudaGetLastError());
}

} // namespace plugin
} // namespace nvinfer1